1
0
mirror of git://sourceware.org/git/lvm2.git synced 2026-01-25 00:32:58 +03:00

Compare commits

..

26 Commits

Author SHA1 Message Date
David Teigland
0d54125830 system_id: new appmachineid option
The new system_id_source="appmachineid" will cause
lvm to use an lvm-specific derivation of the machine-id,
instead of the machine-id directly.  This is now
recommended in place of using machineid.
2020-05-21 13:33:19 -05:00
David Teigland
2a304d7a75 lvmcache: free vginfo lock_type 2020-05-14 10:20:08 -05:00
David Teigland
5c095400de hints: free hint structs on exit
and free on a couple error paths.
2020-05-13 17:20:16 -05:00
David Teigland
2f29765e7f devs: add some checks for a dev with no path name
It's possible for a dev-cache entry to remain after all
paths for it have been removed, and other parts of the
code expect that a dev always has a name.  A better fix
may be to remove a device from dev-cache after all paths
to it have been removed.
2020-05-13 16:26:26 -05:00
David Teigland
2d1fe38d84 lvmlockd: use 4K sector size when any dev is 4K
When either logical block size or physical block size is 4K,
then lvmlockd creates sanlock leases based on 4K sectors,
but the lvm client side would create the internal lvmlock LV
based on the first logical block size it saw in the VG,
which could be 512.  This could cause the lvmlock LV to be
too small to hold all the sanlock leases. Make the lvm client
side use the same sizing logic as lvmlockd.
2020-05-11 13:14:55 -05:00
Marian Csontos
33265467f9 spec: Enable integrity 2020-05-05 14:12:32 +02:00
David Teigland
5263551a2d lvmlockd: replace lock adopt info source
The lock adopt feature was disabled since it had used
lvmetad as a source of info.  This replaces the lvmetad
info with a local file and enables the adopt feature again
(enabled with lvmlockd --adopt 1).
2020-05-04 13:35:03 -05:00
David Teigland
d945b53ff7 remove vg_read_error
It once converted results to error numbers, but is now just a null check.
2020-04-24 11:14:29 -05:00
David Teigland
4047a32128 use refresh_filters only where needed
Filters are changed and need refresh in only one
place (vgimportclone), so avoid doing the refresh
for every other command that doesn't need it.
2020-04-22 14:08:54 -05:00
Maxim Plotnikov
a509776588 Fix scripts/lvmlocks.service.in using nonexistent --lock-opt autowait
The --lock-opt autowait was dropped back in 9ab6bdce01,
and attempting to specify it has quite an opposite effect:
no waiting is done, which makes the unit almost useless.
2020-04-21 16:52:45 -05:00
David Teigland
d79afd4084 lvmcache: rework handling of VGs with duplicate vgnames
The previous method of managing duplicate vgnames prevented
vgreduce from working if a foreign vg with the same name
existed.
2020-04-21 14:40:34 -05:00
David Teigland
cc4051eec0 pass cmd struct through more functions
no functional change
2020-04-21 10:58:05 -05:00
David Teigland
3854931aea lvmcache_get_mda: remove unused function 2020-04-21 10:58:05 -05:00
David Teigland
2aa36209eb vgrename: fix error value when name exists 2020-04-21 09:33:56 -05:00
David Teigland
211eaa284c WHATS_NEW: integrity with raid 2020-04-15 12:10:39 -05:00
David Teigland
d9e8895a96 Allow dm-integrity to be used for raid images
dm-integrity stores checksums of the data written to an
LV, and returns an error if data read from the LV does
not match the previously saved checksum.  When used on
raid images, dm-raid will correct the error by reading
the block from another image, and the device user sees
no error.  The integrity metadata (checksums) are stored
on an internal LV allocated by lvm for each linear image.
The internal LV is allocated on the same PV as the image.

Create a raid LV with an integrity layer over each
raid image (for raid levels 1,4,5,6,10):

lvcreate --type raidN --raidintegrity y [options]

Add an integrity layer to images of an existing raid LV:

lvconvert --raidintegrity y LV

Remove the integrity layer from images of a raid LV:

lvconvert --raidintegrity n LV

Settings

Use --raidintegritymode journal|bitmap (journal is default)
to configure the method used by dm-integrity to ensure
crash consistency.

Initialization

When integrity is added to an LV, the kernel needs to
initialize the integrity metadata/checksums for all blocks
in the LV.  The data corruption checking performed by
dm-integrity will only operate on areas of the LV that
are already initialized.  The progress of integrity
initialization is reported by the "syncpercent" LV
reporting field (and under the Cpy%Sync lvs column.)

Example: create a raid1 LV with integrity:

$ lvcreate --type raid1 -m1 --raidintegrity y -n rr -L1G foo
  Creating integrity metadata LV rr_rimage_0_imeta with size 12.00 MiB.
  Logical volume "rr_rimage_0_imeta" created.
  Creating integrity metadata LV rr_rimage_1_imeta with size 12.00 MiB.
  Logical volume "rr_rimage_1_imeta" created.
  Logical volume "rr" created.
$ lvs -a foo
  LV                  VG  Attr       LSize  Origin              Cpy%Sync
  rr                  foo rwi-a-r---  1.00g                     4.93
  [rr_rimage_0]       foo gwi-aor---  1.00g [rr_rimage_0_iorig] 41.02
  [rr_rimage_0_imeta] foo ewi-ao---- 12.00m
  [rr_rimage_0_iorig] foo -wi-ao----  1.00g
  [rr_rimage_1]       foo gwi-aor---  1.00g [rr_rimage_1_iorig] 39.45
  [rr_rimage_1_imeta] foo ewi-ao---- 12.00m
  [rr_rimage_1_iorig] foo -wi-ao----  1.00g
  [rr_rmeta_0]        foo ewi-aor---  4.00m
  [rr_rmeta_1]        foo ewi-aor---  4.00m
2020-04-15 12:10:32 -05:00
David Teigland
b6b4ad8e28 move pv_list code into lib 2020-04-13 10:04:14 -05:00
Peter Rajnoha
0dd905c959 blkdeactivate: add support for VDO in blkdeactivate script
Make it possible to tear down VDO volumes with blkdeactivate if VDO is
part of a device stack (and if VDO binary is installed). Also, support
optional -o|--vdooptions configfile=file.
2020-04-09 15:29:29 +02:00
Zdenek Kabelac
e10f20bc23 WHATS_NEWS: update 2020-04-08 15:37:24 +02:00
Zdenek Kabelac
3dd11d9ea8 test: repair of thin-pool used by foreign apps 2020-04-08 15:37:24 +02:00
Zdenek Kabelac
98e33ee3fb lvconvert: no validation for thin-pools not used by lvm2
lvm2 supports a thin-pool being later used by other tools doing
virtual volumes themselves (i.e. docker) - in this case we
shall not validate the transaction Id - as this is used by
other tools and lvm2 keeps the value 0 - so the transactionId
validation needs to be skipped in this case.
2020-04-08 15:22:44 +02:00
Marian Csontos
06cbe3cfc6 post-release 2020-03-26 12:22:09 +01:00
Marian Csontos
e1c2b41265 pre-release 2020-03-26 12:21:16 +01:00
Zdenek Kabelac
caff31df19 vdo: make vdopool wrapping device read-only
When vdopool is activated standalone - we use a wrapping linear device
to hold actual vdo device active - for this we can set-up read-only
device to ensure there cannot be made write through this device to
actual pool device.
2020-03-23 17:13:26 +01:00
Marian Csontos
e6b93dc24e test: Fix previous commit 2020-03-18 18:03:12 +01:00
Marian Csontos
fc32787c1b test: Can not attach writecache to active volume 2020-03-18 14:35:58 +01:00
53 changed files with 2254 additions and 658 deletions

View File

@@ -1 +1 @@
2.03.09(2)-git (2020-02-11)
2.03.10(2)-git (2020-03-26)

View File

@@ -1 +1 @@
1.02.171-git (2020-02-11)
1.02.173-git (2020-03-26)

View File

@@ -1,5 +1,10 @@
Version 2.03.09 -
====================================
Version 2.03.10 -
=================================
Add integrity with raid capability.
Fix support for lvconvert --repair used by foreign apps (i.e. Docker).
Version 2.03.09 - 26th March 2020
=================================
Fix formatting of vdopool (vdo_slab_size_mb was smaller by 2 bits).
Fix showing of a dm kernel error when uncaching a volume with cachevol.

View File

@@ -1,5 +1,9 @@
Version 1.02.171 -
=====================================
Version 1.02.173 -
==================================
Add support for VDO in blkdeactivate script.
Version 1.02.171 - 26th March 2020
==================================
Fix dm_list iterators with gcc 10 optimization (-ftree-pta).
Dmeventd handles timer without looping on short intervals.

View File

@@ -1047,6 +1047,26 @@ if test "$NOTIFYDBUS_SUPPORT" = yes; then
PKG_CHECK_MODULES(NOTIFY_DBUS, systemd >= 221, [HAVE_NOTIFY_DBUS=yes], $bailout)
fi
################################################################################
dnl -- Build appmachineid
AC_MSG_CHECKING(whether to build appmachineid)
AC_ARG_ENABLE(app-machineid,
AC_HELP_STRING([--enable-app-machineid],
[enable LVM system ID using app-specific machine-id]),
APP_MACHINEID_SUPPORT=$enableval, APP_MACHINEID_SUPPORT=no)
AC_MSG_RESULT($APP_MACHINEID_SUPPORT)
if test "$APP_MACHINEID_SUPPORT" = yes; then
AC_DEFINE([APP_MACHINEID_SUPPORT], 1, [Define to 1 to include code that uses libsystemd machine-id apis.])
SYSTEMD_LIBS="-lsystemd"
fi
################################################################################
dnl -- Look for libsystemd libraries
if test "$APP_MACHINEID_SUPPORT" = yes; then
PKG_CHECK_MODULES(APP_MACHINEID, systemd >= 234, [HAVE_APP_MACHINEID=yes], $bailout)
fi
################################################################################
dnl -- Enable blkid wiping functionality

View File

@@ -14,6 +14,7 @@
#include "libdaemon/client/daemon-client.h"
#define LVMLOCKD_SOCKET DEFAULT_RUN_DIR "/lvmlockd.socket"
#define LVMLOCKD_ADOPT_FILE DEFAULT_RUN_DIR "/lvmlockd.adopt"
/* Wrappers to open/close connection */

View File

@@ -38,6 +38,8 @@
#define EXTERN
#include "lvmlockd-internal.h"
static int str_to_mode(const char *str);
/*
* Basic operation of lvmlockd
*
@@ -142,6 +144,8 @@ static const char *lvmlockd_protocol = "lvmlockd";
static const int lvmlockd_protocol_version = 1;
static int daemon_quit;
static int adopt_opt;
static uint32_t adopt_update_count;
static const char *adopt_file;
/*
* We use a separate socket for dumping daemon info.
@@ -811,6 +815,144 @@ int version_from_args(char *args, unsigned int *major, unsigned int *minor, unsi
return 0;
}
/*
* Write new info when a command exits if that command has acquired a new LV
* lock. If the command has released an LV lock we don't bother updating the
* info. When adopting, we eliminate any LV lock adoptions if there is no dm
* device for that LV. If lvmlockd is terminated after acquiring but before
* writing this file, those LV locks would not be adopted on restart.
*/
#define ADOPT_VERSION_MAJOR 1
#define ADOPT_VERSION_MINOR 0

/*
 * Record every currently held EX/SH LV lock (and its lockspace) in
 * adopt_file so a restarted lvmlockd can attempt to re-adopt them.
 * Output format (consumed by read_adopt_file):
 *   lvmlockd adopt_version <maj>.<min> pid <pid> updates <count> <ctime>
 *   VG: <vg_uuid> <vg_name> <lm_type> <vg_args>
 *   LV: <vg_uuid> <lv_name> <lv_args> <mode> <version>
 * Best effort: if the file cannot be opened, the function simply returns.
 */
static void write_adopt_file(void)
{
struct lockspace *ls;
struct resource *r;
struct lock *lk;
time_t t;
FILE *fp;

/* Best effort; without the file, lock adoption is simply unavailable. */
if (!(fp = fopen(adopt_file, "w")))
return;

adopt_update_count++;

t = time(NULL);

/* Header line; ctime() supplies the trailing newline. */
fprintf(fp, "lvmlockd adopt_version %u.%u pid %d updates %u %s",
ADOPT_VERSION_MAJOR, ADOPT_VERSION_MINOR, getpid(), adopt_update_count, ctime(&t));

/* Walk shared state under lockspaces_mutex. */
pthread_mutex_lock(&lockspaces_mutex);
list_for_each_entry(ls, &lockspaces, list) {
/* Skip the dlm global-lock lockspace (matches gl_lsname_dlm); it is not a VG. */
if (ls->lm_type == LD_LM_DLM && !strcmp(ls->name, gl_lsname_dlm))
continue;
fprintf(fp, "VG: %38s %s %s %s\n",
ls->vg_uuid, ls->vg_name, lm_str(ls->lm_type), ls->vg_args);
/* Only LV resources currently locked EX or SH are worth adopting later. */
list_for_each_entry(r, &ls->resources, list) {
if (r->type != LD_RT_LV)
continue;
if ((r->mode != LD_LK_EX) && (r->mode != LD_LK_SH))
continue;
/* One LV: line per lock held on the resource. */
list_for_each_entry(lk, &r->locks, list) {
fprintf(fp, "LV: %38s %s %s %s %u\n",
ls->vg_uuid, r->name, r->lv_args, mode_str(r->mode), r->version);
}
}
}
pthread_mutex_unlock(&lockspaces_mutex);

fflush(fp);
fclose(fp);
}
/*
 * Parse the adopt file written by a previous lvmlockd instance
 * (see write_adopt_file for the format) and rebuild the list of
 * lockspaces (shared VGs) and LV lock resources to adopt.
 *
 * Each VG: line creates a lockspace added to vg_lockd; each LV: line
 * creates a resource attached to its lockspace (normally the one from
 * the preceding VG: line, otherwise found by uuid on vg_lockd).
 *
 * Returns 0 on success or when no adopt file exists (nothing to adopt),
 * -1 on any parse or allocation error, after which the caller abandons
 * adoption.
 */
static int read_adopt_file(struct list_head *vg_lockd)
{
	char adopt_line[512];
	char vg_uuid[72];
	char lm_type_str[16];
	char mode[8];
	struct lockspace *ls = NULL;	/* NULL until a VG: line is seen; an LV: line
					   arriving first must not read garbage */
	struct lockspace *ls2;
	struct resource *r;
	FILE *fp;

	/* The fixed sscanf field widths below are written for these limits. */
	if (MAX_ARGS != 64 || MAX_NAME != 64)
		return -1;

	/* A missing adopt file simply means there is nothing to adopt. */
	if (!(fp = fopen(adopt_file, "r")))
		return 0;

	while (fgets(adopt_line, sizeof(adopt_line), fp)) {
		if (adopt_line[0] == '#')
			continue;
		else if (!strncmp(adopt_line, "lvmlockd", 8)) {
			unsigned int v_major = 0, v_minor = 0;
			sscanf(adopt_line, "lvmlockd adopt_version %u.%u", &v_major, &v_minor);
			/* Only the major version must match for the format to be usable. */
			if (v_major != ADOPT_VERSION_MAJOR)
				goto fail;

		} else if (!strncmp(adopt_line, "VG:", 3)) {
			if (!(ls = alloc_lockspace()))
				goto fail;

			memset(vg_uuid, 0, sizeof(vg_uuid));

			/* %15s (was %16s): %Ns stores N chars plus a NUL, so the
			   width must be one less than sizeof(lm_type_str). */
			if (sscanf(adopt_line, "VG: %63s %64s %15s %64s",
				   vg_uuid, ls->vg_name, lm_type_str, ls->vg_args) != 4) {
				goto fail;
			}

			memcpy(ls->vg_uuid, vg_uuid, 64);

			if ((ls->lm_type = str_to_lm(lm_type_str)) < 0)
				goto fail;

			list_add(&ls->list, vg_lockd);

		} else if (!strncmp(adopt_line, "LV:", 3)) {
			if (!(r = alloc_resource()))
				goto fail;

			r->type = LD_RT_LV;

			memset(vg_uuid, 0, sizeof(vg_uuid));

			/* %64s (was bare %s) bounds lv_args to its MAX_ARGS+1 buffer;
			   %7s (was %8s) leaves room for the NUL in mode[8]. */
			if (sscanf(adopt_line, "LV: %64s %64s %64s %7s %u",
				   vg_uuid, r->name, r->lv_args, mode, &r->version) != 5) {
				goto fail;
			}

			if ((r->adopt_mode = str_to_mode(mode)) == LD_LK_IV)
				goto fail;

			/* Fast path: the LV usually belongs to the last VG: line read. */
			if (ls && !memcmp(ls->vg_uuid, vg_uuid, 64)) {
				list_add(&r->list, &ls->resources);
				r = NULL;
			} else {
				/* Otherwise search all lockspaces read so far by uuid. */
				list_for_each_entry(ls2, vg_lockd, list) {
					if (memcmp(ls2->vg_uuid, vg_uuid, 64))
						continue;
					list_add(&r->list, &ls2->resources);
					r = NULL;
					break;
				}
			}

			/* r still set means no matching lockspace was found. */
			if (r) {
				log_error("No lockspace found for resource %s vg_uuid %s", r->name, vg_uuid);
				goto fail;
			}
		}
	}

	fclose(fp);
	return 0;

fail:
	/* NOTE(review): structs allocated in the failing iteration are not
	   freed here; presumably acceptable since the caller abandons
	   adoption on this path — TODO confirm against free_resource/
	   free_lockspace usage elsewhere in the daemon. */
	fclose(fp);
	return -1;
}
/*
* These are few enough that arrays of function pointers can
* be avoided.
@@ -4689,6 +4831,7 @@ static void *client_thread_main(void *arg_in)
struct client *cl;
struct action *act;
struct action *act_un;
uint32_t lock_acquire_count = 0, lock_acquire_written = 0;
int rv;
while (1) {
@@ -4720,6 +4863,9 @@ static void *client_thread_main(void *arg_in)
rv = -1;
}
if (act->flags & LD_AF_LV_LOCK)
lock_acquire_count++;
/*
* The client failed after we acquired an LV lock for
* it, but before getting this reply saying it's done.
@@ -4741,6 +4887,11 @@ static void *client_thread_main(void *arg_in)
continue;
}
if (adopt_opt && (lock_acquire_count > lock_acquire_written)) {
lock_acquire_written = lock_acquire_count;
write_adopt_file();
}
/*
* Queue incoming actions for lockspace threads
*/
@@ -4814,6 +4965,8 @@ static void *client_thread_main(void *arg_in)
pthread_mutex_unlock(&client_mutex);
}
out:
if (adopt_opt && lock_acquire_written)
unlink(adopt_file);
return NULL;
}
@@ -4846,180 +4999,6 @@ static void close_client_thread(void)
log_error("pthread_join client_thread error %d", perrno);
}
/*
* Get a list of all VGs with a lockd type (sanlock|dlm).
* We'll match this list against a list of existing lockspaces that are
* found in the lock manager.
*
* For each of these VGs, also create a struct resource on ls->resources to
* represent each LV in the VG that uses a lock. For each of these LVs
* that are active, we'll attempt to adopt a lock.
*/
static int get_lockd_vgs(struct list_head *vg_lockd)
{
/* FIXME: get VGs some other way */
return -1;
#if 0
struct list_head update_vgs;
daemon_reply reply;
struct dm_config_node *cn;
struct dm_config_node *metadata;
struct dm_config_node *md_cn;
struct dm_config_node *lv_cn;
struct lockspace *ls, *safe;
struct resource *r;
const char *vg_name;
const char *vg_uuid;
const char *lv_uuid;
const char *lock_type;
const char *lock_args;
char find_str_path[PATH_MAX];
int rv = 0;
INIT_LIST_HEAD(&update_vgs);
reply = send_lvmetad("vg_list", "token = %s", "skip", NULL);
if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK")) {
log_error("vg_list from lvmetad failed %d", reply.error);
rv = -EINVAL;
goto destroy;
}
if (!(cn = dm_config_find_node(reply.cft->root, "volume_groups"))) {
log_error("get_lockd_vgs no vgs");
rv = -EINVAL;
goto destroy;
}
/* create an update_vgs list of all vg uuids */
for (cn = cn->child; cn; cn = cn->sib) {
vg_uuid = cn->key;
if (!(ls = alloc_lockspace())) {
rv = -ENOMEM;
break;
}
strncpy(ls->vg_uuid, vg_uuid, 64);
list_add_tail(&ls->list, &update_vgs);
log_debug("get_lockd_vgs %s", vg_uuid);
}
destroy:
daemon_reply_destroy(reply);
if (rv < 0)
goto out;
/* get vg_name and lock_type for each vg uuid entry in update_vgs */
list_for_each_entry(ls, &update_vgs, list) {
reply = send_lvmetad("vg_lookup",
"token = %s", "skip",
"uuid = %s", ls->vg_uuid,
NULL);
if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK")) {
log_error("vg_lookup from lvmetad failed %d", reply.error);
rv = -EINVAL;
goto next;
}
vg_name = daemon_reply_str(reply, "name", NULL);
if (!vg_name) {
log_error("get_lockd_vgs %s no name", ls->vg_uuid);
rv = -EINVAL;
goto next;
}
strncpy(ls->vg_name, vg_name, MAX_NAME);
metadata = dm_config_find_node(reply.cft->root, "metadata");
if (!metadata) {
log_error("get_lockd_vgs %s name %s no metadata",
ls->vg_uuid, ls->vg_name);
rv = -EINVAL;
goto next;
}
lock_type = dm_config_find_str(metadata, "metadata/lock_type", NULL);
ls->lm_type = str_to_lm(lock_type);
if ((ls->lm_type != LD_LM_SANLOCK) && (ls->lm_type != LD_LM_DLM)) {
log_debug("get_lockd_vgs %s not lockd type", ls->vg_name);
continue;
}
lock_args = dm_config_find_str(metadata, "metadata/lock_args", NULL);
if (lock_args)
strncpy(ls->vg_args, lock_args, MAX_ARGS);
log_debug("get_lockd_vgs %s lock_type %s lock_args %s",
ls->vg_name, lock_type, lock_args ?: "none");
/*
* Make a record (struct resource) of each lv that uses a lock.
* For any lv that uses a lock, we'll check if the lv is active
* and if so try to adopt a lock for it.
*/
for (md_cn = metadata->child; md_cn; md_cn = md_cn->sib) {
if (strcmp(md_cn->key, "logical_volumes"))
continue;
for (lv_cn = md_cn->child; lv_cn; lv_cn = lv_cn->sib) {
snprintf(find_str_path, PATH_MAX, "%s/lock_args", lv_cn->key);
lock_args = dm_config_find_str(lv_cn, find_str_path, NULL);
if (!lock_args)
continue;
snprintf(find_str_path, PATH_MAX, "%s/id", lv_cn->key);
lv_uuid = dm_config_find_str(lv_cn, find_str_path, NULL);
if (!lv_uuid) {
log_error("get_lock_vgs no lv id for name %s", lv_cn->key);
continue;
}
if (!(r = alloc_resource())) {
rv = -ENOMEM;
goto next;
}
r->use_vb = 0;
r->type = LD_RT_LV;
strncpy(r->name, lv_uuid, MAX_NAME);
if (lock_args)
strncpy(r->lv_args, lock_args, MAX_ARGS);
list_add_tail(&r->list, &ls->resources);
log_debug("get_lockd_vgs %s lv %s %s (name %s)",
ls->vg_name, r->name, lock_args ? lock_args : "", lv_cn->key);
}
}
next:
daemon_reply_destroy(reply);
if (rv < 0)
break;
}
out:
/* Return lockd VG's on the vg_lockd list. */
list_for_each_entry_safe(ls, safe, &update_vgs, list) {
list_del(&ls->list);
if ((ls->lm_type == LD_LM_SANLOCK) || (ls->lm_type == LD_LM_DLM))
list_add_tail(&ls->list, vg_lockd);
else
free(ls);
}
return rv;
#endif
}
static char _dm_uuid[DM_UUID_LEN];
static char *get_dm_uuid(char *dm_name)
@@ -5236,9 +5215,9 @@ static void adopt_locks(void)
INIT_LIST_HEAD(&to_unlock);
/*
* Get list of lockspaces from lock managers.
* Get list of VGs from lvmetad with a lockd type.
* Get list of active lockd type LVs from /dev.
* Get list of lockspaces from currently running lock managers.
* Get list of shared VGs from file written by prior lvmlockd.
* Get list of active LVs (in the shared VGs) from the file.
*/
if (lm_support_dlm() && lm_is_running_dlm()) {
@@ -5262,12 +5241,17 @@ static void adopt_locks(void)
* Adds a struct lockspace to vg_lockd for each lockd VG.
* Adds a struct resource to ls->resources for each LV.
*/
rv = get_lockd_vgs(&vg_lockd);
rv = read_adopt_file(&vg_lockd);
if (rv < 0) {
log_error("adopt_locks get_lockd_vgs failed");
log_error("adopt_locks read_adopt_file failed");
goto fail;
}
if (list_empty(&vg_lockd)) {
log_debug("No lockspaces in adopt file");
return;
}
/*
* For each resource on each lockspace, check if the
* corresponding LV is active. If so, leave the
@@ -5506,7 +5490,7 @@ static void adopt_locks(void)
goto fail;
act->op = LD_OP_LOCK;
act->rt = LD_RT_LV;
act->mode = LD_LK_EX;
act->mode = r->adopt_mode;
act->flags = (LD_AF_ADOPT | LD_AF_PERSISTENT);
act->client_id = INTERNAL_CLIENT_ID;
act->lm_type = ls->lm_type;
@@ -5604,8 +5588,9 @@ static void adopt_locks(void)
* Adopt failed because the orphan has a different mode
* than initially requested. Repeat the lock-adopt operation
* with the other mode. N.B. this logic depends on first
* trying sh then ex for GL/VG locks, and ex then sh for
* LV locks.
* trying sh then ex for GL/VG locks; for LV locks the mode
* from the adopt file is tried first, the alternate
* (if the mode in adopt file was wrong somehow.)
*/
if ((act->rt != LD_RT_LV) && (act->mode == LD_LK_SH)) {
@@ -5613,9 +5598,12 @@ static void adopt_locks(void)
act->mode = LD_LK_EX;
rv = add_lock_action(act);
} else if ((act->rt == LD_RT_LV) && (act->mode == LD_LK_EX)) {
/* LV locks: attempt to adopt sh after ex failed. */
act->mode = LD_LK_SH;
} else if (act->rt == LD_RT_LV) {
/* LV locks: attempt to adopt the other mode. */
if (act->mode == LD_LK_EX)
act->mode = LD_LK_SH;
else if (act->mode == LD_LK_SH)
act->mode = LD_LK_EX;
rv = add_lock_action(act);
} else {
@@ -5750,10 +5738,13 @@ static void adopt_locks(void)
if (count_start_fail || count_adopt_fail)
goto fail;
unlink(adopt_file);
write_adopt_file();
log_debug("adopt_locks done");
return;
fail:
unlink(adopt_file);
log_error("adopt_locks failed, reset host");
}
@@ -6028,6 +6019,8 @@ static void usage(char *prog, FILE *file)
fprintf(file, " Set path to the pid file. [%s]\n", LVMLOCKD_PIDFILE);
fprintf(file, " --socket-path | -s <path>\n");
fprintf(file, " Set path to the socket to listen on. [%s]\n", LVMLOCKD_SOCKET);
fprintf(file, " --adopt-file <path>\n");
fprintf(file, " Set path to the adopt file. [%s]\n", LVMLOCKD_ADOPT_FILE);
fprintf(file, " --syslog-priority | -S err|warning|debug\n");
fprintf(file, " Write log messages from this level up to syslog. [%s]\n", _syslog_num_to_name(LOG_SYSLOG_PRIO));
fprintf(file, " --gl-type | -g <str>\n");
@@ -6063,6 +6056,7 @@ int main(int argc, char *argv[])
{"daemon-debug", no_argument, 0, 'D' },
{"pid-file", required_argument, 0, 'p' },
{"socket-path", required_argument, 0, 's' },
{"adopt-file", required_argument, 0, 128 },
{"gl-type", required_argument, 0, 'g' },
{"host-id", required_argument, 0, 'i' },
{"host-id-file", required_argument, 0, 'F' },
@@ -6085,6 +6079,9 @@ int main(int argc, char *argv[])
switch (c) {
case '0':
break;
case 128:
adopt_file = strdup(optarg);
break;
case 'h':
usage(argv[0], stdout);
exit(EXIT_SUCCESS);
@@ -6146,6 +6143,9 @@ int main(int argc, char *argv[])
if (!ds.socket_path)
ds.socket_path = LVMLOCKD_SOCKET;
if (!adopt_file)
adopt_file = LVMLOCKD_ADOPT_FILE;
/* runs daemon_main/main_loop */
daemon_start(ds);

View File

@@ -398,12 +398,18 @@ static int lm_adopt_dlm(struct lockspace *ls, struct resource *r, int ld_mode,
(void *)1, (void *)1, (void *)1,
NULL, NULL);
if (rv == -1 && errno == -EAGAIN) {
if (rv == -1 && (errno == EAGAIN)) {
log_debug("S %s R %s adopt_dlm adopt mode %d try other mode",
ls->name, r->name, ld_mode);
rv = -EUCLEAN;
goto fail;
}
if (rv == -1 && (errno == ENOENT)) {
log_debug("S %s R %s adopt_dlm adopt mode %d no lock",
ls->name, r->name, ld_mode);
rv = -ENOENT;
goto fail;
}
if (rv < 0) {
log_debug("S %s R %s adopt_dlm mode %d flags %x error %d errno %d",
ls->name, r->name, mode, flags, rv, errno);

View File

@@ -145,6 +145,7 @@ struct resource {
char name[MAX_NAME+1]; /* vg name or lv name */
int8_t type; /* resource type LD_RT_ */
int8_t mode;
int8_t adopt_mode;
unsigned int sh_count; /* number of sh locks on locks list */
uint32_t version;
uint32_t last_client_id; /* last client_id to lock or unlock resource */

View File

@@ -86,7 +86,7 @@ int read_only_lv(const struct logical_volume *lv, const struct lv_activate_opts
return 0; /* Keep RAID SubLvs writable */
if (!layer) {
if (lv_is_thin_pool(lv))
if (lv_is_thin_pool(lv) || lv_is_vdo_pool(lv))
return 1;
}

561
lib/cache/lvmcache.c vendored
View File

@@ -49,7 +49,7 @@ struct lvmcache_info {
/* One per VG */
struct lvmcache_vginfo {
struct dm_list list; /* Join these vginfos together */
struct dm_list list; /* _vginfos */
struct dm_list infos; /* List head for lvmcache_infos */
struct dm_list outdated_infos; /* vg_read moves info from infos to outdated_infos */
struct dm_list pvsummaries; /* pv_list taken directly from vgsummary */
@@ -58,7 +58,6 @@ struct lvmcache_vginfo {
uint32_t status;
char vgid[ID_LEN + 1];
char _padding[7];
struct lvmcache_vginfo *next; /* Another VG with same name? */
char *creation_host;
char *system_id;
char *lock_type;
@@ -66,8 +65,16 @@ struct lvmcache_vginfo {
size_t mda_size;
int seqno;
bool scan_summary_mismatch; /* vgsummary from devs had mismatching seqno or checksum */
bool has_duplicate_local_vgname; /* this local vg and another local vg have same name */
bool has_duplicate_foreign_vgname; /* this foreign vg and another foreign vg have same name */
};
/*
* Each VG found during scan gets a vginfo struct.
* Each vginfo is in _vginfos and _vgid_hash, and
* _vgname_hash (unless disabled due to duplicate vgnames).
*/
static struct dm_hash_table *_pvid_hash = NULL;
static struct dm_hash_table *_vgid_hash = NULL;
static struct dm_hash_table *_vgname_hash = NULL;
@@ -262,16 +269,6 @@ void lvmcache_get_mdas(struct cmd_context *cmd,
}
}
static void _vginfo_attach_info(struct lvmcache_vginfo *vginfo,
struct lvmcache_info *info)
{
if (!vginfo)
return;
info->vginfo = vginfo;
dm_list_add(&vginfo->infos, &info->list);
}
static void _vginfo_detach_info(struct lvmcache_info *info)
{
if (!dm_list_empty(&info->list)) {
@@ -282,57 +279,80 @@ static void _vginfo_detach_info(struct lvmcache_info *info)
info->vginfo = NULL;
}
/* If vgid supplied, require a match. */
struct lvmcache_vginfo *lvmcache_vginfo_from_vgname(const char *vgname, const char *vgid)
static struct lvmcache_vginfo *_search_vginfos_list(const char *vgname, const char *vgid)
{
struct lvmcache_vginfo *vginfo;
if (!vgname)
return lvmcache_vginfo_from_vgid(vgid);
if (!_vgname_hash) {
log_debug_cache(INTERNAL_ERROR "Internal lvmcache is no yet initialized.");
return NULL;
}
if (!(vginfo = dm_hash_lookup(_vgname_hash, vgname))) {
log_debug_cache("lvmcache has no info for vgname \"%s\"%s" FMTVGID ".",
vgname, (vgid) ? " with VGID " : "", (vgid) ? : "");
return NULL;
}
if (vgid)
do
if (!strncmp(vgid, vginfo->vgid, ID_LEN))
if (vgid) {
dm_list_iterate_items(vginfo, &_vginfos) {
if (!strcmp(vgid, vginfo->vgid))
return vginfo;
while ((vginfo = vginfo->next));
if (!vginfo)
log_debug_cache("lvmcache has not found vgname \"%s\"%s" FMTVGID ".",
vgname, (vgid) ? " with VGID " : "", (vgid) ? : "");
return vginfo;
}
} else {
dm_list_iterate_items(vginfo, &_vginfos) {
if (!strcmp(vgname, vginfo->vgname))
return vginfo;
}
}
return NULL;
}
struct lvmcache_vginfo *lvmcache_vginfo_from_vgid(const char *vgid)
static struct lvmcache_vginfo *_vginfo_lookup(const char *vgname, const char *vgid)
{
struct lvmcache_vginfo *vginfo;
char id[ID_LEN + 1] __attribute__((aligned(8)));
if (!_vgid_hash || !vgid) {
log_debug_cache(INTERNAL_ERROR "Internal cache cannot lookup vgid.");
return NULL;
if (vgid) {
/* vgid not necessarily NULL-terminated */
(void) dm_strncpy(id, vgid, sizeof(id));
if ((vginfo = dm_hash_lookup(_vgid_hash, id))) {
if (vgname && strcmp(vginfo->vgname, vgname)) {
/* should never happen */
log_error(INTERNAL_ERROR "vginfo_lookup vgid %s has two names %s %s",
id, vginfo->vgname, vgname);
return NULL;
}
return vginfo;
} else {
/* lookup by vgid that doesn't exist */
return NULL;
}
}
/* vgid not necessarily NULL-terminated */
(void) dm_strncpy(id, vgid, sizeof(id));
if (!(vginfo = dm_hash_lookup(_vgid_hash, id))) {
log_debug_cache("lvmcache has no info for vgid \"%s\"", id);
return NULL;
if (vgname && !_found_duplicate_vgnames) {
if ((vginfo = dm_hash_lookup(_vgname_hash, vgname))) {
if (vginfo->has_duplicate_local_vgname) {
/* should never happen, found_duplicate_vgnames should be set */
log_error(INTERNAL_ERROR "vginfo_lookup %s %s has_duplicate_local_vgname", vgname, vgid);
return NULL;
}
return vginfo;
}
}
return vginfo;
if (vgname && _found_duplicate_vgnames) {
if ((vginfo = _search_vginfos_list(vgname, vgid))) {
if (vginfo->has_duplicate_local_vgname) {
log_debug("vginfo_lookup %s %s has_duplicate_local_vgname return none", vgname, vgid);
return NULL;
}
return vginfo;
}
}
/* lookup by vgname that doesn't exist */
return NULL;
}
struct lvmcache_vginfo *lvmcache_vginfo_from_vgname(const char *vgname, const char *vgid)
{
return _vginfo_lookup(vgname, vgid);
}
struct lvmcache_vginfo *lvmcache_vginfo_from_vgid(const char *vgid)
{
return _vginfo_lookup(NULL, vgid);
}
const char *lvmcache_vgname_from_vgid(struct dm_pool *mem, const char *vgid)
@@ -353,17 +373,43 @@ const char *lvmcache_vgid_from_vgname(struct cmd_context *cmd, const char *vgnam
{
struct lvmcache_vginfo *vginfo;
if (!(vginfo = dm_hash_lookup(_vgname_hash, vgname)))
return_NULL;
if (_found_duplicate_vgnames) {
if (!(vginfo = _search_vginfos_list(vgname, NULL)))
return_NULL;
} else {
if (!(vginfo = dm_hash_lookup(_vgname_hash, vgname)))
return_NULL;
}
if (!vginfo->next)
return dm_pool_strdup(cmd->mem, vginfo->vgid);
if (vginfo->has_duplicate_local_vgname) {
/*
* return NULL if there is a local VG with the same name since
* we don't know which to use.
*/
return NULL;
}
/*
* There are multiple VGs with this name to choose from.
* Return an error because we don't know which VG is intended.
*/
return NULL;
if (vginfo->has_duplicate_foreign_vgname)
return NULL;
return dm_pool_strdup(cmd->mem, vginfo->vgid);
}
bool lvmcache_has_duplicate_local_vgname(const char *vgid, const char *vgname)
{
struct lvmcache_vginfo *vginfo;
if (_found_duplicate_vgnames) {
if (!(vginfo = _search_vginfos_list(vgname, vgid)))
return false;
} else {
if (!(vginfo = dm_hash_lookup(_vgname_hash, vgname)))
return false;
}
if (vginfo->has_duplicate_local_vgname)
return true;
return false;
}
/*
@@ -986,15 +1032,6 @@ int lvmcache_label_scan(struct cmd_context *cmd)
log_debug_cache("Finding VG info");
/* FIXME: can this happen? */
if (!cmd->filter) {
log_error("label scan is missing filter");
goto out;
}
if (!refresh_filters(cmd))
log_error("Scan failed to refresh device filter.");
/*
* Duplicates found during this label scan are added to _initial_duplicates.
*/
@@ -1057,7 +1094,6 @@ int lvmcache_label_scan(struct cmd_context *cmd)
r = 1;
out:
dm_list_iterate_items(vginfo, &_vginfos) {
if (is_orphan_vg(vginfo->vgname))
continue;
@@ -1148,49 +1184,20 @@ int lvmcache_pvid_in_unused_duplicates(const char *pvid)
return 0;
}
static int _free_vginfo(struct lvmcache_vginfo *vginfo)
static void _free_vginfo(struct lvmcache_vginfo *vginfo)
{
struct lvmcache_vginfo *primary_vginfo, *vginfo2;
int r = 1;
vginfo2 = primary_vginfo = lvmcache_vginfo_from_vgname(vginfo->vgname, NULL);
if (vginfo == primary_vginfo) {
dm_hash_remove(_vgname_hash, vginfo->vgname);
if (vginfo->next && !dm_hash_insert(_vgname_hash, vginfo->vgname,
vginfo->next)) {
log_error("_vgname_hash re-insertion for %s failed",
vginfo->vgname);
r = 0;
}
} else
while (vginfo2) {
if (vginfo2->next == vginfo) {
vginfo2->next = vginfo->next;
break;
}
vginfo2 = vginfo2->next;
}
free(vginfo->system_id);
free(vginfo->vgname);
free(vginfo->system_id);
free(vginfo->creation_host);
if (*vginfo->vgid && _vgid_hash &&
lvmcache_vginfo_from_vgid(vginfo->vgid) == vginfo)
dm_hash_remove(_vgid_hash, vginfo->vgid);
dm_list_del(&vginfo->list);
if (vginfo->lock_type)
free(vginfo->lock_type);
free(vginfo);
return r;
}
/*
* vginfo must be info->vginfo unless info is NULL
* Remove vginfo from standard lists/hashes.
*/
static int _drop_vginfo(struct lvmcache_info *info, struct lvmcache_vginfo *vginfo)
static void _drop_vginfo(struct lvmcache_info *info, struct lvmcache_vginfo *vginfo)
{
if (info)
_vginfo_detach_info(info);
@@ -1198,12 +1205,16 @@ static int _drop_vginfo(struct lvmcache_info *info, struct lvmcache_vginfo *vgin
/* vginfo still referenced? */
if (!vginfo || is_orphan_vg(vginfo->vgname) ||
!dm_list_empty(&vginfo->infos))
return 1;
return;
if (!_free_vginfo(vginfo))
return_0;
if (dm_hash_lookup(_vgname_hash, vginfo->vgname) == vginfo)
dm_hash_remove(_vgname_hash, vginfo->vgname);
return 1;
dm_hash_remove(_vgid_hash, vginfo->vgid);
dm_list_del(&vginfo->list); /* _vginfos list */
_free_vginfo(vginfo);
}
void lvmcache_del(struct lvmcache_info *info)
@@ -1261,180 +1272,150 @@ static int _lvmcache_update_vgid(struct lvmcache_info *info,
return 1;
}
static int _insert_vginfo(struct lvmcache_vginfo *new_vginfo, const char *vgid,
uint32_t vgstatus, const char *creation_host,
struct lvmcache_vginfo *primary_vginfo)
{
struct lvmcache_vginfo *last_vginfo = primary_vginfo;
char uuid_primary[64] __attribute__((aligned(8)));
char uuid_new[64] __attribute__((aligned(8)));
int use_new = 0;
/* Pre-existing VG takes precedence. Unexported VG takes precedence. */
if (primary_vginfo) {
if (!id_write_format((const struct id *)vgid, uuid_new, sizeof(uuid_new)))
return_0;
if (!id_write_format((const struct id *)&primary_vginfo->vgid, uuid_primary,
sizeof(uuid_primary)))
return_0;
_found_duplicate_vgnames = 1;
/*
* vginfo is kept for each VG with the same name.
* They are saved with the vginfo->next list.
* These checks just decide the ordering of
* that list.
*
* FIXME: it should no longer matter what order
* the vginfo's are kept in, so we can probably
* remove these comparisons and reordering entirely.
*
* If Primary not exported, new exported => keep
* Else Primary exported, new not exported => change
* Else Primary has hostname for this machine => keep
* Else Primary has no hostname, new has one => change
* Else New has hostname for this machine => change
* Else Keep primary.
*/
if (!(primary_vginfo->status & EXPORTED_VG) &&
(vgstatus & EXPORTED_VG))
log_verbose("Cache: Duplicate VG name %s: "
"Existing %s takes precedence over "
"exported %s", new_vginfo->vgname,
uuid_primary, uuid_new);
else if ((primary_vginfo->status & EXPORTED_VG) &&
!(vgstatus & EXPORTED_VG)) {
log_verbose("Cache: Duplicate VG name %s: "
"%s takes precedence over exported %s",
new_vginfo->vgname, uuid_new,
uuid_primary);
use_new = 1;
} else if (primary_vginfo->creation_host &&
!strcmp(primary_vginfo->creation_host,
primary_vginfo->fmt->cmd->hostname))
log_verbose("Cache: Duplicate VG name %s: "
"Existing %s (created here) takes precedence "
"over %s", new_vginfo->vgname, uuid_primary,
uuid_new);
else if (!primary_vginfo->creation_host && creation_host) {
log_verbose("Cache: Duplicate VG name %s: "
"%s (with creation_host) takes precedence over %s",
new_vginfo->vgname, uuid_new,
uuid_primary);
use_new = 1;
} else if (creation_host &&
!strcmp(creation_host,
primary_vginfo->fmt->cmd->hostname)) {
log_verbose("Cache: Duplicate VG name %s: "
"%s (created here) takes precedence over %s",
new_vginfo->vgname, uuid_new,
uuid_primary);
use_new = 1;
} else {
log_verbose("Cache: Duplicate VG name %s: "
"Prefer existing %s vs new %s",
new_vginfo->vgname, uuid_primary, uuid_new);
}
if (!use_new) {
while (last_vginfo->next)
last_vginfo = last_vginfo->next;
last_vginfo->next = new_vginfo;
return 1;
}
dm_hash_remove(_vgname_hash, primary_vginfo->vgname);
}
if (!dm_hash_insert(_vgname_hash, new_vginfo->vgname, new_vginfo)) {
log_error("cache_update: vg hash insertion failed: %s",
new_vginfo->vgname);
return 0;
}
if (primary_vginfo)
new_vginfo->next = primary_vginfo;
return 1;
}
static int _lvmcache_update_vgname(struct lvmcache_info *info,
static int _lvmcache_update_vgname(struct cmd_context *cmd,
struct lvmcache_info *info,
const char *vgname, const char *vgid,
uint32_t vgstatus, const char *creation_host,
const char *system_id,
const struct format_type *fmt)
{
struct lvmcache_vginfo *vginfo, *primary_vginfo;
char mdabuf[32];
char vgid_str[64] __attribute__((aligned(8)));
char other_str[64] __attribute__((aligned(8)));
struct lvmcache_vginfo *vginfo;
struct lvmcache_vginfo *other;
int vginfo_is_allowed;
int other_is_allowed;
if (!vgname || (info && info->vginfo && !strcmp(info->vginfo->vgname, vgname)))
return 1;
/* Remove existing vginfo entry */
if (info)
_drop_vginfo(info, info->vginfo);
if (!id_write_format((const struct id *)vgid, vgid_str, sizeof(vgid_str)))
stack;
if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, vgid))) {
/*
* Add vginfo for orphan VG
*/
if (!info) {
if (!(vginfo = zalloc(sizeof(*vginfo)))) {
log_error("lvmcache adding vg list alloc failed %s", vgname);
return 0;
}
if (!(vginfo->vgname = strdup(vgname))) {
free(vginfo);
log_error("lvmcache adding vg name alloc failed %s", vgname);
return 0;
}
dm_list_init(&vginfo->infos);
dm_list_init(&vginfo->outdated_infos);
dm_list_init(&vginfo->pvsummaries);
vginfo->fmt = fmt;
if (!dm_hash_insert(_vgname_hash, vgname, vginfo)) {
free(vginfo->vgname);
free(vginfo);
return_0;
}
if (!_lvmcache_update_vgid(NULL, vginfo, vgid)) {
free(vginfo->vgname);
free(vginfo);
return_0;
}
/* Ensure orphans appear last on list_iterate */
dm_list_add(&_vginfos, &vginfo->list);
return 1;
}
_drop_vginfo(info, info->vginfo);
if (!(vginfo = lvmcache_vginfo_from_vgid(vgid))) {
/*
* Create a vginfo struct for this VG and put the vginfo
* into the hash table.
*/
log_debug_cache("lvmcache adding vginfo for %s %s", vgname, vgid_str);
if (!(vginfo = zalloc(sizeof(*vginfo)))) {
log_error("lvmcache_update_vgname: list alloc failed");
log_error("lvmcache adding vg list alloc failed %s", vgname);
return 0;
}
if (!(vginfo->vgname = strdup(vgname))) {
free(vginfo);
log_error("cache vgname alloc failed for %s", vgname);
log_error("lvmcache adding vg name alloc failed %s", vgname);
return 0;
}
dm_list_init(&vginfo->infos);
dm_list_init(&vginfo->outdated_infos);
dm_list_init(&vginfo->pvsummaries);
/*
* A different VG (different uuid) can exist with the same name.
* In this case, the two VGs will have separate vginfo structs,
* but the second will be linked onto the existing vginfo->next,
* not in the hash.
*/
primary_vginfo = lvmcache_vginfo_from_vgname(vgname, NULL);
if ((other = dm_hash_lookup(_vgname_hash, vgname))) {
log_debug_cache("lvmcache adding vginfo found duplicate VG name %s", vgname);
if (!_insert_vginfo(vginfo, vgid, vgstatus, creation_host, primary_vginfo)) {
free(vginfo->vgname);
free(vginfo);
return 0;
/*
* A different VG (different uuid) can exist with the
* same name. In this case, the two VGs will have
* separate vginfo structs, but one will be in the
* vgname_hash. If both vginfos are local/accessible,
* then _found_duplicate_vgnames is set which will
* disable any further use of the vgname_hash.
*/
if (!memcmp(other->vgid, vgid, ID_LEN)) {
/* shouldn't happen since we looked up by vgid above */
log_error(INTERNAL_ERROR "lvmcache_update_vgname %s %s %s %s",
vgname, vgid_str, other->vgname, other->vgid);
free(vginfo->vgname);
free(vginfo);
return 0;
}
vginfo_is_allowed = is_system_id_allowed(cmd, system_id);
other_is_allowed = is_system_id_allowed(cmd, other->system_id);
if (vginfo_is_allowed && other_is_allowed) {
if (!id_write_format((const struct id *)other->vgid, other_str, sizeof(other_str)))
stack;
vginfo->has_duplicate_local_vgname = 1;
other->has_duplicate_local_vgname = 1;
_found_duplicate_vgnames = 1;
log_warn("WARNING: VG name %s is used by VGs %s and %s.",
vgname, vgid_str, other_str);
log_warn("Fix duplicate VG names with vgrename uuid, a device filter, or system IDs.");
}
if (!vginfo_is_allowed && !other_is_allowed) {
vginfo->has_duplicate_foreign_vgname = 1;
other->has_duplicate_foreign_vgname = 1;
}
if (!other_is_allowed && vginfo_is_allowed) {
/* the accessible vginfo must be in vgnames_hash */
dm_hash_remove(_vgname_hash, vgname);
if (!dm_hash_insert(_vgname_hash, vgname, vginfo)) {
log_error("lvmcache adding vginfo to name hash failed %s", vgname);
return 0;
}
}
} else {
if (!dm_hash_insert(_vgname_hash, vgname, vginfo)) {
log_error("lvmcache adding vg to name hash failed %s", vgname);
free(vginfo->vgname);
free(vginfo);
return 0;
}
}
/* Ensure orphans appear last on list_iterate */
if (is_orphan_vg(vgname))
dm_list_add(&_vginfos, &vginfo->list);
else
dm_list_add_h(&_vginfos, &vginfo->list);
dm_list_add_h(&_vginfos, &vginfo->list);
}
if (info)
_vginfo_attach_info(vginfo, info);
else if (!_lvmcache_update_vgid(NULL, vginfo, vgid)) /* Orphans */
return_0;
/* FIXME Check consistency of list! */
vginfo->fmt = fmt;
info->vginfo = vginfo;
dm_list_add(&vginfo->infos, &info->list);
if (info) {
if (info->mdas.n)
sprintf(mdabuf, " with %u mda(s)", dm_list_size(&info->mdas));
else
mdabuf[0] = '\0';
log_debug_cache("lvmcache %s: now in VG %s%s%s%s%s.",
dev_name(info->dev),
vgname, vginfo->vgid[0] ? " (" : "",
vginfo->vgid[0] ? vginfo->vgid : "",
vginfo->vgid[0] ? ")" : "", mdabuf);
} else
log_debug_cache("lvmcache: Initialised VG %s.", vgname);
log_debug_cache("lvmcache %s: now in VG %s %s", dev_name(info->dev), vgname, vgid_str);
return 1;
}
@@ -1511,9 +1492,9 @@ out:
return 1;
}
int lvmcache_add_orphan_vginfo(const char *vgname, struct format_type *fmt)
int lvmcache_add_orphan_vginfo(struct cmd_context *cmd, const char *vgname, struct format_type *fmt)
{
return _lvmcache_update_vgname(NULL, vgname, vgname, 0, "", fmt);
return _lvmcache_update_vgname(cmd, NULL, vgname, vgname, "", fmt);
}
static void _lvmcache_update_pvsummaries(struct lvmcache_vginfo *vginfo, struct lvmcache_vgsummary *vgsummary)
@@ -1532,7 +1513,7 @@ static void _lvmcache_update_pvsummaries(struct lvmcache_vginfo *vginfo, struct
* Returning 0 causes the caller to remove the info struct for this
* device from lvmcache, which will make it look like a missing device.
*/
int lvmcache_update_vgname_and_id(struct lvmcache_info *info, struct lvmcache_vgsummary *vgsummary)
int lvmcache_update_vgname_and_id(struct cmd_context *cmd, struct lvmcache_info *info, struct lvmcache_vgsummary *vgsummary)
{
const char *vgname = vgsummary->vgname;
const char *vgid = (char *)&vgsummary->vgid;
@@ -1545,6 +1526,7 @@ int lvmcache_update_vgname_and_id(struct lvmcache_info *info, struct lvmcache_vg
vgid = vgname;
}
/* FIXME: remove this, it shouldn't be needed */
/* If PV without mdas is already in a real VG, don't make it orphan */
if (is_orphan_vg(vgname) && info->vginfo &&
mdas_empty_or_ignored(&info->mdas) &&
@@ -1556,7 +1538,7 @@ int lvmcache_update_vgname_and_id(struct lvmcache_info *info, struct lvmcache_vg
* and attaches the info struct for the dev to the vginfo.
* Puts the vginfo into the vgname hash table.
*/
if (!_lvmcache_update_vgname(info, vgname, vgid, vgsummary->vgstatus, vgsummary->creation_host, info->fmt)) {
if (!_lvmcache_update_vgname(cmd, info, vgname, vgid, vgsummary->system_id, info->fmt)) {
/* shouldn't happen, internal error */
log_error("Failed to update VG %s info in lvmcache.", vgname);
return 0;
@@ -1735,7 +1717,7 @@ int lvmcache_update_vg_from_write(struct volume_group *vg)
(void) dm_strncpy(pvid_s, (char *) &pvl->pv->id, sizeof(pvid_s));
/* FIXME Could pvl->pv->dev->pvid ever be different? */
if ((info = lvmcache_info_from_pvid(pvid_s, pvl->pv->dev, 0)) &&
!lvmcache_update_vgname_and_id(info, &vgsummary))
!lvmcache_update_vgname_and_id(vg->cmd, info, &vgsummary))
return_0;
}
@@ -1819,7 +1801,7 @@ int lvmcache_update_vg_from_read(struct volume_group *vg, unsigned precommitted)
* info's for PVs without metadata were not connected to the
* vginfo by label_scan, so do it here.
*/
if (!lvmcache_update_vgname_and_id(info, &vgsummary)) {
if (!lvmcache_update_vgname_and_id(vg->cmd, info, &vgsummary)) {
log_debug_cache("lvmcache_update_vg %s failed to update info for %s",
vg->name, dev_name(info->dev));
}
@@ -1927,7 +1909,7 @@ static struct lvmcache_info * _create_info(struct labeller *labeller, struct dev
return info;
}
struct lvmcache_info *lvmcache_add(struct labeller *labeller,
struct lvmcache_info *lvmcache_add(struct cmd_context *cmd, struct labeller *labeller,
const char *pvid, struct device *dev, uint64_t label_sector,
const char *vgname, const char *vgid, uint32_t vgstatus,
int *is_duplicate)
@@ -2042,7 +2024,7 @@ update_vginfo:
if (vgid)
strncpy((char *)&vgsummary.vgid, vgid, sizeof(vgsummary.vgid));
if (!lvmcache_update_vgname_and_id(info, &vgsummary)) {
if (!lvmcache_update_vgname_and_id(cmd, info, &vgsummary)) {
if (created) {
dm_hash_remove(_pvid_hash, pvid_s);
strcpy(info->dev->pvid, "");
@@ -2055,7 +2037,7 @@ update_vginfo:
return info;
}
static void _lvmcache_destroy_entry(struct lvmcache_info *info)
static void _lvmcache_destroy_info(struct lvmcache_info *info)
{
_vginfo_detach_info(info);
info->dev->pvid[0] = 0;
@@ -2063,20 +2045,11 @@ static void _lvmcache_destroy_entry(struct lvmcache_info *info)
free(info);
}
static void _lvmcache_destroy_vgnamelist(struct lvmcache_vginfo *vginfo)
{
struct lvmcache_vginfo *next;
do {
next = vginfo->next;
if (!_free_vginfo(vginfo))
stack;
} while ((vginfo = next));
}
void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset)
{
log_debug_cache("Dropping VG info");
struct lvmcache_vginfo *vginfo, *vginfo2;
log_debug_cache("Destroy lvmcache content");
if (_vgid_hash) {
dm_hash_destroy(_vgid_hash);
@@ -2084,20 +2057,24 @@ void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset)
}
if (_pvid_hash) {
dm_hash_iter(_pvid_hash, (dm_hash_iterate_fn) _lvmcache_destroy_entry);
dm_hash_iter(_pvid_hash, (dm_hash_iterate_fn) _lvmcache_destroy_info);
dm_hash_destroy(_pvid_hash);
_pvid_hash = NULL;
}
if (_vgname_hash) {
dm_hash_iter(_vgname_hash,
(dm_hash_iterate_fn) _lvmcache_destroy_vgnamelist);
dm_hash_destroy(_vgname_hash);
_vgname_hash = NULL;
}
dm_list_iterate_items_safe(vginfo, vginfo2, &_vginfos) {
dm_list_del(&vginfo->list);
_free_vginfo(vginfo);
}
if (!dm_list_empty(&_vginfos))
log_error(INTERNAL_ERROR "_vginfos list should be empty");
log_error(INTERNAL_ERROR "vginfos list should be empty");
dm_list_init(&_vginfos);
/*
@@ -2109,6 +2086,8 @@ void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset)
* We want the same preferred devices to be chosen each time, so save
* the unpreferred devs here so that _choose_preferred_devs can use
* this to make the same choice each time.
*
* FIXME: I don't think is is needed any more.
*/
_destroy_device_list(&_prev_unused_duplicate_devs);
dm_list_splice(&_prev_unused_duplicate_devs, &_unused_duplicates);
@@ -2122,7 +2101,7 @@ void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset)
stack;
dm_list_iterate_items(fmt, &cmd->formats) {
if (!lvmcache_add_orphan_vginfo(fmt->orphan_vg_name, fmt))
if (!lvmcache_add_orphan_vginfo(cmd, fmt->orphan_vg_name, fmt))
stack;
}
}
@@ -2567,36 +2546,6 @@ int lvmcache_vginfo_has_pvid(struct lvmcache_vginfo *vginfo, char *pvid)
return 0;
}
struct metadata_area *lvmcache_get_mda(struct cmd_context *cmd,
const char *vgname,
struct device *dev,
int use_mda_num)
{
struct lvmcache_vginfo *vginfo;
struct lvmcache_info *info;
struct metadata_area *mda;
if (!use_mda_num)
use_mda_num = 1;
if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, NULL)))
return NULL;
dm_list_iterate_items(info, &vginfo->infos) {
if (info->dev != dev)
continue;
dm_list_iterate_items(mda, &info->mdas) {
if ((use_mda_num == 1) && (mda->status & MDA_PRIMARY))
return mda;
if ((use_mda_num == 2) && !(mda->status & MDA_PRIMARY))
return mda;
}
return NULL;
}
return NULL;
}
/*
* This is used by the metadata repair command to check if
* the metadata on a dev needs repair because it's old.

12
lib/cache/lvmcache.h vendored
View File

@@ -71,16 +71,16 @@ int lvmcache_label_rescan_vg(struct cmd_context *cmd, const char *vgname, const
int lvmcache_label_rescan_vg_rw(struct cmd_context *cmd, const char *vgname, const char *vgid);
/* Add/delete a device */
struct lvmcache_info *lvmcache_add(struct labeller *labeller, const char *pvid,
struct lvmcache_info *lvmcache_add(struct cmd_context *cmd, struct labeller *labeller, const char *pvid,
struct device *dev, uint64_t label_sector,
const char *vgname, const char *vgid,
uint32_t vgstatus, int *is_duplicate);
int lvmcache_add_orphan_vginfo(const char *vgname, struct format_type *fmt);
int lvmcache_add_orphan_vginfo(struct cmd_context *cmd, const char *vgname, struct format_type *fmt);
void lvmcache_del(struct lvmcache_info *info);
void lvmcache_del_dev(struct device *dev);
/* Update things */
int lvmcache_update_vgname_and_id(struct lvmcache_info *info,
int lvmcache_update_vgname_and_id(struct cmd_context *cmd, struct lvmcache_info *info,
struct lvmcache_vgsummary *vgsummary);
int lvmcache_update_vg_from_read(struct volume_group *vg, unsigned precommitted);
int lvmcache_update_vg_from_write(struct volume_group *vg);
@@ -161,11 +161,6 @@ struct device *lvmcache_device(struct lvmcache_info *info);
unsigned lvmcache_mda_count(struct lvmcache_info *info);
uint64_t lvmcache_smallest_mda_size(struct lvmcache_info *info);
struct metadata_area *lvmcache_get_mda(struct cmd_context *cmd,
const char *vgname,
struct device *dev,
int use_mda_num);
bool lvmcache_has_duplicate_devs(void);
void lvmcache_del_dev_from_duplicates(struct device *dev);
bool lvmcache_dev_is_unused_duplicate(struct device *dev);
@@ -174,6 +169,7 @@ int lvmcache_get_unused_duplicates(struct cmd_context *cmd, struct dm_list *head
int vg_has_duplicate_pvs(struct volume_group *vg);
int lvmcache_found_duplicate_vgnames(void);
bool lvmcache_has_duplicate_local_vgname(const char *vgid, const char *vgname);
int lvmcache_contains_lock_type_sanlock(struct cmd_context *cmd);

View File

@@ -40,6 +40,10 @@
#include <syslog.h>
#include <time.h>
#ifdef APP_MACHINEID_SUPPORT
#include <systemd/sd-id128.h>
#endif
#ifdef __linux__
# include <malloc.h>
#endif
@@ -128,9 +132,12 @@ static const char *_read_system_id_from_file(struct cmd_context *cmd, const char
return system_id;
}
/* systemd-id128 new produced: f64406832c2140e8ac5422d1089aae03 */
#define LVM_APPLICATION_ID SD_ID128_MAKE(f6,44,06,83,2c,21,40,e8,ac,54,22,d1,08,9a,ae,03)
static const char *_system_id_from_source(struct cmd_context *cmd, const char *source)
{
char filebuf[PATH_MAX];
char buf[PATH_MAX];
const char *file;
const char *etc_str;
const char *str;
@@ -149,10 +156,23 @@ static const char *_system_id_from_source(struct cmd_context *cmd, const char *s
goto out;
}
#ifdef APP_MACHINEID_SUPPORT
if (!strcasecmp(source, "appmachineid")) {
sd_id128_t id;
sd_id128_get_machine_app_specific(LVM_APPLICATION_ID, &id);
if (dm_snprintf(buf, PATH_MAX, SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(id)) < 0)
stack;
system_id = system_id_from_string(cmd, buf);
goto out;
}
#endif
if (!strcasecmp(source, "machineid") || !strcasecmp(source, "machine-id")) {
etc_str = find_config_tree_str(cmd, global_etc_CFG, NULL);
if (dm_snprintf(filebuf, sizeof(filebuf), "%s/machine-id", etc_str) != -1)
system_id = _read_system_id_from_file(cmd, filebuf);
if (dm_snprintf(buf, sizeof(buf), "%s/machine-id", etc_str) != -1)
system_id = _read_system_id_from_file(cmd, buf);
goto out;
}
@@ -1276,7 +1296,7 @@ int init_lvmcache_orphans(struct cmd_context *cmd)
struct format_type *fmt;
dm_list_iterate_items(fmt, &cmd->formats)
if (!lvmcache_add_orphan_vginfo(fmt->orphan_vg_name, fmt))
if (!lvmcache_add_orphan_vginfo(cmd, fmt->orphan_vg_name, fmt))
return_0;
return 1;
@@ -1598,6 +1618,7 @@ struct cmd_context *create_toolcontext(unsigned is_clvmd,
dm_list_init(&cmd->formats);
dm_list_init(&cmd->segtypes);
dm_list_init(&cmd->tags);
dm_list_init(&cmd->hints);
dm_list_init(&cmd->config_files);
label_init();

View File

@@ -1224,10 +1224,12 @@ cfg(global_system_id_source_CFG, "system_id_source", global_CFG_SECTION, 0, CFG_
" uname\n"
" Set the system ID from the hostname (uname) of the system.\n"
" System IDs beginning localhost are not permitted.\n"
" appmachineid\n"
" Use an LVM-specific derivation of the local machine-id as the\n"
" system ID. See 'man machine-id'.\n"
" machineid\n"
" Use the contents of the machine-id file to set the system ID.\n"
" Some systems create this file at installation time.\n"
" See 'man machine-id' and global/etc.\n"
" Use the contents of the machine-id file to set the system ID\n"
" (appmachineid is recommended.)\n"
" file\n"
" Use the contents of another file (system_id_file) to set the\n"
" system ID.\n"

View File

@@ -86,6 +86,9 @@ static int _dev_get_size_dev(struct device *dev, uint64_t *size)
int fd = dev->bcache_fd;
int do_close = 0;
if (dm_list_empty(&dev->aliases))
return 0;
if (dev->size_seqno == _dev_size_seqno) {
log_very_verbose("%s: using cached size %" PRIu64 " sectors",
name, dev->size);

View File

@@ -646,6 +646,45 @@ out:
return ret;
}
#ifdef BLKID_WIPING_SUPPORT
int get_fs_block_size(struct device *dev, uint32_t *fs_block_size)
{
blkid_probe probe = NULL;
const char *block_size_str = NULL;
uint64_t block_size_val;
int r = 0;
*fs_block_size = 0;
if (!(probe = blkid_new_probe_from_filename(dev_name(dev)))) {
log_error("Failed to create a new blkid probe for device %s.", dev_name(dev));
goto out;
}
blkid_probe_enable_partitions(probe, 1);
(void) blkid_probe_lookup_value(probe, "BLOCK_SIZE", &block_size_str, NULL);
if (!block_size_str)
goto out;
block_size_val = strtoull(block_size_str, NULL, 10);
*fs_block_size = (uint32_t)block_size_val;
r = 1;
out:
if (probe)
blkid_free_probe(probe);
return r;
}
#else
int get_fs_block_size(struct device *dev, uint32_t *fs_block_size)
{
*fs_block_size = 0;
return 0;
}
#endif
#ifdef BLKID_WIPING_SUPPORT
static inline int _type_in_flag_list(const char *type, uint32_t flag_list)

View File

@@ -97,4 +97,6 @@ int dev_is_pmem(struct device *dev);
int dev_is_lv(struct device *dev);
int get_fs_block_size(struct device *dev, uint32_t *fs_block_size);
#endif

View File

@@ -315,7 +315,7 @@ struct volume_group *backup_read_vg(struct cmd_context *cmd,
}
dm_list_iterate_items(mda, &tf->metadata_areas_in_use) {
if (!(vg = mda->ops->vg_read(tf, vg_name, mda, NULL, NULL)))
if (!(vg = mda->ops->vg_read(cmd, tf, vg_name, mda, NULL, NULL)))
stack;
break;
}

View File

@@ -290,7 +290,8 @@ static int _raw_write_mda_header(const struct format_type *fmt,
* in the label scanning path.
*/
static struct raw_locn *_read_metadata_location_vg(struct device_area *dev_area,
static struct raw_locn *_read_metadata_location_vg(struct cmd_context *cmd,
struct device_area *dev_area,
struct mda_header *mdah, int primary_mda,
const char *vgname,
int *precommitted)
@@ -369,7 +370,7 @@ static struct raw_locn *_read_metadata_location_vg(struct device_area *dev_area,
vgnamebuf, vgname);
if ((info = lvmcache_info_from_pvid(dev_area->dev->pvid, dev_area->dev, 0)) &&
!lvmcache_update_vgname_and_id(info, &vgsummary_orphan))
!lvmcache_update_vgname_and_id(cmd, info, &vgsummary_orphan))
stack;
return NULL;
@@ -447,7 +448,8 @@ static uint64_t _next_rlocn_offset(struct volume_group *vg, struct raw_locn *rlo
return new_start;
}
static struct volume_group *_vg_read_raw_area(struct format_instance *fid,
static struct volume_group *_vg_read_raw_area(struct cmd_context *cmd,
struct format_instance *fid,
const char *vgname,
struct device_area *area,
struct cached_vg_fmtdata **vg_fmtdata,
@@ -468,7 +470,7 @@ static struct volume_group *_vg_read_raw_area(struct format_instance *fid,
goto out;
}
if (!(rlocn = _read_metadata_location_vg(area, mdah, primary_mda, vgname, &precommitted))) {
if (!(rlocn = _read_metadata_location_vg(cmd, area, mdah, primary_mda, vgname, &precommitted))) {
log_debug_metadata("VG %s not found on %s", vgname, dev_name(area->dev));
goto out;
}
@@ -503,7 +505,8 @@ static struct volume_group *_vg_read_raw_area(struct format_instance *fid,
return vg;
}
static struct volume_group *_vg_read_raw(struct format_instance *fid,
static struct volume_group *_vg_read_raw(struct cmd_context *cmd,
struct format_instance *fid,
const char *vgname,
struct metadata_area *mda,
struct cached_vg_fmtdata **vg_fmtdata,
@@ -512,12 +515,13 @@ static struct volume_group *_vg_read_raw(struct format_instance *fid,
struct mda_context *mdac = (struct mda_context *) mda->metadata_locn;
struct volume_group *vg;
vg = _vg_read_raw_area(fid, vgname, &mdac->area, vg_fmtdata, use_previous_vg, 0, mda_is_primary(mda));
vg = _vg_read_raw_area(cmd, fid, vgname, &mdac->area, vg_fmtdata, use_previous_vg, 0, mda_is_primary(mda));
return vg;
}
static struct volume_group *_vg_read_precommit_raw(struct format_instance *fid,
static struct volume_group *_vg_read_precommit_raw(struct cmd_context *cmd,
struct format_instance *fid,
const char *vgname,
struct metadata_area *mda,
struct cached_vg_fmtdata **vg_fmtdata,
@@ -526,7 +530,7 @@ static struct volume_group *_vg_read_precommit_raw(struct format_instance *fid,
struct mda_context *mdac = (struct mda_context *) mda->metadata_locn;
struct volume_group *vg;
vg = _vg_read_raw_area(fid, vgname, &mdac->area, vg_fmtdata, use_previous_vg, 1, mda_is_primary(mda));
vg = _vg_read_raw_area(cmd, fid, vgname, &mdac->area, vg_fmtdata, use_previous_vg, 1, mda_is_primary(mda));
return vg;
}
@@ -1321,7 +1325,7 @@ static struct volume_group *_vg_read_file_name(struct format_instance *fid,
return vg;
}
static struct volume_group *_vg_read_file(struct format_instance *fid,
static struct volume_group *_vg_read_file(struct cmd_context *cmd, struct format_instance *fid,
const char *vgname,
struct metadata_area *mda,
struct cached_vg_fmtdata **vg_fmtdata,
@@ -1332,7 +1336,7 @@ static struct volume_group *_vg_read_file(struct format_instance *fid,
return _vg_read_file_name(fid, vgname, tc->path_live);
}
static struct volume_group *_vg_read_precommit_file(struct format_instance *fid,
static struct volume_group *_vg_read_precommit_file(struct cmd_context *cmd, struct format_instance *fid,
const char *vgname,
struct metadata_area *mda,
struct cached_vg_fmtdata **vg_fmtdata,
@@ -1713,7 +1717,7 @@ static int _set_ext_flags(struct physical_volume *pv, struct lvmcache_info *info
}
/* Only for orphans - FIXME That's not true any more */
static int _text_pv_write(const struct format_type *fmt, struct physical_volume *pv)
static int _text_pv_write(struct cmd_context *cmd, const struct format_type *fmt, struct physical_volume *pv)
{
struct format_instance *fid = pv->fid;
const char *pvid = (const char *) (*pv->old_id.uuid ? &pv->old_id : &pv->id);
@@ -1725,7 +1729,7 @@ static int _text_pv_write(const struct format_type *fmt, struct physical_volume
unsigned mda_index;
/* Add a new cache entry with PV info or update existing one. */
if (!(info = lvmcache_add(fmt->labeller, (const char *) &pv->id,
if (!(info = lvmcache_add(cmd, fmt->labeller, (const char *) &pv->id,
pv->dev, pv->label_sector, pv->vg_name,
is_orphan_vg(pv->vg_name) ? pv->vg_name : pv->vg ? (const char *) &pv->vg->id : NULL, 0, NULL)))
return_0;

View File

@@ -370,7 +370,7 @@ static int _read_mda_header_and_metadata(const struct format_type *fmt,
* the metadata is at for those PVs.
*/
static int _text_read(struct labeller *labeller, struct device *dev, void *label_buf,
static int _text_read(struct cmd_context *cmd, struct labeller *labeller, struct device *dev, void *label_buf,
uint64_t label_sector, int *is_duplicate)
{
struct lvmcache_vgsummary vgsummary;
@@ -410,7 +410,7 @@ static int _text_read(struct labeller *labeller, struct device *dev, void *label
*
* Other reasons for lvmcache_add to return NULL are internal errors.
*/
if (!(info = lvmcache_add(labeller, (char *)pvhdr->pv_uuid, dev, label_sector,
if (!(info = lvmcache_add(cmd, labeller, (char *)pvhdr->pv_uuid, dev, label_sector,
FMT_TEXT_ORPHAN_VG_NAME,
FMT_TEXT_ORPHAN_VG_NAME, 0, is_duplicate)))
return_0;
@@ -503,7 +503,7 @@ static int _text_read(struct labeller *labeller, struct device *dev, void *label
rv1 = _read_mda_header_and_metadata(fmt, mda1, &vgsummary, &bad_fields);
if (rv1 && !vgsummary.zero_offset && !vgsummary.mda_ignored) {
if (!lvmcache_update_vgname_and_id(info, &vgsummary)) {
if (!lvmcache_update_vgname_and_id(cmd, info, &vgsummary)) {
/* I believe this is only an internal error. */
dm_list_del(&mda1->list);
@@ -554,7 +554,7 @@ static int _text_read(struct labeller *labeller, struct device *dev, void *label
rv2 = _read_mda_header_and_metadata(fmt, mda2, &vgsummary, &bad_fields);
if (rv2 && !vgsummary.zero_offset && !vgsummary.mda_ignored) {
if (!lvmcache_update_vgname_and_id(info, &vgsummary)) {
if (!lvmcache_update_vgname_and_id(cmd, info, &vgsummary)) {
dm_list_del(&mda2->list);
/* Are there other cases besides mismatch and internal error? */

View File

@@ -245,8 +245,8 @@ static int _target_present(struct cmd_context *cmd,
if (!target_version(TARGET_NAME_INTEGRITY, &maj, &min, &patchlevel))
return 0;
if (maj < 1 || min < 5) {
log_error("Integrity target version older than minimum 1.5.0");
if (maj < 1 || min < 6) {
log_error("Integrity target version older than minimum 1.6.0");
return 0;
}
}

View File

@@ -351,6 +351,7 @@ static void _unlock_hints(struct cmd_context *cmd)
void hints_exit(struct cmd_context *cmd)
{
free_hints(&cmd->hints);
if (_hints_fd == -1)
return;
return _unlock_hints(cmd);
@@ -419,6 +420,9 @@ static int _dev_in_hint_hash(struct cmd_context *cmd, struct device *dev)
{
uint64_t devsize = 0;
if (dm_list_empty(&dev->aliases))
return 0;
if (!cmd->filter->passes_filter(cmd, cmd->filter, dev, "regex"))
return 0;
@@ -1318,6 +1322,7 @@ int get_hints(struct cmd_context *cmd, struct dm_list *hints_out, int *newhints,
*/
if (!_read_hint_file(cmd, &hints_list, &needs_refresh)) {
log_debug("get_hints: read fail");
free_hints(&hints_list);
_unlock_hints(cmd);
return 0;
}
@@ -1330,6 +1335,7 @@ int get_hints(struct cmd_context *cmd, struct dm_list *hints_out, int *newhints,
*/
if (needs_refresh) {
log_debug("get_hints: needs refresh");
free_hints(&hints_list);
if (!_lock_hints(cmd, LOCK_EX, NONBLOCK))
return 0;

View File

@@ -431,7 +431,7 @@ static int _process_block(struct cmd_context *cmd, struct dev_filter *f,
* info/vginfo structs. That lvmcache info is used later when the
* command wants to read the VG to do something to it.
*/
ret = labeller->ops->read(labeller, dev, label_buf, sector, &is_duplicate);
ret = labeller->ops->read(cmd, labeller, dev, label_buf, sector, &is_duplicate);
if (!ret) {
if (is_duplicate) {

View File

@@ -64,7 +64,7 @@ struct label_ops {
/*
* Read a label from a volume.
*/
int (*read) (struct labeller * l, struct device * dev,
int (*read) (struct cmd_context *cmd, struct labeller * l, struct device * dev,
void *label_buf, uint64_t label_sector, int *is_duplicate);
/*

View File

@@ -635,7 +635,6 @@ static int _init_vg_sanlock(struct cmd_context *cmd, struct volume_group *vg, in
const char *vg_lock_args = NULL;
const char *opts = NULL;
struct pv_list *pvl;
struct device *sector_dev;
uint32_t sector_size = 0;
unsigned int physical_block_size, logical_block_size;
int num_mb = 0;
@@ -656,16 +655,11 @@ static int _init_vg_sanlock(struct cmd_context *cmd, struct volume_group *vg, in
dm_list_iterate_items(pvl, &vg->pvs) {
if (!dev_get_direct_block_sizes(pvl->pv->dev, &physical_block_size, &logical_block_size))
continue;
if (!sector_size) {
sector_size = logical_block_size;
sector_dev = pvl->pv->dev;
} else if (sector_size != logical_block_size) {
log_error("Inconsistent logical block sizes for %s and %s.",
dev_name(pvl->pv->dev), dev_name(sector_dev));
return 0;
}
if ((physical_block_size == 4096) || (logical_block_size == 4096))
sector_size = 4096;
}
if (!sector_size)
sector_size = 512;
log_debug("Using sector size %u for sanlock LV", sector_size);

View File

@@ -278,6 +278,126 @@ int lv_remove_integrity_from_raid(struct logical_volume *lv)
return 1;
}
static int _set_integrity_block_size(struct cmd_context *cmd, struct logical_volume *lv,
struct integrity_settings *settings,
int lbs_4k, int lbs_512, int pbs_4k, int pbs_512)
{
char pathname[PATH_MAX];
struct device *fs_dev;
uint32_t fs_block_size = 0;
int rv;
if (lbs_4k && lbs_512) {
log_error("Integrity requires consistent logical block size for LV devices.");
goto_bad;
}
if (settings->block_size &&
(settings->block_size != 512 && settings->block_size != 1024 &&
settings->block_size != 2048 && settings->block_size != 4096)) {
log_error("Invalid integrity block size, possible values are 512, 1024, 2048, 4096");
goto_bad;
}
if (lbs_4k && settings->block_size && (settings->block_size < 4096)) {
log_error("Integrity block size %u not allowed with device logical block size 4096.",
settings->block_size);
goto_bad;
}
if (!strcmp(cmd->name, "lvcreate")) {
if (lbs_4k) {
settings->block_size = 4096;
} else if (lbs_512 && pbs_4k && !pbs_512) {
settings->block_size = 4096;
} else if (lbs_512) {
if (!settings->block_size)
settings->block_size = 512;
} else if (!lbs_4k && !lbs_512) {
if (!settings->block_size)
settings->block_size = 512;
log_print("Using integrity block size %u with unknown device logical block size.",
settings->block_size);
} else {
goto_bad;
}
} else if (!strcmp(cmd->name, "lvconvert")) {
if (dm_snprintf(pathname, sizeof(pathname), "%s%s/%s", cmd->dev_dir,
lv->vg->name, lv->name) < 0) {
log_error("Path name too long to get LV block size %s", display_lvname(lv));
goto_bad;
}
if (!(fs_dev = dev_cache_get(cmd, pathname, NULL))) {
log_error("Device for LV not found to check block size %s", display_lvname(lv));
goto_bad;
}
/*
* get_fs_block_size() returns the libblkid BLOCK_SIZE value,
* where libblkid has fs-specific code to set BLOCK_SIZE to the
* value we need here.
*
* The term "block size" here may not equate directly to what the fs
* calls the block size, e.g. xfs calls this the sector size (and
* something different the block size); while ext4 does call this
* value the block size, but it's possible values are not the same
* as xfs's, and do not seem to relate directly to the device LBS.
*/
rv = get_fs_block_size(fs_dev, &fs_block_size);
if (!rv || !fs_block_size) {
int use_bs;
if (lbs_4k && pbs_4k) {
use_bs = 4096;
} else if (lbs_512 && pbs_512) {
use_bs = 512;
} else if (lbs_512 && pbs_4k) {
if (settings->block_size == 4096)
use_bs = 4096;
else
use_bs = 512;
} else {
use_bs = 512;
}
if (settings->block_size && (settings->block_size != use_bs)) {
log_error("Cannot use integrity block size %u with unknown file system block size, logical block size %u, physical block size %u.",
settings->block_size, lbs_4k ? 4096 : 512, pbs_4k ? 4096 : 512);
goto bad;
}
settings->block_size = use_bs;
log_print("Using integrity block size %u for unknown file system block size, logical block size %u, physical block size %u.",
settings->block_size, lbs_4k ? 4096 : 512, pbs_4k ? 4096 : 512);
goto out;
}
if (!settings->block_size) {
if (fs_block_size <= 4096)
settings->block_size = fs_block_size;
else
settings->block_size = 4096; /* dm-integrity max is 4096 */
log_print("Using integrity block size %u for file system block size %u.",
settings->block_size, fs_block_size);
} else {
/* let user specify integrity block size that is less than fs block size */
if (settings->block_size > fs_block_size) {
log_error("Integrity block size %u cannot be larger than file system block size %u.",
settings->block_size, fs_block_size);
goto_bad;
}
log_print("Using integrity block size %u for file system block size %u.",
settings->block_size, fs_block_size);
}
}
out:
return 1;
bad:
return 0;
}
/*
* Add integrity to each raid image.
*
@@ -321,11 +441,14 @@ int lv_add_integrity_to_raid(struct logical_volume *lv, struct integrity_setting
struct volume_group *vg = lv->vg;
struct logical_volume *lv_image, *lv_imeta, *lv_iorig;
struct lv_segment *seg_top, *seg_image;
struct pv_list *pvl;
const struct segment_type *segtype;
struct integrity_settings *set;
struct integrity_settings *set = NULL;
struct dm_list *use_pvh = NULL;
uint32_t area_count, s;
uint32_t revert_meta_lvs = 0;
int lbs_4k = 0, lbs_512 = 0, lbs_unknown = 0;
int pbs_4k = 0, pbs_512 = 0, pbs_unknown = 0;
int is_active;
memset(imeta_lvs, 0, sizeof(imeta_lvs));
@@ -401,6 +524,29 @@ int lv_add_integrity_to_raid(struct logical_volume *lv, struct integrity_setting
goto_bad;
}
dm_list_iterate_items(pvl, &allocatable_pvs) {
unsigned int pbs = 0;
unsigned int lbs = 0;
if (!dev_get_direct_block_sizes(pvl->pv->dev, &pbs, &lbs)) {
lbs_unknown++;
pbs_unknown++;
continue;
}
if (lbs == 4096)
lbs_4k++;
else if (lbs == 512)
lbs_512++;
else
lbs_unknown++;
if (pbs == 4096)
pbs_4k++;
else if (pbs == 512)
pbs_512++;
else
pbs_unknown++;
}
use_pvh = &allocatable_pvs;
/*
@@ -441,6 +587,14 @@ int lv_add_integrity_to_raid(struct logical_volume *lv, struct integrity_setting
}
}
/*
* Set settings->block_size which will be copied to segment settings below.
* integrity block size chosen based on device logical block size and
* file system block size.
*/
if (!_set_integrity_block_size(cmd, lv, settings, lbs_4k, lbs_512, pbs_4k, pbs_512))
goto_bad;
/*
* For each rimage, move its segments to a new rimage_iorig and give
* the rimage a new integrity segment.

View File

@@ -744,9 +744,6 @@ struct volume_group *vg_read_for_update(struct cmd_context *cmd, const char *vg_
const char *vgid, uint32_t read_flags, uint32_t lockd_state);
struct volume_group *vg_read_orphans(struct cmd_context *cmd, const char *orphan_vgname);
/* this is historical and being removed, don't use */
uint32_t vg_read_error(struct volume_group *vg_handle);
/* pe_start and pe_end relate to any existing data so that new metadata
* areas can avoid overlap */
struct physical_volume *pv_create(const struct cmd_context *cmd,

View File

@@ -3666,7 +3666,7 @@ int pv_write(struct cmd_context *cmd,
return 0;
}
if (!pv->fmt->ops->pv_write(pv->fmt, pv))
if (!pv->fmt->ops->pv_write(cmd, pv->fmt, pv))
return_0;
pv->status &= ~UNLABELLED_PV;
@@ -4010,17 +4010,6 @@ static int _access_vg_exported(struct cmd_context *cmd, struct volume_group *vg)
return 0;
}
/*
* Test the validity of a VG handle returned by vg_read() or vg_read_for_update().
*/
uint32_t vg_read_error(struct volume_group *vg_handle)
{
if (!vg_handle)
return FAILED_ALLOCATION;
return SUCCESS;
}
struct format_instance *alloc_fid(const struct format_type *fmt,
const struct format_instance_ctx *fic)
{
@@ -4751,18 +4740,6 @@ static struct volume_group *_vg_read(struct cmd_context *cmd,
lvmcache_label_rescan_vg(cmd, vgname, vgid);
}
/* Now determine the correct vgname if none was supplied */
if (!vgname && !(vgname = lvmcache_vgname_from_vgid(cmd->mem, vgid))) {
log_debug_metadata("Cache did not find VG name from vgid %s", vgid);
return NULL;
}
/* Determine the correct vgid if none was supplied */
if (!vgid && !(vgid = lvmcache_vgid_from_vgname(cmd, vgname))) {
log_debug_metadata("Cache did not find VG vgid from name %s", vgname);
return NULL;
}
/*
* A "format instance" is an abstraction for a VG location,
* i.e. where a VG's metadata exists on disk.
@@ -4841,7 +4818,7 @@ static struct volume_group *_vg_read(struct cmd_context *cmd,
log_debug_metadata("Reading VG %s precommit metadata from %s %llu",
vgname, dev_name(mda_dev), (unsigned long long)mda->header_start);
vg = mda->ops->vg_read_precommit(fid, vgname, mda, &vg_fmtdata, &use_previous_vg);
vg = mda->ops->vg_read_precommit(cmd, fid, vgname, mda, &vg_fmtdata, &use_previous_vg);
if (!vg && !use_previous_vg) {
log_warn("WARNING: Reading VG %s precommit on %s failed.", vgname, dev_name(mda_dev));
@@ -4852,7 +4829,7 @@ static struct volume_group *_vg_read(struct cmd_context *cmd,
log_debug_metadata("Reading VG %s metadata from %s %llu",
vgname, dev_name(mda_dev), (unsigned long long)mda->header_start);
vg = mda->ops->vg_read(fid, vgname, mda, &vg_fmtdata, &use_previous_vg);
vg = mda->ops->vg_read(cmd, fid, vgname, mda, &vg_fmtdata, &use_previous_vg);
if (!vg && !use_previous_vg) {
log_warn("WARNING: Reading VG %s on %s failed.", vgname, dev_name(mda_dev));
@@ -4999,6 +4976,7 @@ struct volume_group *vg_read(struct cmd_context *cmd, const char *vg_name, const
int missing_pv_dev = 0;
int missing_pv_flag = 0;
uint32_t failure = 0;
int original_vgid_set = vgid ? 1 : 0;
int writing = (vg_read_flags & READ_FOR_UPDATE);
int activating = (vg_read_flags & READ_FOR_ACTIVATE);
@@ -5033,7 +5011,45 @@ struct volume_group *vg_read(struct cmd_context *cmd, const char *vg_name, const
goto bad;
}
/* I believe this is unused, the name is always set. */
if (!vg_name && !(vg_name = lvmcache_vgname_from_vgid(cmd->mem, vgid))) {
unlock_vg(cmd, NULL, vg_name);
log_error("VG name not found for vgid %s", vgid);
failure |= FAILED_NOTFOUND;
goto_bad;
}
/*
* If the command is process all vgs, process_each will get a list of vgname+vgid
 * pairs, and then call vg_read() for each vgname+vgid. In this case we know
* which VG to read even if there are duplicate names, and we don't fail.
*
* If the user has requested one VG by name, process_each passes only the vgname
* to vg_read(), and we look up the vgid from lvmcache. lvmcache finds duplicate
* vgnames, doesn't know which is intended, returns a NULL vgid, and we fail.
*/
if (!vgid)
vgid = lvmcache_vgid_from_vgname(cmd, vg_name);
if (!vgid) {
unlock_vg(cmd, NULL, vg_name);
/* Some callers don't care if the VG doesn't exist and don't want an error message. */
if (!(vg_read_flags & READ_OK_NOTFOUND))
log_error("Volume group \"%s\" not found", vg_name);
failure |= FAILED_NOTFOUND;
goto_bad;
}
/*
* vgchange -ay (no vgname arg) will activate multiple local VGs with the same
* name, but if the vgs have the same lv name, activating those lvs will fail.
*/
if (activating && original_vgid_set && lvmcache_has_duplicate_local_vgname(vgid, vg_name))
log_warn("WARNING: activating multiple VGs with the same name is dangerous and may fail.");
if (!(vg = _vg_read(cmd, vg_name, vgid, 0, writing))) {
unlock_vg(cmd, NULL, vg_name);
/* Some callers don't care if the VG doesn't exist and don't want an error message. */
if (!(vg_read_flags & READ_OK_NOTFOUND))
log_error("Volume group \"%s\" not found.", vg_name);

View File

@@ -76,12 +76,14 @@ struct cached_vg_fmtdata;
/* Per-format per-metadata area operations */
struct metadata_area_ops {
struct dm_list list;
struct volume_group *(*vg_read) (struct format_instance * fi,
struct volume_group *(*vg_read) (struct cmd_context *cmd,
struct format_instance * fi,
const char *vg_name,
struct metadata_area * mda,
struct cached_vg_fmtdata **vg_fmtdata,
unsigned *use_previous_vg);
struct volume_group *(*vg_read_precommit) (struct format_instance * fi,
struct volume_group *(*vg_read_precommit) (struct cmd_context *cmd,
struct format_instance * fi,
const char *vg_name,
struct metadata_area * mda,
struct cached_vg_fmtdata **vg_fmtdata,
@@ -326,7 +328,7 @@ struct format_handler {
* Write a PV structure to disk. Fails if the PV is in a VG ie
* pv->vg_name must be a valid orphan VG name
*/
int (*pv_write) (const struct format_type * fmt,
int (*pv_write) (struct cmd_context *cmd, const struct format_type * fmt,
struct physical_volume * pv);
/*

View File

@@ -9,6 +9,7 @@ blkdeactivate \(em utility to deactivate block devices
.RB [ -l \ \fIlvm_options\fP ]
.RB [ -m \ \fImpath_options\fP ]
.RB [ -r \ \fImdraid_options\fP ]
.RB [ -o \ \fIvdo_options\fP ]
.RB [ -u ]
.RB [ -v ]
.RI [ device ]
@@ -70,6 +71,15 @@ Comma-separated list of MD RAID specific options:
Wait MD device's resync, recovery or reshape action to complete
before deactivation.
.RE
.TP
.BR -o ", " --vdooptions \ \fIvdo_options\fP
Comma-separated list of VDO specific options:
.RS
.IP \fIconfigfile=file\fP
Use specified VDO configuration file.
.RE
.TP
.BR -u ", " --umount
Unmount a mounted device before trying to deactivate it.
@@ -120,4 +130,5 @@ of a device-mapper device fails, retry it and force removal.
.BR lvm (8),
.BR mdadm (8),
.BR multipathd (8),
.BR vdo (8),
.BR umount (8)

View File

@@ -58,6 +58,10 @@ For default settings, see lvmlockd -h.
.I path
Set path to the socket to listen on.
.B --adopt-file
.I path
Set path to the adopt file.
.B --syslog-priority | -S err|warning|debug
Write log messages from this level up to syslog.
@@ -76,6 +80,8 @@ For default settings, see lvmlockd -h.
.I seconds
Override the default sanlock I/O timeout.
.B --adopt | -A 0|1
Enable (1) or disable (0) lock adoption.
.SH USAGE
@@ -548,7 +554,13 @@ necessary locks.
.B lvmlockd failure
If lvmlockd fails or is killed while holding locks, the locks are orphaned
in the lock manager.
in the lock manager. Orphaned locks must be cleared or adopted before the
associated resources can be accessed normally. If lock adoption is
enabled, lvmlockd keeps a record of locks in the adopt-file. A subsequent
instance of lvmlockd will then adopt locks orphaned by the previous
instance. Adoption must be enabled in both instances (--adopt|-A 1).
Without adoption, the lock manager or host would require a reset to clear
orphaned lock state.
.B dlm/corosync failure

View File

@@ -785,6 +785,89 @@ configuration file itself.
activation_mode
.SH Data Integrity
The device mapper integrity target can be used in combination with RAID
levels 1,4,5,6,10 to detect and correct data corruption in RAID images. A
dm-integrity layer is placed above each RAID image, and an extra sub LV is
created to hold integrity metadata (data checksums) for each RAID image.
When data is read from an image, integrity checksums are used to detect
corruption. If detected, dm-raid reads the data from another (good) image
to return to the caller. dm-raid will also automatically write the good
data back to the image with bad data to correct the corruption.
When creating a RAID LV with integrity, or adding integrity, space is
required for integrity metadata. Every 500MB of LV data requires an
additional 4MB to be allocated for integrity metadata, for each RAID
image.
Create a RAID LV with integrity:
.B lvcreate \-\-type raidN \-\-raidintegrity y
Add integrity to an existing RAID LV:
.B lvconvert --raidintegrity y
.I LV
Remove integrity from a RAID LV:
.B lvconvert --raidintegrity n
.I LV
.SS Integrity options
.B --raidintegritymode journal|bitmap
Use a journal (default) or bitmap for keeping integrity checksums
consistent in case of a crash. The bitmap areas are recalculated after a
crash, so corruption in those areas would not be detected. A journal does
not have this problem. The journal mode doubles writes to storage, but
can improve performance for scattered writes packed into a single journal
write. bitmap mode can in theory achieve full write throughput of the
device, but would not benefit from the potential scattered write
optimization.
.B --raidintegrityblocksize 512|1024|2048|4096
The block size to use for dm-integrity on raid images. The integrity
block size should usually match the device logical block size, or the file
system sector/block sizes. It may be less than the file system
sector/block size, but not less than the device logical block size.
Possible values: 512, 1024, 2048, 4096.
.SS Integrity initialization
When integrity is added to an LV, the kernel needs to initialize the
integrity metadata (checksums) for all blocks in the LV. The data
corruption checking performed by dm-integrity will only operate on areas
of the LV that are already initialized. The progress of integrity
initialization is reported by the "syncpercent" LV reporting field (and
under the Cpy%Sync lvs column.)
.SS Integrity limitations
To work around some limitations, it is possible to remove integrity from
the LV, make the change, then add integrity again. (Integrity metadata
would need to be initialized when added again.)
LVM must be able to allocate the integrity metadata sub LV on a single PV
that is already in use by the associated RAID image. This can potentially
cause a problem during lvextend if the original PV holding the image and
integrity metadata is full. To work around this limitation, remove
integrity, extend the LV, and add integrity again.
Additional RAID images can be added to raid1 LVs, but not to other raid
levels.
A raid1 LV with integrity cannot be converted to linear (remove integrity
to do this.)
RAID LVs with integrity cannot yet be used as sub LVs with other LV types.
The following are not yet permitted on RAID LVs with integrity: lvreduce,
pvmove, snapshots, splitmirror, raid syncaction commands, raid rebuild.
.SH RAID1 Tuning
A RAID1 LV can be tuned so that certain devices are avoided for reading

View File

@@ -112,7 +112,7 @@ version without the system ID feature.
A local VG is meant to be used by a single host.
A shared or clustered VG is meant to be used by multiple hosts.
A shared VG is meant to be used by multiple hosts.
These can be further distinguished as:
@@ -168,16 +168,31 @@ global {
}
.fi
.TP
.B appmachineid
.br
An LVM-specific derivation of /etc/machine-id is used as the system ID.
See
.BR machine-id (5)
to check if machine-id is available on the host.
.I lvm.conf
.nf
global {
system_id_source = "appmachineid"
}
.fi
.TP
.B machineid
.br
The content of /etc/machine-id is used as the system ID if available.
The content of /etc/machine-id is used as the system ID.
See
.BR machine-id (5)
and
.BR systemd-machine-id-setup (1)
to check if machine-id is available on the host.
(appmachineid is recommended in place of machineid.)
.I lvm.conf
.nf

View File

@@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright (C) 2012-2017 Red Hat, Inc. All rights reserved.
# Copyright (C) 2012-2020 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
@@ -38,6 +38,7 @@ MDADM="/sbin/mdadm"
MOUNTPOINT="/bin/mountpoint"
MPATHD="/sbin/multipathd"
UMOUNT="/bin/umount"
VDO="/bin/vdo"
sbindir="@SBINDIR@"
DMSETUP="$sbindir/dmsetup"
@@ -54,6 +55,7 @@ DMSETUP_OPTS=""
LVM_OPTS=""
MDADM_OPTS=""
MPATHD_OPTS=""
VDO_OPTS=""
LSBLK="/bin/lsblk -r --noheadings -o TYPE,KNAME,NAME,MOUNTPOINT"
LSBLK_VARS="local devtype local kname local name local mnt"
@@ -124,6 +126,7 @@ usage() {
echo " -l | --lvmoptions LVM_OPTIONS Comma separated LVM specific options"
echo " -m | --mpathoptions MPATH_OPTIONS Comma separated DM-multipath specific options"
echo " -r | --mdraidoptions MDRAID_OPTIONS Comma separated MD RAID specific options"
echo " -o | --vdooptions VDO_OPTIONS Comma separated VDO specific options"
echo " -u | --umount Unmount the device if mounted"
echo " -v | --verbose Verbose mode (also implies -e)"
echo
@@ -138,6 +141,8 @@ usage() {
echo " wait wait for resync, recovery or reshape to complete first"
echo " MPATH_OPTIONS:"
echo " disablequeueing disable queueing on all DM-multipath devices first"
echo " VDO_OPTIONS:"
echo " configfile=file use specified VDO configuration file"
exit
}
@@ -319,6 +324,23 @@ deactivate_md () {
fi
}
deactivate_vdo() {
local xname
xname=$(printf "%s" "$name")
test -b "$DEV_DIR/mapper/$xname" || return 0
test -z "${SKIP_DEVICE_LIST["$kname"]}" || return 1
deactivate_holders "$DEV_DIR/mapper/$xname" || return 1
echo -n " [VDO]: deactivating VDO volume $xname... "
if eval "$VDO" stop $VDO_OPTS --name="$xname" "$OUT" "$ERR"; then
echo "done"
else
echo "skipping"
add_device_to_skip_list
fi
}
deactivate () {
######################################################################
# DEACTIVATION HOOKS FOR NEW DEVICE TYPES GO HERE! #
@@ -335,6 +357,8 @@ deactivate () {
######################################################################
if test "$devtype" = "lvm"; then
deactivate_lvm
elif test "$devtype" = "vdo"; then
deactivate_vdo
elif test "${kname:0:3}" = "dm-"; then
deactivate_dm
elif test "${kname:0:2}" = "md"; then
@@ -479,6 +503,20 @@ get_mpathopts() {
IFS=$ORIG_IFS
}
get_vdoopts() {
ORIG_IFS=$IFS; IFS=','
for opt in $1; do
case "$opt" in
"") ;;
configfile=*) tmp=${opt#*=}; VDO_OPTS+="--confFile=${tmp%%,*} " ;;
*) echo "$opt: unknown VDO option"
esac
done
IFS=$ORIG_IFS
}
set_env() {
if test "$ERRORS" -eq "1"; then
unset ERR
@@ -493,6 +531,7 @@ set_env() {
LVM_OPTS+="-vvvv"
MDADM_OPTS+="-vv"
MPATHD_OPTS+="-v 3"
VDO_OPTS+="--verbose "
else
OUT="1>$DEV_DIR/null"
fi
@@ -509,6 +548,12 @@ set_env() {
MDADM_AVAILABLE=0
fi
if test -f $VDO; then
VDO_AVAILABLE=1
else
VDO_AVAILABLE=0
fi
MPATHD_RUNNING=0
test "$MPATHD_DO_DISABLEQUEUEING" -eq 1 && {
if test -f "$MPATHD"; then
@@ -528,6 +573,7 @@ while test $# -ne 0; do
"-l"|"--lvmoptions") get_lvmopts "$2" ; shift ;;
"-m"|"--mpathoptions") get_mpathopts "$2" ; shift ;;
"-r"|"--mdraidoptions") get_mdraidopts "$2"; shift ;;
"-o"|"--vdooptions") get_vdoopts "$2"; shift ;;
"-u"|"--umount") DO_UMOUNT=1 ;;
"-v"|"--verbose") VERBOSE=1 ; ERRORS=1 ;;
"-vv") VERBOSE=1 ; ERRORS=1 ; set -x ;;

View File

@@ -8,7 +8,7 @@ Type=oneshot
RemainAfterExit=yes
# start lockspaces and wait for them to finish starting
ExecStart=@SBINDIR@/lvm vgchange --lock-start --lock-opt autowait
ExecStart=@SBINDIR@/lvm vgchange --lock-start --lock-opt auto
# stop lockspaces and wait for them to finish stopping
ExecStop=@SBINDIR@/lvmlockctl --stop-lockspaces --wait 1

View File

@@ -41,6 +41,7 @@
%with vdo internal
%with vdo-format %{_bindir}/vdoformat
%with writecache internal
%with integrity internal
%global buildreq_udev systemd-devel
%global req_udev udev >= 181-1

View File

@@ -0,0 +1,660 @@
#!/usr/bin/env bash
# Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
SKIP_WITH_LVMLOCKD=1
SKIP_WITH_LVMPOLLD=1
. lib/inittest
aux prepare_devs 7
# test setups:
# # local vgs named foo # foreign vg named foo
# a. 0 1
# b. 0 2
# c. 1 1
# d. 1 2
# e. 2 0
# f. 2 1
# g. 2 2
# h. 3 3
#
# commands to run for each test setup:
#
# vgs
# all cases show all local
#
# vgs --foreign
# all cases show all local and foreign
#
# vgs foo
# a. not found
# b. not found
# c. show 1 local
# d. show 1 local
# e-g. dup error
#
# vgs --foreign foo
# a. show 1 foreign
# b. dup error
# c. show 1 local
# d. show 1 local
# e-g. dup error
#
# vgchange -ay
# a. none
# b. none
# c. activate 1 local
# d. activate 1 local
# e-g. activate 2 local
# (if both local vgs have lvs with same name the second will fail to activate)
#
# vgchange -ay foo
# a. none
# b. none
# c. activate 1 local
# d. activate 1 local
# e-g. dup error
#
# lvcreate foo
# a. none
# b. none
# c. create 1 local
# d. create 1 local
# e-g. dup error
#
# vgremove foo
# a. none
# b. none
# c. remove 1 local
# d. remove 1 local
# e-g. dup error
# (in a couple cases test that vgremove -S vg_uuid=N works for local vg when local dups exist)
# a. 0 local, 1 foreign
# setup
vgcreate $vg1 "$dev1"
lvcreate -n $lv1 -l1 -an $vg1
UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other" $vg1
# plain vgs hides the foreign VG
vgs -o+uuid |tee out
not grep $vg1 out
# --foreign shows it
vgs --foreign -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
# by-name lookup fails without --foreign
not vgs -o+uuid $vg1 |tee out
not grep $vg1 out
vgs --foreign -o+uuid $vg1 |tee out
grep $vg1 out
# activation must not touch the foreign VG
vgchange -ay
lvs --foreign -o vguuid,active |tee out
not grep active out
vgchange -an
not vgchange -ay $vg1
lvs --foreign -o vguuid,active |tee out
not grep active out
vgchange -an
# creating an LV in a foreign VG must fail
not lvcreate -l1 -an -n $lv2 $vg1
lvs --foreign -o vguuid,name |tee out
grep $UUID1 out | not grep $lv2
# removing a foreign VG must fail, by name or by uuid selection
not vgremove $vg1
vgs --foreign -o+uuid |tee out
grep $UUID1 out
vgremove -y -S vg_uuid=$UUID1
vgs --foreign -o+uuid |tee out
grep $UUID1 out
aux wipefs_a "$dev1"
aux wipefs_a "$dev2"
# b. 0 local, 2 foreign
# setup
vgcreate $vg1 "$dev1"
lvcreate -n $lv1 -l1 -an $vg1
UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other" $vg1
aux disable_dev "$dev1"
vgcreate $vg1 "$dev2"
lvcreate -n $lv1 -l1 -an $vg1
UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other2" $vg1
aux enable_dev "$dev1"
# plain vgs shows neither foreign VG
vgs -o+uuid |tee out
not grep $vg1 out
# --foreign shows both duplicates
vgs --foreign -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
grep $UUID2 out
# by-name lookup is ambiguous (dup error) even with --foreign
not vgs -o+uuid $vg1 |tee out
not grep $vg1 out
not vgs --foreign -o+uuid $vg1 |tee out
not grep $vg1 out
# no foreign LVs may be activated
vgchange -ay
lvs --foreign -o vguuid,active |tee out
not grep active out
vgchange -an
not vgchange -ay $vg1
lvs --foreign -o vguuid,active |tee out
not grep active out
vgchange -an
# lvcreate must fail for both foreign duplicates
not lvcreate -l1 -an -n $lv2 $vg1
lvs --foreign -o vguuid,name |tee out
grep $UUID1 out | not grep $lv2
grep $UUID2 out | not grep $lv2
not vgremove $vg1
vgs --foreign -o+uuid |tee out
grep $UUID1 out
aux wipefs_a "$dev1"
aux wipefs_a "$dev2"
aux wipefs_a "$dev3"
# c. 1 local, 1 foreign
# setup
vgcreate $vg1 "$dev1"
lvcreate -n $lv1 -l1 -an $vg1
UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev1"
vgcreate $vg1 "$dev2"
lvcreate -n $lv1 -l1 -an $vg1
UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other" $vg1
aux enable_dev "$dev1"
# plain vgs shows only the local VG
vgs -o+uuid |tee out
cat out
grep $vg1 out
grep $UUID1 out
not grep $UUID2 out
vgs --foreign -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
grep $UUID2 out
# by-name lookup resolves to the single local VG
vgs -o+uuid $vg1 |tee out
grep $vg1 out
grep $UUID1 out
not grep $UUID2 out
vgs --foreign -o+uuid $vg1 |tee out
grep $vg1 out
grep $UUID1 out
not grep $UUID2 out
# activation touches only the local VG
vgchange -ay
lvs --foreign -o vguuid,active |tee out
grep $UUID1 out | grep active
grep $UUID2 out | not grep active
vgchange -an
vgchange -ay $vg1
lvs --foreign -o vguuid,active |tee out
grep $UUID1 out | grep active
grep $UUID2 out | not grep active
vgchange -an
# lvcreate lands in the local VG only
lvcreate -l1 -an -n $lv2 $vg1
lvs --foreign -o vguuid,name |tee out
grep $UUID1 out | grep $lv2
grep $UUID2 out | not grep $lv2
# vgremove removes the local VG and leaves the foreign one
vgremove -y $vg1
vgs -o+uuid |tee out
not grep $UUID1 out
vgs --foreign -o+uuid |tee out
grep $UUID2 out
aux wipefs_a "$dev1"
aux wipefs_a "$dev2"
aux wipefs_a "$dev3"
# d. 1 local, 2 foreign
# setup
vgcreate $vg1 "$dev1"
lvcreate -n $lv1 -l1 -an $vg1
UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev1"
vgcreate $vg1 "$dev2"
lvcreate -n $lv1 -l1 -an $vg1
UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other" $vg1
aux disable_dev "$dev2"
vgcreate $vg1 "$dev3"
lvcreate -n $lv1 -l1 -an $vg1
UUID3=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other2" $vg1
aux enable_dev "$dev1"
aux enable_dev "$dev2"
# plain vgs shows only the local copy
vgs -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
not grep $UUID2 out
not grep $UUID3 out
vgs --foreign -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
# by-name lookup still resolves to the single local VG
vgs -o+uuid $vg1 |tee out
grep $vg1 out
grep $UUID1 out
not grep $UUID2 out
not grep $UUID3 out
vgs --foreign -o+uuid $vg1 |tee out
grep $vg1 out
grep $UUID1 out
not grep $UUID2 out
not grep $UUID3 out
# activation touches only the local VG
vgchange -ay
lvs --foreign -o vguuid,active |tee out
grep $UUID1 out | grep active
grep $UUID2 out | not grep active
grep $UUID3 out | not grep active
vgchange -an
vgchange -ay $vg1
lvs --foreign -o vguuid,active |tee out
grep $UUID1 out | grep active
grep $UUID2 out | not grep active
grep $UUID3 out | not grep active
vgchange -an
# lvcreate lands in the local VG only
lvcreate -l1 -an -n $lv2 $vg1
lvs --foreign -o vguuid,name |tee out
grep $UUID1 out | grep $lv2
grep $UUID2 out | not grep $lv2
grep $UUID3 out | not grep $lv2
# vgremove removes the local VG and leaves both foreign ones
vgremove -y $vg1
vgs -o+uuid |tee out
not grep $UUID1 out
vgs --foreign -o+uuid |tee out
grep $UUID2 out
grep $UUID3 out
aux wipefs_a "$dev1"
aux wipefs_a "$dev2"
aux wipefs_a "$dev3"
aux wipefs_a "$dev4"
# e. 2 local, 0 foreign
# setup
vgcreate $vg1 "$dev1"
lvcreate -n $lv1 -l1 -an $vg1
UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev1"
vgcreate $vg1 "$dev2"
# diff lvname to prevent clash in vgchange -ay
lvcreate -n ${lv1}_b -l1 -an $vg1
UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux enable_dev "$dev1"
# both local duplicates are listed
vgs -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
grep $UUID2 out
vgs --foreign -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
grep $UUID2 out
# by-name lookup is ambiguous among local duplicates
not vgs -o+uuid $vg1 |tee out
not grep $vg1 out
not vgs --foreign -o+uuid $vg1 |tee out
not grep $vg1 out
# process-all activation activates both (distinct LV names avoid a clash)
vgchange -ay
lvs --foreign -o vguuid,active |tee out
grep $UUID1 out | grep active
grep $UUID2 out | grep active
vgchange -an
# by-name activation is ambiguous and must fail
not vgchange -ay $vg1
lvs --foreign -o vguuid,active |tee out
grep $UUID1 out | not grep active
grep $UUID2 out | not grep active
vgchange -an
not lvcreate -l1 -an -n $lv2 $vg1
lvs --foreign -o vguuid,name |tee out
grep $UUID1 out | not grep $lv2
grep $UUID2 out | not grep $lv2
# by-name vgremove is ambiguous; uuid selection removes one at a time
not vgremove $vg1
vgs -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
grep $UUID2 out
vgremove -y -S vg_uuid=$UUID1
vgs -o+uuid |tee out
not grep $UUID1 out
grep $UUID2 out
vgremove -y -S vg_uuid=$UUID2
vgs -o+uuid |tee out
not grep $UUID1 out
not grep $UUID2 out
aux wipefs_a "$dev1"
aux wipefs_a "$dev2"
aux wipefs_a "$dev3"
# f. 2 local, 1 foreign
# setup
vgcreate $vg1 "$dev1"
lvcreate -n $lv1 -l1 -an $vg1
UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev1"
vgcreate $vg1 "$dev2"
# diff lvname to prevent clash in vgchange -ay
lvcreate -n ${lv1}_b -l1 -an $vg1
UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev2"
vgcreate $vg1 "$dev3"
lvcreate -n $lv1 -l1 -an $vg1
UUID3=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other" $vg1
aux enable_dev "$dev1"
aux enable_dev "$dev2"
# plain vgs shows both local duplicates and hides the foreign one
vgs -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
grep $UUID2 out
# was "not group" (nonexistent command), which made this check a no-op
not grep $UUID3 out
vgs --foreign -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
# by-name lookup is ambiguous among local duplicates
not vgs -o+uuid $vg1 |tee out
not grep $vg1 out
not vgs --foreign -o+uuid $vg1 |tee out
not grep $vg1 out
# process-all activation activates both local VGs, never the foreign one
vgchange -ay
lvs --foreign -o vguuid,active |tee out
grep $UUID1 out | grep active
grep $UUID2 out | grep active
grep $UUID3 out | not grep active
vgchange -an
# by-name activation is ambiguous and must fail
not vgchange -ay $vg1
lvs --foreign -o vguuid,active |tee out
grep $UUID1 out | not grep active
grep $UUID2 out | not grep active
grep $UUID3 out | not grep active
vgchange -an
not lvcreate -l1 -an -n $lv2 $vg1
lvs --foreign -o vguuid,name |tee out
grep $UUID1 out | not grep $lv2
grep $UUID2 out | not grep $lv2
grep $UUID3 out | not grep $lv2
# by-name vgremove is ambiguous; uuid selection removes local VGs only
not vgremove $vg1
vgs --foreign -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
vgremove -y -S vg_uuid=$UUID1
vgs --foreign -o+uuid |tee out
not grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
vgremove -y -S vg_uuid=$UUID2
vgs --foreign -o+uuid |tee out
not grep $UUID1 out
not grep $UUID2 out
grep $UUID3 out
aux wipefs_a "$dev1"
aux wipefs_a "$dev2"
aux wipefs_a "$dev3"
aux wipefs_a "$dev4"
# g. 2 local, 2 foreign
# setup
vgcreate $vg1 "$dev1"
lvcreate -n $lv1 -l1 -an $vg1
UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev1"
vgcreate $vg1 "$dev2"
# diff lvname to prevent clash in vgchange -ay
lvcreate -n ${lv1}_b -l1 -an $vg1
UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev2"
vgcreate $vg1 "$dev3"
lvcreate -n $lv1 -l1 -an $vg1
UUID3=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other" $vg1
aux disable_dev "$dev3"
vgcreate $vg1 "$dev4"
lvcreate -n $lv1 -l1 -an $vg1
UUID4=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other2" $vg1
aux enable_dev "$dev1"
aux enable_dev "$dev2"
aux enable_dev "$dev3"
# plain vgs shows the local duplicates and hides both foreign ones
vgs -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
grep $UUID2 out
# was "not group" (nonexistent command), which made these checks no-ops
not grep $UUID3 out
not grep $UUID4 out
vgs --foreign -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
grep $UUID4 out
# by-name lookup is ambiguous among local duplicates
not vgs -o+uuid $vg1 |tee out
not grep $vg1 out
not vgs --foreign -o+uuid $vg1 |tee out
not grep $vg1 out
# process-all activation activates only the two local VGs
vgchange -ay
lvs --foreign -o vguuid,active |tee out
grep $UUID1 out | grep active
grep $UUID2 out | grep active
grep $UUID3 out | not grep active
grep $UUID4 out | not grep active
vgchange -an
# by-name activation is ambiguous and must fail
not vgchange -ay $vg1
lvs --foreign -o vguuid,active |tee out
grep $UUID1 out | not grep active
grep $UUID2 out | not grep active
grep $UUID3 out | not grep active
grep $UUID4 out | not grep active
vgchange -an
not lvcreate -l1 -an -n $lv2 $vg1
lvs --foreign -o vguuid,name |tee out
grep $UUID1 out | not grep $lv2
grep $UUID2 out | not grep $lv2
grep $UUID3 out | not grep $lv2
grep $UUID4 out | not grep $lv2
# by-name vgremove is ambiguous; nothing is removed
not vgremove $vg1
vgs --foreign -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
grep $UUID4 out
aux wipefs_a "$dev1"
aux wipefs_a "$dev2"
aux wipefs_a "$dev3"
aux wipefs_a "$dev4"
aux wipefs_a "$dev5"
# h. 3 local, 3 foreign
# setup
vgcreate $vg1 "$dev1"
lvcreate -n $lv1 -l1 -an $vg1
UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev1"
vgcreate $vg1 "$dev2"
# diff lvname to prevent clash in vgchange -ay
lvcreate -n ${lv1}_b -l1 -an $vg1
UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev2"
vgcreate $vg1 "$dev3"
# diff lvname to prevent clash in vgchange -ay
lvcreate -n ${lv1}_bb -l1 -an $vg1
UUID3=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev3"
vgcreate $vg1 "$dev4"
lvcreate -n $lv1 -l1 -an $vg1
UUID4=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other" $vg1
aux disable_dev "$dev4"
vgcreate $vg1 "$dev5"
lvcreate -n $lv1 -l1 -an $vg1
UUID5=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other2" $vg1
aux disable_dev "$dev5"
vgcreate $vg1 "$dev6"
lvcreate -n $lv1 -l1 -an $vg1
UUID6=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other3" $vg1
aux enable_dev "$dev1"
aux enable_dev "$dev2"
aux enable_dev "$dev3"
aux enable_dev "$dev4"
aux enable_dev "$dev5"
# plain vgs shows the three local duplicates and hides the foreign ones
vgs -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
# was "not group" (nonexistent command), which made these checks no-ops
not grep $UUID4 out
not grep $UUID5 out
not grep $UUID6 out
vgs --foreign -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
grep $UUID4 out
grep $UUID5 out
grep $UUID6 out
# by-name lookup is ambiguous among local duplicates
not vgs -o+uuid $vg1 |tee out
not grep $vg1 out
not vgs --foreign -o+uuid $vg1 |tee out
not grep $vg1 out
# process-all activation activates only the three local VGs
vgchange -ay
lvs --foreign -o vguuid,active |tee out
grep $UUID1 out | grep active
grep $UUID2 out | grep active
grep $UUID3 out | grep active
grep $UUID4 out | not grep active
grep $UUID5 out | not grep active
grep $UUID6 out | not grep active
vgchange -an
# by-name activation is ambiguous and must fail
not vgchange -ay $vg1
lvs --foreign -o vguuid,active |tee out
grep $UUID1 out | not grep active
grep $UUID2 out | not grep active
grep $UUID3 out | not grep active
grep $UUID4 out | not grep active
grep $UUID5 out | not grep active
grep $UUID6 out | not grep active
vgchange -an
not lvcreate -l1 -an -n $lv2 $vg1
lvs --foreign -o vguuid,name |tee out
grep $UUID1 out | not grep $lv2
grep $UUID2 out | not grep $lv2
grep $UUID3 out | not grep $lv2
grep $UUID4 out | not grep $lv2
grep $UUID5 out | not grep $lv2
grep $UUID6 out | not grep $lv2
# by-name vgremove is ambiguous; nothing is removed
not vgremove $vg1
vgs --foreign -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
grep $UUID4 out
grep $UUID5 out
grep $UUID6 out
aux wipefs_a "$dev1"
aux wipefs_a "$dev2"
aux wipefs_a "$dev3"
aux wipefs_a "$dev4"
aux wipefs_a "$dev5"
aux wipefs_a "$dev6"
# vgreduce test with 1 local and 1 foreign vg.
# setup
vgcreate $vg1 "$dev1" "$dev7"
lvcreate -n $lv1 -l1 -an $vg1 "$dev1"
UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
PV1UUID=$(pvs --noheading -o uuid "$dev1")
PV7UUID=$(pvs --noheading -o uuid "$dev7")
aux disable_dev "$dev1"
aux disable_dev "$dev7"
vgcreate $vg1 "$dev2"
PV2UUID=$(pvs --noheading -o uuid "$dev2")
lvcreate -n $lv1 -l1 -an $vg1
UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other" $vg1
aux enable_dev "$dev1"
aux enable_dev "$dev7"
# both the local and foreign VG, and all three PVs, are visible
vgs --foreign -o+uuid |tee out
grep $vg1 out
grep $UUID1 out
grep $UUID2 out
pvs --foreign -o+uuid |tee out
grep $PV1UUID out
grep $PV7UUID out
grep $PV2UUID out
# vgreduce by name must resolve to the local VG despite the foreign dup
vgreduce $vg1 "$dev7"
pvs --foreign -o+uuid |tee out
grep $PV1UUID out
grep $PV7UUID out
grep $PV2UUID out
# dev7 is now an orphan: its pvs line must no longer mention the VG
grep $PV7UUID out >out2
not grep $vg1 out2
vgremove -ff $vg1
aux wipefs_a "$dev1"
aux wipefs_a "$dev2"
aux wipefs_a "$dev7"

View File

@@ -0,0 +1,319 @@
#!/usr/bin/env bash
# Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
# Test vgrename behavior when multiple VGs (local and/or foreign)
# share the same name.  Renaming by name must fail whenever the name
# is foreign or ambiguous; renaming by VG UUID selects one local VG.
SKIP_WITH_LVMLOCKD=1
SKIP_WITH_LVMPOLLD=1
. lib/inittest
aux prepare_devs 4
# a. 0 local, 1 foreign
# setup
vgcreate $vg1 "$dev1"
lvcreate -n $lv1 -l1 -an $vg1
UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other" $vg1
# Renaming a foreign VG must fail, both by name and by UUID.
not vgrename $vg1 $vg2
vgs --foreign -o+uuid |tee out
grep $UUID1 out
not vgrename $UUID1 $vg2
vgs --foreign -o+uuid |tee out
grep $UUID1 out
lvs --foreign
aux wipefs_a "$dev1"
# b. 0 local, 2 foreign
# setup
vgcreate $vg1 "$dev1"
lvcreate -n $lv1 -l1 -an $vg1
UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other" $vg1
aux disable_dev "$dev1"
vgcreate $vg1 "$dev2"
lvcreate -n $lv1 -l1 -an $vg1
UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other2" $vg1
aux enable_dev "$dev1"
# Both copies are foreign: rename fails by name and by UUID,
# and nothing is renamed to $vg2.
not vgrename $vg1 $vg2
vgs --foreign -o+uuid |tee out
lvs --foreign
grep $vg1 out
not grep $vg2 out
grep $UUID1 out
grep $UUID2 out
not vgrename $UUID1 $vg2
vgs --foreign -o+uuid |tee out
lvs --foreign
grep $vg1 out
not grep $vg2 out
grep $UUID1 out
grep $UUID2 out
lvs --foreign
aux wipefs_a "$dev1"
aux wipefs_a "$dev2"
# c. 1 local, 1 foreign
# setup
vgcreate $vg1 "$dev1"
lvcreate -n $lv1 -l1 -an $vg1
UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev1"
vgcreate $vg1 "$dev2"
lvcreate -n $lv1 -l1 -an $vg1
UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other" $vg1
aux enable_dev "$dev1"
# The name resolves to the single local VG, so rename by name works;
# the foreign duplicate keeps the old name $vg1.
vgrename $vg1 $vg2
vgs --foreign -o+uuid |tee out
lvs --foreign
grep $vg1 out
grep $vg2 out
grep $UUID1 out
grep $UUID2 out
# Renaming back to $vg1 must fail: that name is held by the foreign VG.
not vgrename $vg2 $vg1
vgs --foreign -o+uuid |tee out
lvs --foreign
grep $vg1 out
grep $vg2 out
grep $UUID1 out
grep $UUID2 out
lvs --foreign
aux wipefs_a "$dev1"
aux wipefs_a "$dev2"
# d. 1 local, 2 foreign
# setup
vgcreate $vg1 "$dev1"
lvcreate -n $lv1 -l1 -an $vg1
UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev1"
vgcreate $vg1 "$dev2"
lvcreate -n $lv1 -l1 -an $vg1
UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other" $vg1
aux disable_dev "$dev2"
vgcreate $vg1 "$dev3"
lvcreate -n $lv1 -l1 -an $vg1
UUID3=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other2" $vg1
aux enable_dev "$dev1"
aux enable_dev "$dev2"
# Single local VG: rename by name works despite two foreign duplicates.
vgrename $vg1 $vg2
vgs --foreign -o+uuid |tee out
lvs --foreign
grep $vg1 out
grep $vg2 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
# Renaming back to $vg1 must fail: foreign VGs still hold that name.
not vgrename $vg2 $vg1
vgs --foreign -o+uuid |tee out
lvs --foreign
grep $vg1 out
grep $vg2 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
lvs --foreign
aux wipefs_a "$dev1"
aux wipefs_a "$dev2"
aux wipefs_a "$dev3"
# e. 2 local, 0 foreign
# setup
vgcreate $vg1 "$dev1"
lvcreate -n $lv1 -l1 -an $vg1
UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev1"
vgcreate $vg1 "$dev2"
lvcreate -n ${lv1}_b -l1 -an $vg1
UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux enable_dev "$dev1"
# Two local VGs share the name: rename by name is ambiguous and fails.
not vgrename $vg1 $vg2
vgs -o+uuid |tee out
lvs --foreign
grep $vg1 out
not grep $vg2 out
grep $UUID1 out
grep $UUID2 out
# Rename by UUID disambiguates and succeeds.
vgrename $UUID1 $vg2
vgs -o+uuid |tee out
lvs --foreign
grep $vg1 out
grep $vg2 out
grep $UUID1 out
grep $UUID2 out
# Renaming the second VG to a name already in use must fail.
not vgrename $UUID2 $vg2
vgs -o+uuid |tee out
lvs --foreign
grep $vg1 out
grep $vg2 out
grep $UUID1 out
grep $UUID2 out
lvs --foreign
aux wipefs_a "$dev1"
aux wipefs_a "$dev2"
# f. 2 local, 1 foreign
# setup
vgcreate $vg1 "$dev1"
lvcreate -n $lv1 -l1 -an $vg1
UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev1"
vgcreate $vg1 "$dev2"
lvcreate -n ${lv1}_b -l1 -an $vg1
UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev2"
vgcreate $vg1 "$dev3"
lvcreate -n $lv1 -l1 -an $vg1
UUID3=$(vgs --noheading -o vg_uuid $vg1 | xargs)
vgchange -y --systemid "other" $vg1
aux enable_dev "$dev1"
aux enable_dev "$dev2"
lvs --foreign
# Two locals share the name: rename by name is ambiguous and fails.
not vgrename $vg1 $vg2
vgs --foreign -o+uuid |tee out
lvs --foreign
grep $vg1 out
not grep $vg2 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
# Rename by UUID disambiguates and succeeds.
vgrename $UUID1 $vg2
vgs --foreign -o+uuid |tee out
lvs --foreign
grep $vg1 out
grep $vg2 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
# Only one local VG named $vg1 remains, so rename by name now works.
vgrename $vg1 $vg3
vgs --foreign -o+uuid |tee out
lvs --foreign
grep $vg1 out
grep $vg2 out
grep $vg3 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
# Renaming onto a name held by the foreign VG must fail.
not vgrename $vg2 $vg1
vgs --foreign -o+uuid |tee out
lvs --foreign
grep $vg1 out
grep $vg2 out
grep $vg3 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
# Renaming onto a name held by another local VG must fail.
not vgrename $vg2 $vg3
vgs --foreign -o+uuid |tee out
lvs --foreign
grep $vg1 out
grep $vg2 out
grep $vg3 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
lvs --foreign
aux wipefs_a "$dev1"
aux wipefs_a "$dev2"
aux wipefs_a "$dev3"
# g. 3 local, 0 foreign
# setup
vgcreate $vg1 "$dev1"
lvcreate -n $lv1 -l1 -an $vg1
UUID1=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev1"
vgcreate $vg1 "$dev2"
lvcreate -n ${lv1}_b -l1 -an $vg1
UUID2=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux disable_dev "$dev2"
vgcreate $vg1 "$dev3"
lvcreate -n ${lv1}_c -l1 -an $vg1
UUID3=$(vgs --noheading -o vg_uuid $vg1 | xargs)
aux enable_dev "$dev1"
aux enable_dev "$dev2"
# Three local VGs share the name: rename by name is ambiguous and fails.
not vgrename $vg1 $vg2
vgs -o+uuid |tee out
lvs --foreign
grep $vg1 out
not grep $vg2 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
# Rename by UUID succeeds for the first duplicate.
vgrename $UUID1 $vg2
vgs -o+uuid |tee out
lvs --foreign
grep $vg1 out
grep $vg2 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
# Still two VGs named $vg1: rename by name remains ambiguous.
not vgrename $vg1 $vg2
vgs -o+uuid |tee out
lvs --foreign
grep $vg1 out
grep $vg2 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
not vgrename $vg1 $vg3
vgs -o+uuid |tee out
lvs --foreign
grep $vg1 out
grep $vg2 out
not grep $vg3 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
# Rename by UUID onto an existing name must fail.
not vgrename $UUID2 $vg2
vgs -o+uuid |tee out
lvs --foreign
grep $vg1 out
grep $vg2 out
not grep $vg3 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
# Rename by UUID to an unused name succeeds.
vgrename $UUID2 $vg3
vgs -o+uuid |tee out
lvs --foreign
grep $vg1 out
grep $vg2 out
grep $vg3 out
grep $UUID1 out
grep $UUID2 out
grep $UUID3 out
lvs --foreign
aux wipefs_a "$dev1"
aux wipefs_a "$dev2"
aux wipefs_a "$dev3"

View File

@@ -0,0 +1,183 @@
#!/usr/bin/env bash
# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# Test how the dm-integrity block size is chosen for raid+integrity LVs:
# it should follow the device logical block size (512 vs 4096 loop devs),
# the file-system block size detected by libblkid, or an explicit
# --raidintegrityblocksize, and invalid combinations must be rejected.
SKIP_WITH_LVMPOLLD=1
. lib/inittest
aux have_integrity 1 5 0 || skip
# Need losetup that supports --sector-size to create 4K loop devices.
losetup -h | grep sector-size || skip
# Tests with fs block sizes require a libblkid version that shows BLOCK_SIZE
aux prepare_devs 1
vgcreate $vg "$dev1"
lvcreate -n $lv1 -l8 $vg
mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
blkid "$DM_DEV_DIR/$vg/$lv1" | grep BLOCK_SIZE || skip
lvchange -an $vg
vgremove -ff $vg
# Backing files for two 512-byte and two 4096-byte sector loop devices.
dd if=/dev/zero of=loopa bs=$((1024*1024)) count=64 2> /dev/null
dd if=/dev/zero of=loopb bs=$((1024*1024)) count=64 2> /dev/null
dd if=/dev/zero of=loopc bs=$((1024*1024)) count=64 2> /dev/null
dd if=/dev/zero of=loopd bs=$((1024*1024)) count=64 2> /dev/null
LOOP1=$(losetup -f loopa --show)
LOOP2=$(losetup -f loopb --show)
LOOP3=$(losetup -f loopc --sector-size 4096 --show)
LOOP4=$(losetup -f loopd --sector-size 4096 --show)
echo $LOOP1
echo $LOOP2
echo $LOOP3
echo $LOOP4
aux extend_filter "a|$LOOP1|"
aux extend_filter "a|$LOOP2|"
aux extend_filter "a|$LOOP3|"
aux extend_filter "a|$LOOP4|"
aux lvmconf 'devices/scan = "/dev"'
# $vg1 sits on 512-byte devices, $vg2 on 4K devices.
vgcreate $vg1 $LOOP1 $LOOP2
vgcreate $vg2 $LOOP3 $LOOP4
# lvcreate on dev512, result 512
lvcreate --type raid1 -m1 --raidintegrity y -l 8 -n $lv1 $vg1
pvck --dump metadata $LOOP1 | grep 'block_size = 512'
lvremove -y $vg1/$lv1
# lvcreate on dev4k, result 4k
lvcreate --type raid1 -m1 --raidintegrity y -l 8 -n $lv1 $vg2
pvck --dump metadata $LOOP3 | grep 'block_size = 4096'
lvremove -y $vg2/$lv1
# lvcreate --bs 512 on dev4k, result fail
not lvcreate --type raid1 -m1 --raidintegrity y --raidintegrityblocksize 512 -l 8 -n $lv1 $vg2
# lvcreate --bs 4096 on dev512, result 4k
lvcreate --type raid1 -m1 --raidintegrity y --raidintegrityblocksize 4096 -l 8 -n $lv1 $vg1
pvck --dump metadata $LOOP1 | grep 'block_size = 4096'
lvremove -y $vg1/$lv1
# Test an unknown fs block size by simply not creating a fs on the lv.
# lvconvert on dev512, fsunknown, result 512
lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1
# clear any residual fs so that libblkid cannot find an fs block size
aux wipefs_a /dev/$vg1/$lv1
lvconvert --raidintegrity y $vg1/$lv1
pvck --dump metadata $LOOP1 | grep 'block_size = 512'
lvremove -y $vg1/$lv1
# lvconvert on dev4k, fsunknown, result 4k
lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg2
# clear any residual fs so that libblkid cannot find an fs block size
aux wipefs_a /dev/$vg2/$lv1
lvconvert --raidintegrity y $vg2/$lv1
pvck --dump metadata $LOOP3 | grep 'block_size = 4096'
lvremove -y $vg2/$lv1
# lvconvert --bs 4k on dev512, fsunknown, result fail
lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1
# clear any residual fs so that libblkid cannot find an fs block size
aux wipefs_a /dev/$vg1/$lv1
not lvconvert --raidintegrity y --raidintegrityblocksize 4096 $vg1/$lv1
lvremove -y $vg1/$lv1
# lvconvert --bs 512 on dev4k, fsunknown, result fail
lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg2
# clear any residual fs so that libblkid cannot find an fs block size
aux wipefs_a /dev/$vg2/$lv1
not lvconvert --raidintegrity y --raidintegrityblocksize 512 $vg2/$lv1
lvremove -y $vg2/$lv1
# lvconvert on dev512, xfs 512, result 512
lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1
aux wipefs_a /dev/$vg1/$lv1
mkfs.xfs -f "$DM_DEV_DIR/$vg1/$lv1"
lvconvert --raidintegrity y $vg1/$lv1
pvck --dump metadata $LOOP1 | grep 'block_size = 512'
lvremove -y $vg1/$lv1
# lvconvert on dev4k, xfs 4096, result 4096
lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg2
aux wipefs_a /dev/$vg2/$lv1
mkfs.xfs -f "$DM_DEV_DIR/$vg2/$lv1"
lvconvert --raidintegrity y $vg2/$lv1
pvck --dump metadata $LOOP3 | grep 'block_size = 4096'
lvremove -y $vg2/$lv1
# lvconvert on dev512, ext4 1024, result 1024
lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1
aux wipefs_a /dev/$vg1/$lv1
mkfs.ext4 -b 1024 "$DM_DEV_DIR/$vg1/$lv1"
lvconvert --raidintegrity y $vg1/$lv1
pvck --dump metadata $LOOP1 | grep 'block_size = 1024'
lvremove -y $vg1/$lv1
# lvconvert on dev4k, ext4 4096, result 4096
lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg2
aux wipefs_a /dev/$vg2/$lv1
mkfs.ext4 "$DM_DEV_DIR/$vg2/$lv1"
lvconvert --raidintegrity y $vg2/$lv1
pvck --dump metadata $LOOP3 | grep 'block_size = 4096'
lvremove -y $vg2/$lv1
# lvconvert --bs 512 on dev512, xfs 4096, result 512
lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1
aux wipefs_a /dev/$vg1/$lv1
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg1/$lv1"
lvconvert --raidintegrity y --raidintegrityblocksize 512 $vg1/$lv1
pvck --dump metadata $LOOP1 | grep 'block_size = 512'
lvremove -y $vg1/$lv1
# lvconvert --bs 1024 on dev512, xfs 4096, result 1024
lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1
aux wipefs_a /dev/$vg1/$lv1
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg1/$lv1"
lvconvert --raidintegrity y --raidintegrityblocksize 1024 $vg1/$lv1
pvck --dump metadata $LOOP1 | grep 'block_size = 1024'
lvremove -y $vg1/$lv1
# lvconvert --bs 512 on dev512, ext4 1024, result 512
lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg1
aux wipefs_a /dev/$vg1/$lv1
mkfs.ext4 -b 1024 "$DM_DEV_DIR/$vg1/$lv1"
lvconvert --raidintegrity y --raidintegrityblocksize 512 $vg1/$lv1
pvck --dump metadata $LOOP1 | grep 'block_size = 512'
lvremove -y $vg1/$lv1
# lvconvert --bs 512 on dev4k, ext4 4096, result fail
lvcreate --type raid1 -m1 -l 8 -n $lv1 $vg2
aux wipefs_a /dev/$vg2/$lv1
mkfs.ext4 "$DM_DEV_DIR/$vg2/$lv1"
not lvconvert --raidintegrity y --raidintegrityblocksize 512 $vg2/$lv1
lvremove -y $vg2/$lv1
# FIXME: need to use scsi_debug to create devs with LBS 512 PBS 4k
# FIXME: lvconvert, fsunknown, LBS 512, PBS 4k: result 512
# FIXME: lvconvert --bs 512, fsunknown, LBS 512, PBS 4k: result 512
# FIXME: lvconvert --bs 4k, fsunknown, LBS 512, PBS 4k: result 4k
# Cleanup: remove VGs, detach loop devices, delete backing files.
vgremove -ff $vg1
vgremove -ff $vg2
losetup -d $LOOP1
losetup -d $LOOP2
losetup -d $LOOP3
losetup -d $LOOP4
rm loopa
rm loopb
rm loopc
rm loopd

View File

@@ -1,55 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
test_description='Test vgs with duplicate vg names'
SKIP_WITH_LVMLOCKD=1
SKIP_WITH_LVMPOLLD=1
. lib/inittest
aux prepare_devs 2
pvcreate "$dev1"
pvcreate "$dev2"
# Create two VGs with the same name by hiding one device while
# creating the VG on the other.
aux disable_dev "$dev1" "$dev2"
aux enable_dev "$dev1"
vgcreate $vg1 "$dev1"
UUID1=$(vgs --noheading -o vg_uuid $vg1)
aux disable_dev "$dev1"
aux enable_dev "$dev2"
vgcreate $vg1 "$dev2"
UUID2=$(vgs --noheading -o vg_uuid $vg1)
aux enable_dev "$dev1"
pvscan --cache "$dev1"
pvs "$dev1"
pvs "$dev2"
# With both devices visible, vgs reports both duplicates.
vgs -o+vg_uuid | tee err
grep $UUID1 err
grep $UUID2 err
# should we specify and test which should be displayed?
# vgs --noheading -o vg_uuid $vg1 >err
# grep $UUID1 err
# With only dev1 visible, only UUID1 is reported.
aux disable_dev "$dev2"
vgs -o+vg_uuid | tee err
grep $UUID1 err
not grep $UUID2 err
# With only dev2 visible, only UUID2 is reported.
aux enable_dev "$dev2"
pvscan --cache "$dev2"
aux disable_dev "$dev1"
vgs -o+vg_uuid | tee err
grep $UUID2 err
not grep $UUID1 err
aux enable_dev "$dev1"

View File

@@ -50,6 +50,17 @@ check vg_field $vg1 systemid "$SID"
vgremove $vg1
fi
## appmachineid
lvm version > lvmver
if grep app-machineid lvmver; then
aux lvmconf "global/system_id_source = appmachineid"
lvm systemid | awk '{ print $3 }' > sid_lvm
vgcreate $vg1 "$dev1"
vgs -o systemid --noheadings $vg1 | awk '{print $1}' > sid_vg
diff sid_lvm sid_vg
vgremove $vg1
fi
## uname
SID1=$(uname -n)

View File

@@ -0,0 +1,72 @@
#!/usr/bin/env bash
# Copyright (C) 2020 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# Test a foreign (non-lvm2) user of a thin-pool that manages its own
# thin devices and transaction ids via dmsetup messages.
SKIP_WITH_LVMPOLLD=1
. lib/inittest
# Remove the foreign thin device (if present) and the VG on exit.
cleanup_mounted_and_teardown()
{
	dmsetup remove $THIN || true
	vgremove -ff $vg
	aux teardown
}
#
# Main
#
aux have_thin 1 0 0 || skip
which mkfs.ext4 || skip
# Use our mkfs config file to get approximately same results
# TODO: maybe use it for all test via some 'prepare' function
export MKE2FS_CONFIG="$TESTOLDPWD/lib/mke2fs.conf"
aux prepare_vg 2 64
# Create named pool only
lvcreate -L2 -T $vg/pool
POOL="$vg-pool"
THIN="${PREFIX}_thin"
# Foreign user is using its own ioctl commands to create thin devices,
# bumping the pool's transaction id behind lvm2's back.
dmsetup message $POOL 0 "create_thin 0"
dmsetup message $POOL 0 "set_transaction_id 0 2"
# Once the transaction id has changed, lvm2 shall not be able to create thinLV
fail lvcreate -V10 $vg/pool
trap 'cleanup_mounted_and_teardown' EXIT
# 20M thin device
dmsetup create "$THIN" --table "0 40960 thin $DM_DEV_DIR/mapper/$POOL 0"
mkfs.ext4 "$DM_DEV_DIR/mapper/$THIN"
dmsetup remove "$THIN"
lvchange -an $vg/pool
# Repair a thin-pool used by 'foreign' apps (which set their own tid);
# the repair must not complain about a transaction id mismatch.
lvconvert --repair $vg/pool 2>&1 | tee out
not grep "Transaction id" out
# The foreign thin device must still hold a clean filesystem after repair.
lvchange -ay $vg/pool
dmsetup create "$THIN" --table "0 40960 thin $DM_DEV_DIR/mapper/$POOL 0"
fsck -n "$DM_DEV_DIR/mapper/$THIN"

View File

@@ -139,8 +139,10 @@ mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
cp pattern1 $mount_dir/pattern1
ls -l $mount_dir
lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1
# TODO BZ 1808012 - can not convert active volume to writecache:
not lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1
if false; then
check lv_field $vg/$lv1 segtype writecache
lvs -a $vg/${lv2}_cvol --noheadings -o segtype >out
@@ -158,6 +160,7 @@ mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
diff pattern1 $mount_dir/pattern1
diff pattern1 $mount_dir/pattern1.after
fi
umount $mount_dir
lvchange -an $vg/$lv1

View File

@@ -515,6 +515,14 @@ arg(pvmetadatacopies_ARG, '\0', "pvmetadatacopies", pvmetadatacopies_VAL, 0, 0,
arg(raidintegrity_ARG, '\0', "raidintegrity", bool_VAL, 0, 0,
"Enable or disable data integrity checksums for raid images.\n")
arg(raidintegrityblocksize_ARG, '\0', "raidintegrityblocksize", number_VAL, 0, 0,
"The block size to use for dm-integrity on raid images.\n"
"The integrity block size should usually match the device\n"
"logical block size, or the file system block size.\n"
"It may be less than the file system block size, but not\n"
"less than the device logical block size.\n"
"Possible values: 512, 1024, 2048, 4096.\n")
arg(raidintegritymode_ARG, '\0', "raidintegritymode", string_VAL, 0, 0,
"Use a journal (default) or bitmap for keeping integrity checksums consistent\n"
"in case of a crash. The bitmap areas are recalculated after a crash, so corruption\n"

View File

@@ -766,7 +766,7 @@ FLAGS: SECONDARY_SYNTAX
---
lvconvert --raidintegrity Bool LV_raid
OO: --raidintegritymode String, OO_LVCONVERT
OO: --raidintegritymode String, --raidintegrityblocksize Number, OO_LVCONVERT
OP: PV ...
ID: lvconvert_integrity
DESC: Add or remove data integrity checksums to raid images.
@@ -887,7 +887,7 @@ DESC: Create a raid1 or mirror LV (infers --type raid1|mirror).
lvcreate --type raid --size SizeMB VG
OO: --mirrors PNumber, --stripes Number, --stripesize SizeKB,
--regionsize RegionSize, --minrecoveryrate SizeKB, --maxrecoveryrate SizeKB,
--raidintegrity Bool, --raidintegritymode String, OO_LVCREATE
--raidintegrity Bool, --raidintegritymode String, --raidintegrityblocksize Number, OO_LVCREATE
OP: PV ...
ID: lvcreate_raid_any
DESC: Create a raid LV (a specific raid level must be used, e.g. raid1).

View File

@@ -1799,8 +1799,8 @@ static int _lvconvert_raid_types(struct cmd_context *cmd, struct logical_volume
* If operations differ between striped and linear, split this case.
*/
if (segtype_is_striped(seg->segtype) || segtype_is_linear(seg->segtype)) {
if (!_convert_striped(cmd, lv, lp))
goto_out;
ret = _convert_striped(cmd, lv, lp);
goto out;
}
/*
@@ -2412,7 +2412,8 @@ static int _lvconvert_thin_pool_repair(struct cmd_context *cmd,
goto deactivate_mlv;
}
if (thin_dump[0]) {
/* Check matching transactionId when thin-pool is used by lvm2 (transactionId != 0) */
if (first_seg(pool_lv)->transaction_id && thin_dump[0]) {
argv[0] = thin_dump;
argv[1] = pms_path;
argv[2] = NULL;
@@ -5858,6 +5859,9 @@ static int _lvconvert_integrity_single(struct cmd_context *cmd,
if (!integrity_mode_set(arg_str_value(cmd, raidintegritymode_ARG, NULL), &settings))
return_ECMD_FAILED;
if (arg_is_set(cmd, raidintegrityblocksize_ARG))
settings.block_size = arg_int_value(cmd, raidintegrityblocksize_ARG, 0);
if (arg_int_value(cmd, raidintegrity_ARG, 0))
ret = _lvconvert_integrity_add(cmd, lv, &settings);
else

View File

@@ -860,7 +860,8 @@ static int _lvcreate_params(struct cmd_context *cmd,
raidmaxrecoveryrate_ARG,\
raidminrecoveryrate_ARG, \
raidintegrity_ARG, \
raidintegritymode_ARG
raidintegritymode_ARG, \
raidintegrityblocksize_ARG
#define SIZE_ARGS \
extents_ARG,\
@@ -1229,8 +1230,10 @@ static int _lvcreate_params(struct cmd_context *cmd,
}
}
if (seg_is_raid(lp)) {
lp->raidintegrity = arg_int_value(cmd, raidintegrity_ARG, 0);
if (seg_is_raid(lp) && arg_int_value(cmd, raidintegrity_ARG, 0)) {
lp->raidintegrity = 1;
if (arg_is_set(cmd, raidintegrityblocksize_ARG))
lp->integrity_settings.block_size = arg_int_value(cmd, raidintegrityblocksize_ARG, 0);
if (arg_is_set(cmd, raidintegritymode_ARG)) {
if (!integrity_mode_set(arg_str_value(cmd, raidintegritymode_ARG, NULL), &lp->integrity_settings))
return_0;

View File

@@ -582,7 +582,7 @@ static int _online_pvscan_single(struct metadata_area *mda, void *baton)
if (mda_is_ignored(mda))
return 1;
vg = mda->ops->vg_read(b->fid, "", mda, NULL, NULL);
vg = mda->ops->vg_read(b->cmd, b->fid, "", mda, NULL, NULL);
if (!vg) {
/*
* Many or most cases of bad metadata would be found in

View File

@@ -1853,8 +1853,6 @@ static int _resolve_duplicate_vgnames(struct cmd_context *cmd,
if (lvmcache_vg_is_foreign(cmd, vgnl->vg_name, vgnl->vgid)) {
if (!id_write_format((const struct id*)vgnl->vgid, uuid, sizeof(uuid)))
stack;
log_warn("WARNING: Ignoring foreign VG with matching name %s UUID %s.",
vgnl->vg_name, uuid);
dm_list_del(&vgnl->list);
} else {
found++;

View File

@@ -315,6 +315,8 @@ retry_name:
goto_out;
log_debug("Using new VG name %s.", vp.new_vgname);
lvmcache_destroy(cmd, 1, 0);
/*
* Create a device filter so that we are only working with the devices
* in arg_import. With the original devs hidden (that arg_import were
@@ -325,7 +327,7 @@ retry_name:
init_internal_filtering(1);
dm_list_iterate_items(vd, &vp.arg_import)
internal_filter_allow(cmd->mem, vd->dev);
lvmcache_destroy(cmd, 1, 0);
refresh_filters(cmd);
log_debug("Changing VG %s to %s.", vp.old_vgname, vp.new_vgname);

View File

@@ -21,10 +21,8 @@ static struct volume_group *_vgmerge_vg_read(struct cmd_context *cmd,
struct volume_group *vg;
log_verbose("Checking for volume group \"%s\"", vg_name);
vg = vg_read_for_update(cmd, vg_name, NULL, 0, 0);
if (vg_read_error(vg)) {
release_vg(vg);
if (!vg)
return NULL;
}
if (vg_is_shared(vg)) {
log_error("vgmerge not allowed for lock_type %s", vg->lock_type);

View File

@@ -183,7 +183,7 @@ int vgrename(struct cmd_context *cmd, int argc, char **argv)
vg_name_new = skip_dev_dir(cmd, argv[1], NULL);
if (!validate_vg_rename_params(cmd, vg_name_old, vg_name_new))
return_0;
return_ECMD_FAILED;
if (!(vp.vg_name_old = dm_pool_strdup(cmd->mem, vg_name_old)))
return_ECMD_FAILED;

View File

@@ -691,7 +691,7 @@ int vgsplit(struct cmd_context *cmd, int argc, char **argv)
vg_to = vg_read_for_update(cmd, vg_name_to, NULL, 0, 0);
if (vg_read_error(vg_to)) {
if (!vg_to) {
log_error("Volume group \"%s\" became inconsistent: "
"please fix manually", vg_name_to);
goto bad;