/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib.h"
#include "btree.h"
#include "config.h"
#include "toolcontext.h"
#include "dm-ioctl.h" /* for DM_UUID_LEN */
#include "lvm-string.h" /* for LVM's UUID_PREFIX */
#ifdef UDEV_SYNC_SUPPORT
#include <libudev.h>
#endif
#include <unistd.h>
#include <sys/param.h>
#include <dirent.h>
struct dev_iter {
struct btree_iter *current;
struct dev_filter *filter;
};
struct dir_list {
struct dm_list list;
char dir[0];
};
static struct {
struct dm_pool *mem;
struct dm_hash_table *names;
struct dm_hash_table *vgid_index;
struct dm_hash_table *lvid_index;
struct btree *sysfs_only_devices; /* see comments in _get_device_for_sysfs_dev_name_using_devno */
struct btree *devices;
struct dm_regex *preferred_names_matcher;
const char *dev_dir;
int has_scanned;
struct dm_list dirs;
struct dm_list files;
} _cache;
#define _zalloc(x) dm_pool_zalloc(_cache.mem, (x))
#define _free(x) dm_pool_free(_cache.mem, (x))
#define _strdup(x) dm_pool_strdup(_cache.mem, (x))
static int _insert(const char *path, const struct stat *info,
int rec, int check_with_udev_db);
/* Set up the non-zero members of a passed, zeroed 'struct device' */
static void _dev_init(struct device *dev, int max_error_count)
{
dev->phys_block_size = -1;
dev->block_size = -1;
dev->fd = -1;
dev->read_ahead = -1;
dev->max_error_count = max_error_count;
dev->ext.enabled = 0;
dev->ext.src = DEV_EXT_NONE;
dm_list_init(&dev->aliases);
dm_list_init(&dev->open_list);
}
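/*
 * Free a file-backed 'struct device' created by dev_create_file();
 * only devices allocated with malloc (DEV_ALLOCED set) are freed.
 */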
void dev_destroy_file(struct device *dev)
{
if (!(dev->flags & DEV_ALLOCED))
return;
dm_free((void *) dm_list_item(dev->aliases.n, struct dm_str_list)->str);
dm_free(dev->aliases.n);
dm_free(dev);
}
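/*
 * Create a 'struct device' representing a regular file rather than a block
 * device. If 'dev' is NULL, the device and its alias are allocated here,
 * either with malloc (use_malloc set) or from the cache memory pool.
 */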
struct device *dev_create_file(const char *filename, struct device *dev,
struct dm_str_list *alias, int use_malloc)
{
int allocate = !dev;
if (allocate) {
if (use_malloc) {
if (!(dev = dm_zalloc(sizeof(*dev)))) {
log_error("struct device allocation failed");
return NULL;
}
if (!(alias = dm_zalloc(sizeof(*alias)))) {
log_error("struct dm_str_list allocation failed");
dm_free(dev);
return NULL;
}
if (!(alias->str = dm_strdup(filename))) {
log_error("filename strdup failed");
dm_free(dev);
dm_free(alias);
return NULL;
}
} else {
if (!(dev = _zalloc(sizeof(*dev)))) {
log_error("struct device allocation failed");
return NULL;
}
if (!(alias = _zalloc(sizeof(*alias)))) {
log_error("struct dm_str_list allocation failed");
_free(dev);
return NULL;
}
if (!(alias->str = _strdup(filename))) {
log_error("filename strdup failed");
_free(dev);
return NULL;
}
}
} else if (!(alias->str = dm_strdup(filename))) {
log_error("filename strdup failed");
return NULL;
}
_dev_init(dev, NO_DEV_ERROR_COUNT_LIMIT);
dev->flags = DEV_REGULAR | ((use_malloc) ? DEV_ALLOCED : 0);
dm_list_add(&dev->aliases, &alias->list);
return dev;
}
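/* Allocate a new 'struct device' for device number 'd' from the cache pool. */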
static struct device *_dev_create(dev_t d)
{
struct device *dev;
if (!(dev = _zalloc(sizeof(*dev)))) {
log_error("struct device allocation failed");
return NULL;
}
_dev_init(dev, dev_disable_after_error_count());
dev->dev = d;
return dev;
}
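/*
 * Promote alias 'sl' to the head of the device's alias list so that it
 * becomes the device's preferred (reported) name.
 */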
void dev_set_preferred_name(struct dm_str_list *sl, struct device *dev)
{
/*
* Don't interfere with ordering specified in config file.
*/
if (_cache.preferred_names_matcher)
return;
log_debug_devs("%s: New preferred name", sl->str);
dm_list_del(&sl->list);
dm_list_add_h(&dev->aliases, &sl->list);
}
/*
 * Check whether path0 or path1 contains the subpath. The path that
 * *does not* contain the subpath wins (return 0 or 1). If both paths
 * contain the subpath, return -1. If neither contains the subpath,
 * return -2.
 */
static int _builtin_preference(const char *path0, const char *path1,
size_t skip_prefix_count, const char *subpath)
{
size_t subpath_len;
int r0, r1;
subpath_len = strlen(subpath);
r0 = !strncmp(path0 + skip_prefix_count, subpath, subpath_len);
r1 = !strncmp(path1 + skip_prefix_count, subpath, subpath_len);
if (!r0 && r1)
/* path0 does not have the subpath - it wins */
return 0;
else if (r0 && !r1)
/* path1 does not have the subpath - it wins */
return 1;
else if (r0 && r1)
/* both of them have the subpath */
return -1;
/* no path has the subpath */
return -2;
}
static int _apply_builtin_path_preference_rules(const char *path0, const char *path1)
{
size_t devdir_len;
int r;
devdir_len = strlen(_cache.dev_dir);
if (!strncmp(path0, _cache.dev_dir, devdir_len) &&
!strncmp(path1, _cache.dev_dir, devdir_len)) {
/*
* We're trying to achieve the ordering:
* /dev/block/ < /dev/dm-* < /dev/disk/ < /dev/mapper/ < anything else
*/
/* Prefer any other path over /dev/block/ path. */
if ((r = _builtin_preference(path0, path1, devdir_len, "block/")) >= -1)
return r;
/* Prefer any other path over /dev/dm-* path. */
if ((r = _builtin_preference(path0, path1, devdir_len, "dm-")) >= -1)
return r;
/* Prefer any other path over /dev/disk/ path. */
if ((r = _builtin_preference(path0, path1, devdir_len, "disk/")) >= -1)
return r;
/* Prefer any other path over /dev/mapper/ path. */
if ((r = _builtin_preference(path0, path1, 0, dm_dir())) >= -1)
return r;
}
return -1;
}
/* Return 1 if we prefer path1, else return 0 */
static int _compare_paths(const char *path0, const char *path1)
{
int slash0 = 0, slash1 = 0;
int m0, m1;
const char *p;
char p0[PATH_MAX], p1[PATH_MAX];
char *s0, *s1;
struct stat stat0, stat1;
int r;
/*
* FIXME Better to compare patterns one-at-a-time against all names.
*/
if (_cache.preferred_names_matcher) {
m0 = dm_regex_match(_cache.preferred_names_matcher, path0);
m1 = dm_regex_match(_cache.preferred_names_matcher, path1);
if (m0 != m1) {
if (m0 < 0)
return 1;
if (m1 < 0)
return 0;
if (m0 < m1)
return 1;
if (m1 < m0)
return 0;
}
}
/* Apply built-in preference rules first. */
if ((r = _apply_builtin_path_preference_rules(path0, path1)) >= 0)
return r;
/* Return the path with fewer slashes */
for (p = path0; p++; p = (const char *) strchr(p, '/'))
slash0++;
for (p = path1; p++; p = (const char *) strchr(p, '/'))
slash1++;
if (slash0 < slash1)
return 0;
if (slash1 < slash0)
return 1;
strncpy(p0, path0, sizeof(p0) - 1);
p0[sizeof(p0) - 1] = '\0';
strncpy(p1, path1, sizeof(p1) - 1);
p1[sizeof(p1) - 1] = '\0';
s0 = p0 + 1;
s1 = p1 + 1;
/*
 * If we reach here, both paths contain the same number of slashes.
 * Now skip past identical path components.
 */
while (*s0 && *s0 == *s1)
s0++, s1++;
/* We prefer symlinks - they exist for a reason!
* So we prefer a shorter path before the first symlink in the name.
* FIXME Configuration option to invert this? */
while (s0) {
s0 = strchr(s0, '/');
s1 = strchr(s1, '/');
if (s0) {
*s0 = '\0';
*s1 = '\0';
}
if (lstat(p0, &stat0)) {
log_sys_very_verbose("lstat", p0);
return 1;
}
if (lstat(p1, &stat1)) {
log_sys_very_verbose("lstat", p1);
return 0;
}
if (S_ISLNK(stat0.st_mode) && !S_ISLNK(stat1.st_mode))
return 0;
if (!S_ISLNK(stat0.st_mode) && S_ISLNK(stat1.st_mode))
return 1;
if (s0) {
*s0++ = '/';
*s1++ = '/';
}
}
/* ASCII comparison */
if (strcmp(path0, path1) < 0)
return 0;
else
return 1;
}
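/*
 * Register 'path' as an alias of 'dev', placing it at the head of the alias
 * list if _compare_paths() prefers it over the current first alias.
 */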
static int _add_alias(struct device *dev, const char *path)
{
struct dm_str_list *sl = _zalloc(sizeof(*sl));
struct dm_str_list *strl;
const char *oldpath;
int prefer_old = 1;
if (!sl)
return_0;
/* Is name already there? */
dm_list_iterate_items(strl, &dev->aliases) {
if (!strcmp(strl->str, path)) {
log_debug_devs("%s: Already in device cache", path);
return 1;
}
}
sl->str = path;
if (!dm_list_empty(&dev->aliases)) {
oldpath = dm_list_item(dev->aliases.n, struct dm_str_list)->str;
prefer_old = _compare_paths(path, oldpath);
log_debug_devs("%s: Aliased to %s in device cache%s (%d:%d)",
path, oldpath, prefer_old ? "" : " (preferred name)",
(int) MAJOR(dev->dev), (int) MINOR(dev->dev));
} else
log_debug_devs("%s: Added to device cache (%d:%d)", path,
(int) MAJOR(dev->dev), (int) MINOR(dev->dev));
if (prefer_old)
dm_list_add(&dev->aliases, &sl->list);
else
dm_list_add_h(&dev->aliases, &sl->list);
return 1;
}
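/*
 * Read a single line from the sysfs file at 'path' into 'buf' and strip
 * the trailing newline, if any.
 */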
static int _get_sysfs_value(const char *path, char *buf, size_t buf_size, int error_if_no_value)
{
FILE *fp;
size_t len;
int r = 0;
if (!(fp = fopen(path, "r"))) {
log_sys_error("fopen", path);
return 0;
}
if (!fgets(buf, buf_size, fp)) {
log_sys_error("fgets", path);
goto out;
}
if ((len = strlen(buf)) && buf[len - 1] == '\n')
buf[--len] = '\0';
if (!len && error_if_no_value)
log_error("_get_sysfs_value: %s: no value", path);
else
r = 1;
out:
if (fclose(fp))
log_sys_error("fclose", path);
return r;
}
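/* Read the DM UUID for major:minor from <sysfs_dir>/dev/block/<major>:<minor>/dm/uuid. */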
static int _get_dm_uuid_from_sysfs(char *buf, size_t buf_size, int major, int minor)
{
char path[PATH_MAX];
if (dm_snprintf(path, sizeof(path), "%sdev/block/%d:%d/dm/uuid", dm_sysfs_dir(), major, minor) < 0) {
log_error("%d:%d: dm_snprintf failed for path to sysfs dm directory.", major, minor);
return 0;
}
return _get_sysfs_value(path, buf, buf_size, 0);
}
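/*
 * Look up the device list stored under 'key' in index 'idx', allocating
 * and inserting an empty list on first use.
 */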
static struct dm_list *_get_or_add_list_by_index_key(struct dm_hash_table *idx, const char *key)
{
struct dm_list *list;
if ((list = dm_hash_lookup(idx, key)))
return list;
if (!(list = _zalloc(sizeof(*list)))) {
log_error("%s: failed to allocate device list for device cache index.", key);
return NULL;
}
dm_list_init(list);
if (!dm_hash_insert(idx, key, list)) {
log_error("%s: failed to insert device list to device cache index.", key);
return NULL;
}
return list;
}
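/*
 * Create a cache entry for a device found in sysfs and track it in the
 * sysfs_only_devices btree. If no node for it exists under the LVM device
 * directory, a static placeholder entry is returned instead.
 */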
static struct device *_insert_sysfs_dev(dev_t devno, const char *devname)
{
static struct device _fake_dev = { .flags = DEV_USED_FOR_LV };
struct stat stat0;
char path[PATH_MAX];
char *path_copy;
struct device *dev;
if (dm_snprintf(path, sizeof(path), "%s%s", _cache.dev_dir, devname) < 0) {
log_error("_insert_sysfs_dev: %s: dm_snprintf failed", devname);
return NULL;
}
if (lstat(path, &stat0) < 0) {
/* When the device node does not exist, return a fake entry.
 * This may happen e.g. when the lvm2 device dir != /dev */
log_debug("%s: Not available device node", path);
return &_fake_dev;
}
if (!(dev = _dev_create(devno)))
return_NULL;
if (!(path_copy = dm_pool_strdup(_cache.mem, path))) {
log_error("_insert_sysfs_dev: %s: dm_pool_strdup failed", devname);
return NULL;
}
if (!_add_alias(dev, path_copy)) {
log_error("Couldn't add alias to dev cache.");
_free(dev);
return NULL;
}
if (!btree_insert(_cache.sysfs_only_devices, (uint32_t) devno, dev)) {
log_error("Couldn't add device to binary tree of sysfs-only devices in dev cache.");
_free(dev);
return NULL;
}
return dev;
}
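/*
 * Map a sysfs block device name to its 'struct device': read the major:minor
 * pair from <sysfs_dir>/block/<devname>/dev, look the devno up in the main
 * device btree, and fall back to the sysfs-only btree for devices that have
 * no node in /dev yet.
 */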
static struct device *_get_device_for_sysfs_dev_name_using_devno(const char *devname)
{
char path[PATH_MAX];
char buf[PATH_MAX];
int major, minor;
dev_t devno;
struct device *dev;
if (dm_snprintf(path, sizeof(path), "%sblock/%s/dev", dm_sysfs_dir(), devname) < 0) {
log_error("_get_device_for_sysfs_dev_name_using_devno: %s: dm_snprintf failed", devname);
return NULL;
}
if (!_get_sysfs_value(path, buf, sizeof(buf), 1))
return_NULL;
if (sscanf(buf, "%d:%d", &major, &minor) != 2) {
log_error("_get_device_for_sysfs_dev_name_using_devno: %s: failed to get major and minor number", devname);
return NULL;
}
devno = MKDEV((dev_t)major, (dev_t)minor);
if (!(dev = (struct device *) btree_lookup(_cache.devices, (uint32_t) devno))) {
/*
 * If we get here, the device is referenced in sysfs but not yet in /dev.
 * This may happen in some rare cases right after LVs get created - we sync
 * with udev (or alternatively create the /dev content ourselves) while the
 * VG lock is held. However, dev scan is done without the VG lock, so devices
 * may already be in sysfs while /dev is not yet updated if an LVM command is
 * called right after LV creation. This is not a problem with devtmpfs, as
 * there's at least a kernel name for the device in /dev as soon as the sysfs
 * item exists, but we still support environments without devtmpfs or with a
 * different directory used for dev nodes (e.g. our test suite). So track such
 * devices in the _cache.sysfs_only_devices btree so the vgid/lvid check
 * still works.
 */
if (!(dev = (struct device *) btree_lookup(_cache.sysfs_only_devices, (uint32_t) devno)) &&
!(dev = _insert_sysfs_dev(devno, devname)))
return_NULL;
}
return dev;
}
#define NOT_LVM_UUID "-"
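/*
 * Fill in dev->vgid and dev->lvid by parsing the device's DM UUID of the
 * form "LVM-<vg_uuid><lv_uuid>[-<suffix>]"; devices without an LVM UUID
 * get both fields set to NOT_LVM_UUID.
 */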
static int _get_vgid_and_lvid_for_dev(struct device *dev)
{
static size_t lvm_prefix_len = sizeof(UUID_PREFIX) - 1;
static size_t lvm_uuid_len = sizeof(UUID_PREFIX) - 1 + 2 * ID_LEN;
char uuid[DM_UUID_LEN];
size_t uuid_len;
if (!_get_dm_uuid_from_sysfs(uuid, sizeof(uuid), (int) MAJOR(dev->dev), (int) MINOR(dev->dev)))
return_0;
uuid_len = strlen(uuid);
/*
 * The UUID for an LV is either "LVM-<vg_uuid><lv_uuid>" or "LVM-<vg_uuid><lv_uuid>-<suffix>",
 * where vg_uuid and lv_uuid each have length ID_LEN; the suffix length is
 * unrestricted (limited only by the overall DM UUID maximum length).
 */
if (((uuid_len == lvm_uuid_len) ||
((uuid_len > lvm_uuid_len) && (uuid[lvm_uuid_len] == '-'))) &&
!strncmp(uuid, UUID_PREFIX, lvm_prefix_len)) {
/* Separate VGID and LVID part from DM UUID. */
if (!(dev->vgid = dm_pool_strndup(_cache.mem, uuid + lvm_prefix_len, ID_LEN)) ||
!(dev->lvid = dm_pool_strndup(_cache.mem, uuid + lvm_prefix_len + ID_LEN, ID_LEN)))
return_0;
} else
dev->vgid = dev->lvid = NOT_LVM_UUID;
return 1;
}
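/*
 * Walk the sysfs holders of 'dev' and, for each holder that is an LVM LV,
 * record the device in the vgid_index and lvid_index hashes so the devices
 * actually used by active LVs can later be compared with the metadata.
 */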
static int _index_dev_by_vgid_and_lvid(struct device *dev)
{
const char *devname = dev_name(dev);
char devpath[PATH_MAX];
char path[PATH_MAX];
DIR *d;
struct dirent *dirent;
struct device *holder_dev;
struct dm_list *vgid_list, *lvid_list;
struct device_list *dl_vgid, *dl_lvid;
int r = 0;
if (dev->flags & DEV_USED_FOR_LV)
/* already indexed */
return 1;
/* Get holders for device. */
if (dm_snprintf(path, sizeof(path), "%sdev/block/%d:%d/holders/", dm_sysfs_dir(), (int) MAJOR(dev->dev), (int) MINOR(dev->dev)) < 0) {
log_error("%s: dm_snprintf failed for path to holders directory.", devname);
return 0;
}
if (!(d = opendir(path))) {
if (errno == ENOENT) {
log_debug("%s: path does not exist, skipping", path);
return 1;
}
log_sys_error("opendir", path);
return 0;
}
/* Iterate over device's holders and look for LVs. */
while ((dirent = readdir(d))) {
if (!strcmp(".", dirent->d_name) ||
!strcmp("..", dirent->d_name))
continue;
if (dm_snprintf(devpath, sizeof(devpath), "%s%s", _cache.dev_dir, dirent->d_name) == -1) {
log_error("%s: dm_snprintf failed for holder %s device path.", devname, dirent->d_name);
goto out;
}
if (!(holder_dev = (struct device *) dm_hash_lookup(_cache.names, devpath))) {
/*
 * Cope with the situation where the canonical /<dev_dir>/<dirent->d_name>
 * does not exist, but some other node name or symlink does - this happens
 * in non-standard environments where someone has renamed the nodes or
 * used mknod with device names different from the actual kernel names.
 * In that case, look up the struct device by the major:minor pair taken
 * from the /sys/block/<dirent->d_name>/dev sysfs attribute.
 */
if (!(holder_dev = _get_device_for_sysfs_dev_name_using_devno(dirent->d_name))) {
log_error("%s: failed to find associated device structure for holder %s.", devname, devpath);
goto out;
}
}
/* We're only interested in a holder which is a DM device. */
if (!dm_is_dm_major(MAJOR(holder_dev->dev)))
continue;
/*
 * And if it's a DM device, we're only interested in a holder which is an LVM device.
 * Get the VG UUID and LV UUID if we don't have them already.
 */
if (!holder_dev->vgid && !_get_vgid_and_lvid_for_dev(holder_dev))
goto_out;
if (*holder_dev->vgid == *NOT_LVM_UUID)
continue;
/*
 * Do not add internal LV devices to the index.
 * A device is internal if its holder has the same VG UUID as the device itself.
 */
if (dm_is_dm_major(MAJOR(dev->dev))) {
if (!dev->vgid && !_get_vgid_and_lvid_for_dev(dev))
goto_out;
if (*dev->vgid != *NOT_LVM_UUID && !strcmp(holder_dev->vgid, dev->vgid))
continue;
}
if (!(vgid_list = _get_or_add_list_by_index_key(_cache.vgid_index, holder_dev->vgid)) ||
!(lvid_list = _get_or_add_list_by_index_key(_cache.lvid_index, holder_dev->lvid)))
goto_out;
/* Create dev list items for this device, to be filed under the holder's VGID and LVID. */
if (!(dl_vgid = _zalloc(sizeof(*dl_vgid))) ||
!(dl_lvid = _zalloc(sizeof(*dl_lvid)))) {
log_error("%s: failed to allocate dev list item.", devname);
goto out;
}
dl_vgid->dev = dl_lvid->dev = dev;
/* Add dev list item to VGID device list if it's not there already. */
if (!(dev->flags & DEV_USED_FOR_LV))
dm_list_add(vgid_list, &dl_vgid->list);
/* Add dev list item to LVID device list. */
dm_list_add(lvid_list, &dl_lvid->list);
/* Mark the device as used, i.e. indexed in the dev cache by both VGID and LVID. */
dev->flags |= DEV_USED_FOR_LV;
}
r = 1;
out:
if (closedir(d))
log_sys_error("closedir", path);
return r;
}
struct dm_list *dev_cache_get_dev_list_for_vgid(const char *vgid)
{
return dm_hash_lookup(_cache.vgid_index, vgid);
}
struct dm_list *dev_cache_get_dev_list_for_lvid(const char *lvid)
{
return dm_hash_lookup(_cache.lvid_index, lvid);
}
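/*
 * A minimal usage sketch for the index lookups above; "vgid" stands for
 * any VG UUID string the caller already has:
 *
 *	struct dm_list *devs;
 *	struct device_list *dl;
 *
 *	if ((devs = dev_cache_get_dev_list_for_vgid(vgid)))
 *		dm_list_iterate_items(dl, devs)
 *			log_debug_devs("VG uses %s", dev_name(dl->dev));
 */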
/*
* Either creates a new dev, or adds an alias to
* an existing dev.
*/
static int _insert_dev(const char *path, dev_t d)
{
struct device *dev;
static dev_t loopfile_count = 0;
int loopfile = 0;
char *path_copy;
/* Generate pretend device numbers for loopfiles */
if (!d) {
if (dm_hash_lookup(_cache.names, path))
return 1;
d = ++loopfile_count;
loopfile = 1;
}
/* Is this device already registered? */
if (!(dev = (struct device *) btree_lookup(_cache.devices, (uint32_t) d))) {
if (!(dev = (struct device *) btree_lookup(_cache.sysfs_only_devices, (uint32_t) d))) {
/* create new device */
if (loopfile) {
if (!(dev = dev_create_file(path, NULL, NULL, 0)))
return_0;
} else if (!(dev = _dev_create(d)))
return_0;
}
if (!(btree_insert(_cache.devices, (uint32_t) d, dev))) {
log_error("Couldn't insert device into binary tree.");
_free(dev);
return 0;
}
}
if (!(path_copy = dm_pool_strdup(_cache.mem, path))) {
log_error("Failed to duplicate path string.");
return 0;
}
if (!loopfile && !_add_alias(dev, path_copy)) {
log_error("Couldn't add alias to dev cache.");
return 0;
}
if (!dm_hash_insert(_cache.names, path_copy, dev)) {
log_error("Couldn't add name to hash in dev cache.");
return 0;
}
return 1;
}
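/* Allocate and return "<dir>/<name>"; the caller frees it with dm_free. */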
static char *_join(const char *dir, const char *name)
{
size_t len = strlen(dir) + strlen(name) + 2;
char *r = dm_malloc(len);
if (r)
snprintf(r, len, "%s/%s", dir, name);
return r;
}
/*
* Get rid of extra slashes in the path string.
*/
static void _collapse_slashes(char *str)
{
char *ptr;
int was_slash = 0;
for (ptr = str; *ptr; ptr++) {
if (*ptr == '/') {
if (was_slash)
continue;
was_slash = 1;
} else
was_slash = 0;
*str++ = *ptr;
}
*str = *ptr;
}
static int _insert_dir(const char *dir)
{
int n, dirent_count, r = 1;
struct dirent **dirent;
char *path;
dirent_count = scandir(dir, &dirent, NULL, alphasort);
if (dirent_count > 0) {
for (n = 0; n < dirent_count; n++) {
if (dirent[n]->d_name[0] == '.') {
free(dirent[n]);
continue;
}
if (!(path = _join(dir, dirent[n]->d_name)))
return_0;
_collapse_slashes(path);
r &= _insert(path, NULL, 1, 0);
dm_free(path);
free(dirent[n]);
}
free(dirent);
}
return r;
}
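/*
 * Register a regular file as a loopfile-backed device. Passing a zero
 * devno to _insert_dev makes it generate a pretend device number.
 */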
static int _insert_file(const char *path)
{
struct stat info;
if (stat(path, &info) < 0) {
log_sys_very_verbose("stat", path);
return 0;
}
if (!S_ISREG(info.st_mode)) {
log_debug_devs("%s: Not a regular file", path);
return 0;
}
if (!_insert_dev(path, 0))
return_0;
return 1;
}
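/* Index every device currently held in the dev cache btree by VGID and LVID. */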
static int _dev_cache_iterate_devs_for_index(void)
{
struct btree_iter *iter = btree_first(_cache.devices);
struct device *dev;
int r = 1;
while (iter) {
dev = btree_get_data(iter);
if (!_index_dev_by_vgid_and_lvid(dev))
r = 0;
iter = btree_next(iter);
}
return r;
}
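/*
 * Walk the "major:minor" entries under <sysfs_dir>/dev/block. Any devno
 * not yet known to the dev cache is registered as a sysfs-only device
 * first so that it can still be indexed by VGID and LVID.
 */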
static int _dev_cache_iterate_sysfs_for_index(const char *path)
{
char devname[PATH_MAX];
DIR *d;
struct dirent *dirent;
int major, minor;
dev_t devno;
struct device *dev;
int partial_failure = 0;
int r = 0;
if (!(d = opendir(path))) {
log_sys_error("opendir", path);
return 0;
}
while ((dirent = readdir(d))) {
if (!strcmp(".", dirent->d_name) ||
!strcmp("..", dirent->d_name))
continue;
if (sscanf(dirent->d_name, "%d:%d", &major, &minor) != 2) {
log_error("_dev_cache_iterate_sysfs_for_index: %s: failed "
"to get major and minor number", dirent->d_name);
partial_failure = 1;
continue;
}
devno = MKDEV((dev_t)major, (dev_t)minor);
if (!(dev = (struct device *) btree_lookup(_cache.devices, (uint32_t) devno)) &&
!(dev = (struct device *) btree_lookup(_cache.sysfs_only_devices, (uint32_t) devno))) {
if (!dm_device_get_name(major, minor, 1, devname, sizeof(devname)) ||
!(dev = _insert_sysfs_dev(devno, devname))) {
partial_failure = 1;
continue;
}
}
if (!_index_dev_by_vgid_and_lvid(dev))
partial_failure = 1;
}
r = !partial_failure;
if (closedir(d))
log_sys_error("closedir", path);
return r;
}
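/*
 * Build the VGID/LVID index for all devices. The availability of
 * <sysfs_dir>/dev/block is checked only once and cached; if it is
 * missing, indexing is silently skipped. With udev in use, the devices
 * already present in the cache are indexed directly, otherwise sysfs
 * is scanned.
 */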
int dev_cache_index_devs(void)
{
static int sysfs_has_dev_block = -1;
char path[PATH_MAX];
if (dm_snprintf(path, sizeof(path), "%sdev/block", dm_sysfs_dir()) < 0) {
log_error("dev_cache_index_devs: dm_snprintf failed.");
return 0;
}
/* Skip indexing if /sys/dev/block is not available. */
if (sysfs_has_dev_block == -1) {
struct stat info;
if (stat(path, &info) == 0)
sysfs_has_dev_block = 1;
else {
if (errno == ENOENT) {
sysfs_has_dev_block = 0;
return 1;
} else {
log_sys_error("stat", path);
return 0;
}
}
} else if (!sysfs_has_dev_block)
return 1;
if (obtain_device_list_from_udev() &&
udev_get_library_context())
return _dev_cache_iterate_devs_for_index(); /* with udev */
return _dev_cache_iterate_sysfs_for_index(path);
}
#ifdef UDEV_SYNC_SUPPORT
static int _device_in_udev_db(const dev_t d)
{
struct udev *udev;
struct udev_device *udev_device;
if (!(udev = udev_get_library_context()))
return_0;
if ((udev_device = udev_device_new_from_devnum(udev, 'b', d))) {
udev_device_unref(udev_device);
return 1;
}
return 0;
}
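/*
 * Enumerate all block devices known to udev and insert each device
 * node together with all of its symlinks into the dev cache.
 */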
static int _insert_udev_dir(struct udev *udev, const char *dir)
{
struct udev_enumerate *udev_enum = NULL;
struct udev_list_entry *device_entry, *symlink_entry;
const char *entry_name, *node_name, *symlink_name;
struct udev_device *device;
int r = 1;
if (!(udev_enum = udev_enumerate_new(udev)))
goto bad;
if (udev_enumerate_add_match_subsystem(udev_enum, "block") ||
udev_enumerate_scan_devices(udev_enum))
goto bad;
/*
 * Report any missing information with "log_very_verbose" only; do not
 * report it as a "warning" or "error" - the record could be removed
 * by the time we ask for more info (node name, symlink name...).
 * Otherwise, whenever anything removed *any* block device in the system
 * (even one unrelated to our operation), we would emit a warning/error,
 * which could be misleading. If there's really a problem with missing
 * information in the udev db, we can still have a look at the verbose log.
 */
udev_list_entry_foreach(device_entry, udev_enumerate_get_list_entry(udev_enum)) {
entry_name = udev_list_entry_get_name(device_entry);
if (!(device = udev_device_new_from_syspath(udev, entry_name))) {
log_very_verbose("udev failed to return a device for entry %s.",
entry_name);
continue;
}
if (!(node_name = udev_device_get_devnode(device)))
log_very_verbose("udev failed to return a device node for entry %s.",
entry_name);
else
r &= _insert(node_name, NULL, 0, 0);
udev_list_entry_foreach(symlink_entry, udev_device_get_devlinks_list_entry(device)) {
if (!(symlink_name = udev_list_entry_get_name(symlink_entry)))
log_very_verbose("udev failed to return a symlink name for entry %s.",
entry_name);
else
r &= _insert(symlink_name, NULL, 0, 0);
}
udev_device_unref(device);
}
udev_enumerate_unref(udev_enum);
return r;
bad:
log_error("Failed to enumerate udev device list.");
udev_enumerate_unref(udev_enum);
return 0;
}
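/*
 * Insert devices from all registered directories, preferring the udev
 * database as the source of device names when it is available.
 */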
static void _insert_dirs(struct dm_list *dirs)
{
struct dir_list *dl;
struct udev *udev;
int with_udev;
with_udev = obtain_device_list_from_udev() &&
(udev = udev_get_library_context());
dm_list_iterate_items(dl, &_cache.dirs) {
if (with_udev) {
if (!_insert_udev_dir(udev, dl->dir))
log_debug_devs("%s: Failed to insert devices from "
"udev-managed directory to device "
"cache fully", dl->dir);
}
else if (!_insert_dir(dl->dir))
log_debug_devs("%s: Failed to insert devices to "
"device cache fully", dl->dir);
}
}
#else /* UDEV_SYNC_SUPPORT */
static int _device_in_udev_db(const dev_t d)
{
return 0;
}
static void _insert_dirs(struct dm_list *dirs)
{
struct dir_list *dl;
dm_list_iterate_items(dl, &_cache.dirs)
_insert_dir(dl->dir);
}
#endif /* UDEV_SYNC_SUPPORT */
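/*
 * Insert a path into the dev cache. Directories are scanned (recursively
 * if "rec" is set; symbolic links to directories are skipped), block
 * devices are added and anything else is ignored. With
 * "check_with_udev_db" set, the device must also exist in the udev
 * database.
 */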
static int _insert(const char *path, const struct stat *info,
int rec, int check_with_udev_db)
{
struct stat tinfo;
if (!info) {
if (stat(path, &tinfo) < 0) {
log_sys_very_verbose("stat", path);
return 0;
}
info = &tinfo;
}
if (check_with_udev_db && !_device_in_udev_db(info->st_rdev)) {
log_very_verbose("%s: Not in udev db", path);
return 0;
}
if (S_ISDIR(info->st_mode)) { /* add a directory */
/* check it's not a symbolic link */
if (lstat(path, &tinfo) < 0) {
log_sys_very_verbose("lstat", path);
return 0;
}
if (S_ISLNK(tinfo.st_mode)) {
log_debug_devs("%s: Symbolic link to directory", path);
return 1;
}
if (rec && !_insert_dir(path))
return_0;
} else { /* add a device */
if (!S_ISBLK(info->st_mode)) {
log_debug_devs("%s: Not a block device", path);
return 1;
2001-10-25 15:34:55 +04:00
}
if (!_insert_dev(path, info->st_rdev))
return_0;
}
return 1;
}
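/*
 * Populate the cache from all registered directories and loopfiles and
 * rebuild the VGID/LVID index. With dev_scan set, the scan is repeated
 * even if one has been done already.
 */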
static void _full_scan(int dev_scan)
{
struct dir_list *dl;
if (_cache.has_scanned && !dev_scan)
return;
_insert_dirs(&_cache.dirs);
(void) dev_cache_index_devs();
dm_list_iterate_items(dl, &_cache.files)
_insert_file(dl->dir);
_cache.has_scanned = 1;
init_full_scan_done(1);
}
int dev_cache_has_scanned(void)
{
return _cache.has_scanned;
}
void dev_cache_scan(int do_scan)
{
if (!do_scan)
_cache.has_scanned = 1;
else
_full_scan(1);
}
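/*
 * Compile the devices/preferred_names config array into a single regex
 * matcher; built-in preferences are used when the array is unset or
 * empty. Note that the pattern array is filled in reverse order of the
 * config entries.
 */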
static int _init_preferred_names(struct cmd_context *cmd)
{
const struct dm_config_node *cn;
const struct dm_config_value *v;
struct dm_pool *scratch = NULL;
const char **regex;
unsigned count = 0;
int i, r = 0;
_cache.preferred_names_matcher = NULL;
if (!(cn = find_config_tree_array(cmd, devices_preferred_names_CFG, NULL)) ||
cn->v->type == DM_CFG_EMPTY_ARRAY) {
log_very_verbose("devices/preferred_names %s: "
"using built-in preferences",
cn && cn->v->type == DM_CFG_EMPTY_ARRAY ? "is empty"
: "not found in config");
return 1;
}
for (v = cn->v; v; v = v->next) {
if (v->type != DM_CFG_STRING) {
log_error("preferred_names patterns must be enclosed in quotes");
return 0;
}
count++;
}
if (!(scratch = dm_pool_create("preferred device name matcher", 1024)))
return_0;
if (!(regex = dm_pool_alloc(scratch, sizeof(*regex) * count))) {
log_error("Failed to allocate preferred device name "
"pattern list.");
goto out;
}
for (v = cn->v, i = count - 1; v; v = v->next, i--) {
if (!(regex[i] = dm_pool_strdup(scratch, v->v.str))) {
log_error("Failed to allocate a preferred device name "
"pattern.");
goto out;
}
}
if (!(_cache.preferred_names_matcher =
dm_regex_create(_cache.mem, regex, count))) {
log_error("Preferred device name pattern matcher creation failed.");
goto out;
}
r = 1;
out:
dm_pool_destroy(scratch);
return r;
}
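/*
 * Initialise the dev cache: the memory pool, the name hash, the
 * VGID/LVID indices, the device btrees and the preferred name matcher.
 */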
int dev_cache_init(struct cmd_context *cmd)
{
_cache.names = NULL;
_cache.has_scanned = 0;
if (!(_cache.mem = dm_pool_create("dev_cache", 10 * 1024)))
return_0;
if (!(_cache.names = dm_hash_create(128)) ||
!(_cache.vgid_index = dm_hash_create(32)) ||
!(_cache.lvid_index = dm_hash_create(32))) {
dm_pool_destroy(_cache.mem);
_cache.mem = 0;
return_0;
}
if (!(_cache.devices = btree_create(_cache.mem))) {
log_error("Couldn't create binary tree for dev-cache.");
goto bad;
}
if (!(_cache.sysfs_only_devices = btree_create(_cache.mem))) {
log_error("Couldn't create binary tree for sysfs-only devices in dev cache.");
goto bad;
}
if (!(_cache.dev_dir = _strdup(cmd->dev_dir))) {
log_error("strdup dev_dir failed.");
goto bad;
}
dm_list_init(&_cache.dirs);
dm_list_init(&_cache.files);
if (!_init_preferred_names(cmd))
goto_bad;
return 1;
bad:
dev_cache_exit();
return 0;
}
/*
* Returns number of devices still open.
*/
static int _check_for_open_devices(int close_immediate)
{
struct device *dev;
struct dm_hash_node *n;
int num_open = 0;
dm_hash_iterate(n, _cache.names) {
dev = (struct device *) dm_hash_get_data(_cache.names, n);
if (dev->fd >= 0) {
log_error("Device '%s' has been left open (%d remaining references).",
dev_name(dev), dev->open_count);
num_open++;
if (close_immediate)
dev_close_immediate(dev);
}
}
return num_open;
}
/*
* Returns number of devices left open.
*/
int dev_cache_check_for_open_devices(void)
{
return _check_for_open_devices(0);
}
int dev_cache_exit(void)
{
int num_open = 0;
if (_cache.names)
if ((num_open = _check_for_open_devices(1)) > 0)
log_error(INTERNAL_ERROR "%d device(s) were left open and have been closed.", num_open);
if (_cache.mem)
dm_pool_destroy(_cache.mem);
if (_cache.names)
dm_hash_destroy(_cache.names);
if (_cache.vgid_index)
dm_hash_destroy(_cache.vgid_index);
if (_cache.lvid_index)
dm_hash_destroy(_cache.lvid_index);
memset(&_cache, 0, sizeof(_cache));
return (!num_open);
}
int dev_cache_add_dir(const char *path)
{
struct dir_list *dl;
struct stat st;
if (stat(path, &st)) {
log_warn("Ignoring %s: %s.", path, strerror(errno));
/* But don't fail */
return 1;
}
if (!S_ISDIR(st.st_mode)) {
log_warn("Ignoring %s: Not a directory.", path);
return 1;
}
if (!(dl = _zalloc(sizeof(*dl) + strlen(path) + 1))) {
log_error("dir_list allocation failed");
return 0;
}
strcpy(dl->dir, path);
dm_list_add(&_cache.dirs, &dl->list);
return 1;
}
int dev_cache_add_loopfile(const char *path)
{
struct dir_list *dl;
struct stat st;
if (stat(path, &st)) {
log_warn("Ignoring %s: %s.", path, strerror(errno));
/* But don't fail */
return 1;
}
if (!S_ISREG(st.st_mode)) {
log_warn("Ignoring %s: Not a regular file.", path);
return 1;
}
if (!(dl = _zalloc(sizeof(*dl) + strlen(path) + 1))) {
log_error("dir_list allocation failed for file");
return 0;
}
strcpy(dl->dir, path);
dm_list_add(&_cache.files, &dl->list);
return 1;
}
/*
 * Check that a cached device name is still valid before returning it.
 * This should be a rare occurrence. Set "quiet" if the cache is
 * expected to be out of date.
 * FIXME Make the rest of the code pass/cache struct device instead of dev_name.
 */
const char *dev_name_confirmed(struct device *dev, int quiet)
{
struct stat buf;
const char *name;
int r;
if ((dev->flags & DEV_REGULAR))
return dev_name(dev);
while ((r = stat(name = dm_list_item(dev->aliases.n,
struct dm_str_list)->str, &buf)) ||
(buf.st_rdev != dev->dev)) {
if (r < 0) {
if (quiet)
log_sys_debug("stat", name);
else
log_sys_error("stat", name);
}
if (quiet)
log_debug_devs("Path %s no longer valid for device(%d,%d)",
name, (int) MAJOR(dev->dev),
(int) MINOR(dev->dev));
else
log_warn("Path %s no longer valid for device(%d,%d)",
name, (int) MAJOR(dev->dev),
(int) MINOR(dev->dev));
/* Remove the incorrect hash entry */
dm_hash_remove(_cache.names, name);
/* Leave list alone if there isn't an alternative name */
/* so dev_name will always find something to return. */
/* Otherwise add the name to the correct device. */
if (dm_list_size(&dev->aliases) > 1) {
dm_list_del(dev->aliases.n);
if (!r)
_insert(name, &buf, 0, obtain_device_list_from_udev());
continue;
}
/* Scanning issues this inappropriately sometimes. */
log_debug_devs("Aborting - please provide new pathname for what "
"used to be %s", name);
return NULL;
}
return dev_name(dev);
}
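/*
 * Look up a device by path name. A stale entry (the path is gone or its
 * devno changed) is dropped and the path re-inserted; a full scan is the
 * last resort. The filter, if given, is applied to everything except
 * DEV_REGULAR devices.
 */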
struct device *dev_cache_get(const char *name, struct dev_filter *f)
{
struct stat buf;
struct device *d = (struct device *) dm_hash_lookup(_cache.names, name);
int info_available = 0;
if (d && (d->flags & DEV_REGULAR))
return d;
/* If the entry's wrong, remove it */
if (stat(name, &buf) < 0) {
if (d)
dm_hash_remove(_cache.names, name);
log_sys_very_verbose("stat", name);
d = NULL;
} else
info_available = 1;
if (d && (buf.st_rdev != d->dev)) {
dm_hash_remove(_cache.names, name);
d = NULL;
}
if (!d) {
_insert(name, info_available ? &buf : NULL, 0, obtain_device_list_from_udev());
d = (struct device *) dm_hash_lookup(_cache.names, name);
if (!d) {
_full_scan(0);
d = (struct device *) dm_hash_lookup(_cache.names, name);
}
}
if (!d || (f && !(d->flags & DEV_REGULAR) && !(f->passes_filter(f, d))))
return NULL;
log_debug_devs("Using %s", dev_name(d));
return d;
}
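/* Linear search of the name hash for a device with the given devno. */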
static struct device *_dev_cache_seek_devt(dev_t dev)
{
struct device *d = NULL;
struct dm_hash_node *n = dm_hash_get_first(_cache.names);
while (n) {
d = dm_hash_get_data(_cache.names, n);
if (d->dev == dev)
return d;
n = dm_hash_get_next(_cache.names, n);
}
return NULL;
}
/*
 * TODO This is very inefficient. We probably want a hash table
 * keyed by major:minor to speed up these lookups.
 */
struct device *dev_cache_get_by_devt(dev_t dev, struct dev_filter *f)
{
char path[PATH_MAX];
const char *sysfs_dir;
struct stat info;
struct device *d = _dev_cache_seek_devt(dev);
if (d && (d->flags & DEV_REGULAR))
return d;
if (!d) {
sysfs_dir = dm_sysfs_dir();
if (sysfs_dir && *sysfs_dir) {
/* First check if the dev exists in sysfs to avoid a useless scan. */
if (dm_snprintf(path, sizeof(path), "%s/dev/block/%d:%d",
sysfs_dir, (int)MAJOR(dev), (int)MINOR(dev)) < 0) {
log_error("dm_snprintf partition failed.");
return NULL;
}
if (lstat(path, &info)) {
log_debug("No sysfs entry for %d:%d.",
(int)MAJOR(dev), (int)MINOR(dev));
return NULL;
}
}
_full_scan(0);
d = _dev_cache_seek_devt(dev);
}
return (d && (!f || (d->flags & DEV_REGULAR) ||
f->passes_filter(f, d))) ? d : NULL;
}
void dev_cache_full_scan(struct dev_filter *f)
{
if (f && f->wipe) {
f->wipe(f); /* might call _full_scan(1) */
if (!full_scan_done())
_full_scan(1);
} else
_full_scan(1);
}
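/*
 * Create an iterator over all cached devices, optionally forcing a full
 * rescan first. The filter's use count is held for the lifetime of the
 * iterator.
 */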
struct dev_iter *dev_iter_create(struct dev_filter *f, int dev_scan)
{
struct dev_iter *di = dm_malloc(sizeof(*di));
if (!di) {
log_error("dev_iter allocation failed");
return NULL;
}
if (dev_scan && !trust_cache()) {
/* Flag gets reset between each command */
if (!full_scan_done())
dev_cache_full_scan(f);
} else
_full_scan(0);
di->current = btree_first(_cache.devices);
di->filter = f;
if (di->filter)
di->filter->use_count++;
return di;
}
void dev_iter_destroy(struct dev_iter *iter)
{
if (iter->filter)
iter->filter->use_count--;
dm_free(iter);
}
static struct device *_iter_next(struct dev_iter *iter)
{
struct device *d = btree_get_data(iter->current);
iter->current = btree_next(iter->current);
return d;
}
struct device *dev_iter_get(struct dev_iter *iter)
{
while (iter->current) {
struct device *d = _iter_next(iter);
if (!iter->filter || (d->flags & DEV_REGULAR) ||
iter->filter->passes_filter(iter->filter, d)) {
log_debug_devs("Using %s", dev_name(d));
return d;
}
}
return NULL;
}
void dev_reset_error_count(struct cmd_context *cmd)
{
struct dev_iter iter;
if (!_cache.devices)
return;
iter.current = btree_first(_cache.devices);
while (iter.current)
_iter_next(&iter)->error_count = 0;
}
int dev_fd(struct device *dev)
{
return dev->fd;
}
const char *dev_name(const struct device *dev)
{
return (dev && dev->aliases.n) ? dm_list_item(dev->aliases.n, struct dm_str_list)->str :
unknown_device_name();
}