
Merge branch 'master' into 2018-04-30-vdo-support

Joe Thornber 2018-05-04 13:32:07 +01:00
commit d2840b0ec1
27 changed files with 1037 additions and 436 deletions


@ -1,5 +1,6 @@
Version 2.02.178 -
=====================================
lvconvert: don't return success on degraded -m raid1 conversion
--enable-testing switch for ./configure has been removed.
--with-snapshots switch for ./configure has been removed.
--with-mirrors switch for ./configure has been removed.

configure

@ -663,7 +663,6 @@ SBINDIR
REPLICATORS
READLINE_LIBS
RT_LIBS
RAID
PYTHON3DIR
PYTHON2DIR
PYTHON3_LIBDIRS
@ -912,7 +911,6 @@ with_default_name_mangling
with_cluster
with_snapshots
with_mirrors
with_raid
with_default_mirror_segtype
with_default_raid10_segtype
with_default_sparse_segtype
@ -1745,7 +1743,6 @@ Optional Packages:
[internal]
--with-snapshots=TYPE snapshot support: internal/shared/none [internal]
--with-mirrors=TYPE mirror support: internal/shared/none [internal]
--with-raid=TYPE raid support: internal/shared/none [internal]
--with-default-mirror-segtype=TYPE
default mirror segtype: raid1/mirror [raid1]
--with-default-raid10-segtype=TYPE
@ -8439,19 +8436,6 @@ $as_echo "#define MIRRORED_INTERNAL 1" >>confdefs.h
esac
################################################################################
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to include raid" >&5
$as_echo_n "checking whether to include raid... " >&6; }
# Check whether --with-raid was given.
if test "${with_raid+set}" = set; then :
withval=$with_raid; RAID=$withval
else
RAID=internal
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $RAID" >&5
$as_echo "$RAID" >&6; }
# Check whether --with-default-mirror-segtype was given.
if test "${with_default_mirror_segtype+set}" = set; then :
@ -8468,15 +8452,10 @@ else
DEFAULT_RAID10_SEGTYPE="raid10"
fi
case "$RAID" in
none) test "$DEFAULT_MIRROR_SEGTYPE" = "raid1" && DEFAULT_MIRROR_SEGTYPE="mirror"
test "$DEFAULT_RAID10_SEGTYPE" = "raid10" && DEFAULT_RAID10_SEGTYPE="mirror" ;;
shared) ;;
internal)
$as_echo "#define RAID_INTERNAL 1" >>confdefs.h
;;
*) as_fn_error $? "--with-raid parameter invalid" "$LINENO" 5 ;;
esac
cat >>confdefs.h <<_ACEOF
@ -13883,8 +13862,6 @@ fi
################################################################################
if [ \( "$LVM1" = shared -o "$POOL" = shared -o "$CLUSTER" = shared \
-o "$SNAPSHOTS" = shared -o "$MIRRORS" = shared \
-o "$RAID" = shared -o "$CACHE" = shared \
\) -a "$STATIC_LINK" = yes ]; then
as_fn_error $? "Features cannot be 'shared' when building statically" "$LINENO" 5
fi
@ -15579,7 +15556,6 @@ _ACEOF
################################################################################


@ -332,13 +332,6 @@ esac
################################################################################
dnl -- raid inclusion type
AC_MSG_CHECKING(whether to include raid)
AC_ARG_WITH(raid,
AC_HELP_STRING([--with-raid=TYPE],
[raid support: internal/shared/none [internal]]),
RAID=$withval, RAID=internal)
AC_MSG_RESULT($RAID)
AC_ARG_WITH(default-mirror-segtype,
AC_HELP_STRING([--with-default-mirror-segtype=TYPE],
[default mirror segtype: raid1/mirror [raid1]]),
@ -347,14 +340,9 @@ AC_ARG_WITH(default-raid10-segtype,
AC_HELP_STRING([--with-default-raid10-segtype=TYPE],
[default mirror segtype: raid10/mirror [raid10]]),
DEFAULT_RAID10_SEGTYPE=$withval, DEFAULT_RAID10_SEGTYPE="raid10")
case "$RAID" in
none) test "$DEFAULT_MIRROR_SEGTYPE" = "raid1" && DEFAULT_MIRROR_SEGTYPE="mirror"
test "$DEFAULT_RAID10_SEGTYPE" = "raid10" && DEFAULT_RAID10_SEGTYPE="mirror" ;;
shared) ;;
internal) AC_DEFINE([RAID_INTERNAL], 1,
[Define to 1 to include built-in support for raid.]) ;;
*) AC_MSG_ERROR([--with-raid parameter invalid]) ;;
esac
AC_DEFINE([RAID_INTERNAL], 1,
[Define to 1 to include built-in support for raid.])
AC_DEFINE_UNQUOTED([DEFAULT_MIRROR_SEGTYPE], ["$DEFAULT_MIRROR_SEGTYPE"],
[Default segtype used for mirror volumes.])
@ -1551,8 +1539,6 @@ AC_CHECK_LIB(dl, dlopen,
################################################################################
dnl -- Check for shared/static conflicts
if [[ \( "$LVM1" = shared -o "$POOL" = shared -o "$CLUSTER" = shared \
-o "$SNAPSHOTS" = shared -o "$MIRRORS" = shared \
-o "$RAID" = shared -o "$CACHE" = shared \
\) -a "$STATIC_LINK" = yes ]]; then
AC_MSG_ERROR([Features cannot be 'shared' when building statically])
fi
@ -2042,7 +2028,6 @@ AC_SUBST(PYTHON2DIR)
AC_SUBST(PYTHON3DIR)
AC_SUBST(QUORUM_CFLAGS)
AC_SUBST(QUORUM_LIBS)
AC_SUBST(RAID)
AC_SUBST(RT_LIBS)
AC_SUBST(READLINE_LIBS)
AC_SUBST(REPLICATORS)


@ -903,7 +903,6 @@ int init_clvm(struct dm_hash_table *excl_uuid)
return 0;
}
cmd->is_clvmd = 1;
cmd->cmd_line = "clvmd";
/* Check lvm.conf is setup for cluster-LVM */


@ -14,7 +14,6 @@
#define _LVM_CLOG_LOGGING_H
#define _GNU_SOURCE
#define _FILE_OFFSET_BITS 64
#include "configure.h"
#include <stdio.h>


@ -29,6 +29,7 @@ SOURCES =\
datastruct/btree.c \
datastruct/str_list.c \
device/bcache.c \
device/bcache-utils.c \
device/dev-cache.c \
device/dev-ext.c \
device/dev-io.c \


@ -2161,36 +2161,40 @@ static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
if (lv && lv_pre)
goto skip_read;
vg = lvmcache_get_saved_vg(vgid, 0);
vg_pre = lvmcache_get_saved_vg(vgid, 1);
if (!vg || !vg_pre) {
log_debug("lv_suspend dropping both saved vgs and rereading");
lvmcache_drop_saved_vgid(vgid);
vg = vg_read_by_vgid(cmd, vgid, 0);
vg_pre = vg_read_by_vgid(cmd, vgid, 1);
if (!vg || !vg_pre) {
log_error("lv_suspend could not find vgid %.8s vg %p vg_pre %p",
vgid, vg, vg_pre);
if (!(vg = lvmcache_get_saved_vg(vgid, 0))) {
log_debug("lv_suspend did not find saved_vg %.8s so reading", vgid);
if (!(vg = vg_read_by_vgid(cmd, vgid, 0))) {
log_error("lv_suspend could not read vgid %.8s", vgid);
goto out;
}
log_debug("lv_suspend using read vg %s %d %p", vg->name, vg->seqno, vg);
} else {
log_debug("lv_suspend using saved_vg %s %d %p", vg->name, vg->seqno, vg);
}
/*
* Note that vg and vg_pre returned by vg_read_by_vgid will
* not be the same as saved_vg_old/saved_vg_new that would
* be returned by lvmcache_get_saved_vg() because the saved_vg's
* are copies of the vg struct that is created by _vg_read.
* (Should we grab and use the saved_vg to use here instead of
* the vg returned by vg_read_by_vgid?)
*/
if ((vg->status & EXPORTED_VG) || (vg_pre->status & EXPORTED_VG)) {
log_error("Volume group \"%s\" is exported", vg->name);
if (!(vg_pre = lvmcache_get_saved_vg(vgid, 1))) {
log_debug("lv_suspend did not find pre saved_vg %.8s so reading", vgid);
if (!(vg_pre = vg_read_by_vgid(cmd, vgid, 1))) {
log_error("lv_suspend could not read pre vgid %.8s", vgid);
goto out;
}
log_debug("lv_suspend using pre read vg %s %d %p", vg_pre->name, vg_pre->seqno, vg_pre);
} else {
log_debug("lv_suspend using pre saved_vg %s %d %p", vg_pre->name, vg_pre->seqno, vg_pre);
}
/*
* Note that vg and vg_pre returned by vg_read_by_vgid will
* not be the same as saved_vg_old/saved_vg_new that would
* be returned by lvmcache_get_saved_vg() because the saved_vg's
* are copies of the vg struct that is created by _vg_read.
* (Should we grab and use the saved_vg to use here instead of
* the vg returned by vg_read_by_vgid?)
*/
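/*
 * Context for the two lookups above: suspend needs both the committed
 * metadata (vg, describing the tables that are currently active) and
 * the precommitted metadata (vg_pre, whose tables are preloaded so the
 * following resume switches over to them).
 */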
if ((vg->status & EXPORTED_VG) || (vg_pre->status & EXPORTED_VG)) {
log_error("Volume group \"%s\" is exported", vg->name);
goto out;
}
lv = lv_to_free = find_lv_in_vg_by_lvid(vg, lvid);

lib/cache/lvmcache.c

@ -63,11 +63,12 @@ struct lvmcache_vginfo {
int seqno;
int independent_metadata_location; /* metadata read from independent areas */
int scan_summary_mismatch; /* vgsummary from devs had mismatching seqno or checksum */
};
struct saved_vg {
/*
* The following are not related to lvmcache or vginfo,
* but are borrowing the vginfo to store the data.
* saved_vg_* are used only by clvmd.
* It is not related to lvmcache or vginfo.
*
* For activation/deactivation, these are used to avoid
* clvmd rereading a VG for each LV that is activated.
@ -83,26 +84,19 @@ struct lvmcache_vginfo {
*
* saved_vg_committed is set to 1 when clvmd gets
* LCK_VG_COMMIT from vg_commit().
*
* This data does not really belong in lvmcache, it's unrelated
* to lvmcache or vginfo, but it's just a convenient place
* for clvmd to stash the VG (since the same caller isn't
* present to pass the VG to both suspend and resume in the
* case of clvmd.)
*/
char vgid[ID_LEN + 1];
int saved_vg_committed;
char *saved_vg_old_buf;
struct dm_config_tree *saved_vg_old_cft;
struct volume_group *saved_vg_old;
char *saved_vg_new_buf;
struct dm_config_tree *saved_vg_new_cft;
struct volume_group *saved_vg_new;
struct dm_list saved_vg_to_free;
};
static struct dm_hash_table *_pvid_hash = NULL;
static struct dm_hash_table *_vgid_hash = NULL;
static struct dm_hash_table *_vgname_hash = NULL;
static struct dm_hash_table *_lock_hash = NULL;
static struct dm_hash_table *_saved_vg_hash = NULL;
static DM_LIST_INIT(_vginfos);
static DM_LIST_INIT(_found_duplicate_devs);
static DM_LIST_INIT(_unused_duplicate_devs);
@ -113,7 +107,7 @@ static int _vg_global_lock_held = 0; /* Global lock held when cache wiped? */
static int _found_duplicate_pvs = 0; /* If we never see a duplicate PV we can skip checking for them later. */
static int _suppress_lock_ordering = 0;
int lvmcache_init(void)
int lvmcache_init(struct cmd_context *cmd)
{
/*
* FIXME add a proper lvmcache_locking_reset() that
@ -137,6 +131,11 @@ int lvmcache_init(void)
if (!(_lock_hash = dm_hash_create(128)))
return 0;
if (cmd->is_clvmd) {
if (!(_saved_vg_hash = dm_hash_create(128)))
return 0;
}
/*
* Reinitialising the cache clears the internal record of
* which locks are held. The global lock can be held during
@ -192,136 +191,205 @@ static void _update_cache_lock_state(const char *vgname, int locked)
_update_cache_vginfo_lock_state(vginfo, locked);
}
static void _saved_vg_free(struct lvmcache_vginfo *vginfo, int free_old, int free_new)
static struct saved_vg *_saved_vg_from_vgid(const char *vgid)
{
if (free_old) {
if (vginfo->saved_vg_old) {
log_debug_cache("lvmcache: free saved_vg %s old %p",
vginfo->saved_vg_old->name,
vginfo->saved_vg_old);
struct saved_vg *svg;
char id[ID_LEN + 1] __attribute__((aligned(8)));
vginfo->saved_vg_old->saved_in_clvmd = 0;
/* vgid not necessarily NULL-terminated */
(void) dm_strncpy(id, vgid, sizeof(id));
if (!(svg = dm_hash_lookup(_saved_vg_hash, id))) {
log_debug_cache("lvmcache: no saved_vg for vgid \"%s\"", id);
return NULL;
}
return svg;
}
static void _saved_vg_inval(struct saved_vg *svg, int inval_old, int inval_new)
{
struct vg_list *vgl;
/*
* In practice there appears to only ever be a single invalidated vg,
* so making saved_vg_to_free a list instead of a pointer is overkill.
* But, without proof otherwise, safer to keep the list.
*/
if (inval_old && svg->saved_vg_old) {
log_debug_cache("lvmcache: inval saved_vg %s old %p",
svg->saved_vg_old->name, svg->saved_vg_old);
if ((vgl = dm_zalloc(sizeof(*vgl)))) {
vgl->vg = svg->saved_vg_old;
dm_list_add(&svg->saved_vg_to_free, &vgl->list);
}
if (vginfo->saved_vg_old_buf)
dm_free(vginfo->saved_vg_old_buf);
if (vginfo->saved_vg_old_cft)
dm_config_destroy(vginfo->saved_vg_old_cft);
if (vginfo->saved_vg_old)
release_vg(vginfo->saved_vg_old);
svg->saved_vg_old = NULL;
}
vginfo->saved_vg_old_buf = NULL;
vginfo->saved_vg_old_cft = NULL;
vginfo->saved_vg_old = NULL;
if (inval_new && svg->saved_vg_new) {
log_debug_cache("lvmcache: inval saved_vg %s new pre %p",
svg->saved_vg_new->name, svg->saved_vg_new);
if ((vgl = dm_zalloc(sizeof(*vgl)))) {
vgl->vg = svg->saved_vg_new;
dm_list_add(&svg->saved_vg_to_free, &vgl->list);
}
svg->saved_vg_new = NULL;
}
}
static void _saved_vg_free(struct saved_vg *svg, int free_old, int free_new)
{
struct vg_list *vgl, *vgl2;
struct volume_group *vg;
if (free_old) {
if ((vg = svg->saved_vg_old)) {
log_debug_cache("lvmcache: free saved_vg old %s %.8s %d old %p",
vg->name, (char *)&vg->id, vg->seqno, vg);
vg->saved_in_clvmd = 0;
release_vg(vg);
svg->saved_vg_old = NULL;
vg = NULL;
}
dm_list_iterate_items_safe(vgl, vgl2, &svg->saved_vg_to_free) {
log_debug_cache("lvmcache: free saved_vg_to_free %s %.8s %d %p",
vgl->vg->name, (char *)&vgl->vg->id, vgl->vg->seqno, vgl->vg);
dm_list_del(&vgl->list);
vgl->vg->saved_in_clvmd = 0;
release_vg(vgl->vg);
}
}
if (free_new) {
if (vginfo->saved_vg_new) {
log_debug_cache("lvmcache: free saved_vg %s new pre %p",
vginfo->saved_vg_new->name,
vginfo->saved_vg_new);
if ((vg = svg->saved_vg_new)) {
log_debug_cache("lvmcache: free saved_vg pre %s %.8s %d %p",
vg->name, (char *)&vg->id, vg->seqno, vg);
vginfo->saved_vg_new->saved_in_clvmd = 0;
vg->saved_in_clvmd = 0;
release_vg(vg);
svg->saved_vg_new = NULL;
vg = NULL;
}
if (vginfo->saved_vg_new_buf)
dm_free(vginfo->saved_vg_new_buf);
if (vginfo->saved_vg_new_cft)
dm_config_destroy(vginfo->saved_vg_new_cft);
if (vginfo->saved_vg_new)
release_vg(vginfo->saved_vg_new);
vginfo->saved_vg_new_buf = NULL;
vginfo->saved_vg_new_cft = NULL;
vginfo->saved_vg_new = NULL;
}
}
static void _drop_metadata(const char *vgname, int drop_precommitted)
{
struct lvmcache_vginfo *vginfo;
struct saved_vg *svg;
if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, NULL)))
return;
if (!(svg = _saved_vg_from_vgid(vginfo->vgid)))
return;
if (drop_precommitted)
_saved_vg_free(vginfo, 0, 1);
_saved_vg_free(svg, 0, 1);
else
_saved_vg_free(vginfo, 1, 1);
_saved_vg_free(svg, 1, 1);
}
void lvmcache_save_vg(struct volume_group *vg, int precommitted)
{
struct lvmcache_vginfo *vginfo;
struct saved_vg *svg;
struct format_instance *fid;
struct format_instance_ctx fic;
struct volume_group *save_vg = NULL;
struct dm_config_tree *susp_cft = NULL;
char *susp_buf = NULL;
struct dm_config_tree *save_cft = NULL;
const struct format_type *fmt;
char *save_buf = NULL;
size_t size;
int new = precommitted;
int old = !precommitted;
if (!(vginfo = lvmcache_vginfo_from_vgid((const char *)&vg->id)))
goto_bad;
/* already saved */
if (old && vginfo->saved_vg_old &&
(vginfo->saved_vg_old->seqno == vg->seqno))
return;
/* already saved */
if (new && vginfo->saved_vg_new &&
(vginfo->saved_vg_new->seqno == vg->seqno))
return;
_saved_vg_free(vginfo, old, new);
if (!(size = export_vg_to_buffer(vg, &susp_buf)))
if (!(svg = _saved_vg_from_vgid((const char *)&vg->id))) {
/* Nothing is saved yet for this vg */
if (!(svg = dm_zalloc(sizeof(*svg))))
return;
dm_list_init(&svg->saved_vg_to_free);
dm_strncpy(svg->vgid, (const char *)vg->id.uuid, sizeof(svg->vgid));
if (!dm_hash_insert(_saved_vg_hash, svg->vgid, svg)) {
log_error("lvmcache: failed to insert saved_vg %s", svg->vgid);
return;
}
} else {
/* Nothing to do if we've already saved this seqno */
if (old && svg->saved_vg_old && (svg->saved_vg_old->seqno == vg->seqno))
return;
if (new && svg->saved_vg_new && (svg->saved_vg_new->seqno == vg->seqno))
return;
/* Invalidate the existing saved_vg that will be replaced */
_saved_vg_inval(svg, old, new);
}
if (!(size = export_vg_to_buffer(vg, &save_buf)))
goto_bad;
fmt = vg->fid->fmt;
fic.type = FMT_INSTANCE_MDAS | FMT_INSTANCE_AUX_MDAS;
fic.context.vg_ref.vg_name = vginfo->vgname;
fic.context.vg_ref.vg_id = vginfo->vgid;
if (!(fid = vginfo->fmt->ops->create_instance(vginfo->fmt, &fic)))
fic.context.vg_ref.vg_name = vg->name;
fic.context.vg_ref.vg_id = svg->vgid;
if (!(fid = fmt->ops->create_instance(fmt, &fic)))
goto_bad;
if (!(susp_cft = config_tree_from_string_without_dup_node_check(susp_buf)))
if (!(save_cft = config_tree_from_string_without_dup_node_check(save_buf)))
goto_bad;
if (!(save_vg = import_vg_from_config_tree(susp_cft, fid)))
if (!(save_vg = import_vg_from_config_tree(save_cft, fid)))
goto_bad;
dm_free(save_buf);
dm_config_destroy(save_cft);
save_vg->saved_in_clvmd = 1;
if (old) {
vginfo->saved_vg_old_buf = susp_buf;
vginfo->saved_vg_old_cft = susp_cft;
vginfo->saved_vg_old = save_vg;
log_debug_cache("lvmcache saved old vg %s seqno %d %p",
svg->saved_vg_old = save_vg;
log_debug_cache("lvmcache: saved old vg %s seqno %d %p",
save_vg->name, save_vg->seqno, save_vg);
} else {
vginfo->saved_vg_new_buf = susp_buf;
vginfo->saved_vg_new_cft = susp_cft;
vginfo->saved_vg_new = save_vg;
log_debug_cache("lvmcache saved pre vg %s seqno %d %p",
svg->saved_vg_new = save_vg;
log_debug_cache("lvmcache: saved pre vg %s seqno %d %p",
save_vg->name, save_vg->seqno, save_vg);
}
return;
bad:
_saved_vg_free(vginfo, old, new);
log_debug_cache("lvmcache failed to save pre %d vg %s", precommitted, vg->name);
if (save_buf)
dm_free(save_buf);
if (save_cft)
dm_config_destroy(save_cft);
_saved_vg_inval(svg, old, new);
log_debug_cache("lvmcache: failed to save pre %d vg %s", precommitted, vg->name);
}
struct volume_group *lvmcache_get_saved_vg(const char *vgid, int precommitted)
{
struct lvmcache_vginfo *vginfo;
struct saved_vg *svg;
struct volume_group *vg = NULL;
int new = precommitted;
int old = !precommitted;
if (!(vginfo = lvmcache_vginfo_from_vgid(vgid)))
if (!(svg = _saved_vg_from_vgid(vgid)))
goto out;
/*
@ -330,130 +398,116 @@ struct volume_group *lvmcache_get_saved_vg(const char *vgid, int precommitted)
*/
if (new)
vg = vginfo->saved_vg_new;
vg = svg->saved_vg_new;
else if (old)
vg = vginfo->saved_vg_old;
vg = svg->saved_vg_old;
if (vg && old) {
if (!vginfo->saved_vg_new)
if (!svg->saved_vg_new)
log_debug_cache("lvmcache: get old saved_vg %d %s %p",
vg->seqno, vg->name, vg);
else
log_debug_cache("lvmcache: get old saved_vg %d %s %p new is %d %p",
vg->seqno, vg->name, vg,
vginfo->saved_vg_new->seqno,
vginfo->saved_vg_new);
svg->saved_vg_new->seqno,
svg->saved_vg_new);
}
if (vg && new) {
if (!vginfo->saved_vg_old)
log_debug_cache("lvmcache: get new (pre) saved_vg %d %s %p",
if (!svg->saved_vg_old)
log_debug_cache("lvmcache: get new saved_vg %d %s %p",
vg->seqno, vg->name, vg);
else
log_debug_cache("lvmcache: get new (pre) saved_vg %d %s %p old is %d %p",
log_debug_cache("lvmcache: get new saved_vg %d %s %p old is %d %p",
vg->seqno, vg->name, vg,
vginfo->saved_vg_old->seqno,
vginfo->saved_vg_old);
svg->saved_vg_old->seqno,
svg->saved_vg_old);
/* Do we need to actually set saved_vg_old to match saved_vg_new?
* By just dropping old, we force a subsequent request for old to
* reread it rather than just using new. */
if (svg->saved_vg_old && (svg->saved_vg_old->seqno < vg->seqno)) {
log_debug_cache("lvmcache: inval saved_vg_old %d %p for new %d %p %s",
svg->saved_vg_old->seqno, svg->saved_vg_old,
vg->seqno, vg, vg->name);
if (vginfo->saved_vg_old && (vginfo->saved_vg_old->seqno < vg->seqno)) {
log_debug_cache("lvmcache: drop saved_vg_old because new invalidates");
_saved_vg_free(vginfo, 1, 0);
_saved_vg_inval(svg, 1, 0);
}
}
if (!vg && new && vginfo->saved_vg_old)
if (!vg && new && svg->saved_vg_old)
log_warn("lvmcache_get_saved_vg pre %d wanted new but only have old %d %s",
precommitted,
vginfo->saved_vg_old->seqno,
vginfo->saved_vg_old->name);
svg->saved_vg_old->seqno,
svg->saved_vg_old->name);
if (!vg && old && vginfo->saved_vg_new)
if (!vg && old && svg->saved_vg_new)
log_warn("lvmcache_get_saved_vg pre %d wanted old but only have new %d %s",
precommitted,
vginfo->saved_vg_new->seqno,
vginfo->saved_vg_new->name);
svg->saved_vg_new->seqno,
svg->saved_vg_new->name);
out:
if (!vg)
log_debug_cache("lvmcache no saved vg %s pre %d", vgid, precommitted);
log_debug_cache("lvmcache: no saved pre %d %s", precommitted, vgid);
return vg;
}
struct volume_group *lvmcache_get_saved_vg_latest(const char *vgid)
{
struct lvmcache_vginfo *vginfo;
struct saved_vg *svg;
struct volume_group *vg = NULL;
int old = 0;
int new = 0;
if (!(vginfo = lvmcache_vginfo_from_vgid(vgid)))
if (!(svg = _saved_vg_from_vgid(vgid)))
goto out;
if (vginfo->saved_vg_committed) {
vg = vginfo->saved_vg_new;
if (svg->saved_vg_committed) {
vg = svg->saved_vg_new;
new = 1;
} else {
vg = vginfo->saved_vg_old;
vg = svg->saved_vg_old;
old = 1;
}
if (vg && old) {
if (!vginfo->saved_vg_new)
if (!svg->saved_vg_new)
log_debug_cache("lvmcache: get_latest old saved_vg %d %s %p",
vg->seqno, vg->name, vg);
else
log_debug_cache("lvmcache: get_latest old saved_vg %d %s %p new is %d %p",
vg->seqno, vg->name, vg,
vginfo->saved_vg_new->seqno,
vginfo->saved_vg_new);
svg->saved_vg_new->seqno,
svg->saved_vg_new);
}
if (vg && new) {
if (!vginfo->saved_vg_old)
log_debug_cache("lvmcache: get_latest new (pre) saved_vg %d %s %p",
if (!svg->saved_vg_old)
log_debug_cache("lvmcache: get_latest new saved_vg %d %s %p",
vg->seqno, vg->name, vg);
else
log_debug_cache("lvmcache: get_latest new (pre) saved_vg %d %s %p old is %d %p",
log_debug_cache("lvmcache: get_latest new saved_vg %d %s %p old is %d %p",
vg->seqno, vg->name, vg,
vginfo->saved_vg_old->seqno,
vginfo->saved_vg_old);
svg->saved_vg_old->seqno,
svg->saved_vg_old);
/* Do we need to actually set saved_vg_old to match saved_vg_new?
* By just dropping old, we force a subsequent request for old to
* reread it rather than just using new. */
if (svg->saved_vg_old && (svg->saved_vg_old->seqno < vg->seqno)) {
log_debug_cache("lvmcache: inval saved_vg_old %d %p for new %d %p %s",
svg->saved_vg_old->seqno, svg->saved_vg_old,
vg->seqno, vg, vg->name);
if (vginfo->saved_vg_old && (vginfo->saved_vg_old->seqno < vg->seqno)) {
log_debug_cache("lvmcache: drop saved_vg_old because new invalidates");
_saved_vg_free(vginfo, 1, 0);
_saved_vg_inval(svg, 1, 0);
}
}
out:
if (!vg)
log_debug_cache("lvmcache no saved vg %s", vgid);
log_debug_cache("lvmcache: no saved vg latest %s", vgid);
return vg;
}
void lvmcache_drop_saved_vg(struct volume_group *vg)
{
struct lvmcache_vginfo *vginfo;
if (!(vginfo = lvmcache_vginfo_from_vgid((const char *)&vg->id)))
return;
_saved_vg_free(vginfo, 1, 1);
}
void lvmcache_drop_saved_vgid(const char *vgid)
{
struct lvmcache_vginfo *vginfo;
struct saved_vg *svg;
if (!(vginfo = lvmcache_vginfo_from_vgid(vgid)))
return;
_saved_vg_free(vginfo, 1, 1);
if ((svg = _saved_vg_from_vgid(vgid)))
_saved_vg_inval(svg, 1, 1);
}
/*
@ -464,15 +518,20 @@ void lvmcache_drop_saved_vgid(const char *vgid)
void lvmcache_commit_metadata(const char *vgname)
{
struct lvmcache_vginfo *vginfo;
struct saved_vg *svg;
if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, NULL)))
return;
vginfo->saved_vg_committed = 1;
if ((svg = _saved_vg_from_vgid(vginfo->vgid)))
svg->saved_vg_committed = 1;
}
void lvmcache_drop_metadata(const char *vgname, int drop_precommitted)
{
if (!_saved_vg_hash)
return;
if (lvmcache_vgname_is_locked(VG_GLOBAL))
return;
@ -550,11 +609,6 @@ int lvmcache_verify_lock_order(const char *vgname)
void lvmcache_lock_vgname(const char *vgname, int read_only __attribute__((unused)))
{
if (!_lock_hash && !lvmcache_init()) {
log_error("Internal cache initialisation failed");
return;
}
if (dm_hash_lookup(_lock_hash, vgname))
log_error(INTERNAL_ERROR "Nested locking attempted on VG %s.",
vgname);
@ -1268,6 +1322,11 @@ int lvmcache_label_rescan_vg(struct cmd_context *cmd, const char *vgname, const
label_scan_devs(cmd, &devs);
if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, vgid))) {
log_warn("VG info not found after rescan of %s", vgname);
return 0;
}
return 1;
}
@ -1321,11 +1380,6 @@ int lvmcache_label_scan(struct cmd_context *cmd)
_scanning_in_progress = 1;
if (!_vgname_hash && !lvmcache_init()) {
log_error("Internal cache initialisation failed");
goto out;
}
/* FIXME: can this happen? */
if (!cmd->full_filter) {
log_error("label scan is missing full filter");
@ -1620,7 +1674,6 @@ static int _free_vginfo(struct lvmcache_vginfo *vginfo)
dm_free(vginfo->system_id);
dm_free(vginfo->vgname);
dm_free(vginfo->creation_host);
_saved_vg_free(vginfo, 1, 1);
if (*vginfo->vgid && _vgid_hash &&
lvmcache_vginfo_from_vgid(vginfo->vgid) == vginfo)
@ -1956,11 +2009,6 @@ out:
int lvmcache_add_orphan_vginfo(const char *vgname, struct format_type *fmt)
{
if (!_lock_hash && !lvmcache_init()) {
log_error("Internal cache initialisation failed");
return 0;
}
return _lvmcache_update_vgname(NULL, vgname, vgname, 0, "", fmt);
}
@ -2330,6 +2378,11 @@ static void _lvmcache_destroy_lockname(struct dm_hash_node *n)
dm_hash_get_key(_lock_hash, n));
}
static void _destroy_saved_vg(struct saved_vg *svg)
{
_saved_vg_free(svg, 1, 1);
}
void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset)
{
struct dm_hash_node *n;
@ -2366,6 +2419,12 @@ void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset)
_lock_hash = NULL;
}
if (_saved_vg_hash) {
dm_hash_iter(_saved_vg_hash, (dm_hash_iterate_fn) _destroy_saved_vg);
dm_hash_destroy(_saved_vg_hash);
_saved_vg_hash = NULL;
}
if (!dm_list_empty(&_vginfos))
log_error(INTERNAL_ERROR "_vginfos list should be empty");
dm_list_init(&_vginfos);
@ -2386,9 +2445,16 @@ void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset)
_destroy_duplicate_device_list(&_found_duplicate_devs); /* should be empty anyway */
_found_duplicate_pvs = 0;
if (retain_orphans)
if (!init_lvmcache_orphans(cmd))
stack;
if (retain_orphans) {
struct format_type *fmt;
lvmcache_init(cmd);
dm_list_iterate_items(fmt, &cmd->formats) {
if (!lvmcache_add_orphan_vginfo(fmt->orphan_vg_name, fmt))
stack;
}
}
}
int lvmcache_pvid_is_locked(const char *pvid) {

lib/cache/lvmcache.h

@ -63,7 +63,7 @@ struct lvmcache_vgsummary {
int seqno;
};
int lvmcache_init(void);
int lvmcache_init(struct cmd_context *cmd);
void lvmcache_allow_reads_with_lvmetad(void);
void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset);
@ -213,12 +213,15 @@ int lvmcache_get_vg_devs(struct cmd_context *cmd,
struct dm_list *devs);
void lvmcache_set_independent_location(const char *vgname);
int lvmcache_scan_mismatch(struct cmd_context *cmd, const char *vgname, const char *vgid);
/*
* These are clvmd-specific functions and are not related to lvmcache.
* FIXME: rename these with a clvm_ prefix in place of lvmcache_
*/
void lvmcache_save_vg(struct volume_group *vg, int precommitted);
struct volume_group *lvmcache_get_saved_vg(const char *vgid, int precommitted);
struct volume_group *lvmcache_get_saved_vg_latest(const char *vgid);
void lvmcache_drop_saved_vg(struct volume_group *vg);
void lvmcache_drop_saved_vgid(const char *vgid);
int lvmcache_scan_mismatch(struct cmd_context *cmd, const char *vgname, const char *vgid);
#endif
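For orientation, here is a rough sketch of the clvmd call sequence these helpers serve, pieced together from the comments in lvmcache.c above; the function, ordering and names are illustrative only, not code from this commit:

/* Illustrative sketch only: the clvmd flow behind the saved_vg helpers. */
static void clvmd_saved_vg_example(struct volume_group *vg, const char *vgid)
{
	struct volume_group *latest;

	/* While suspending, stash both metadata versions so that each LV
	 * activation does not reread the VG from disk. */
	lvmcache_save_vg(vg, 0);	/* committed ("old") copy */
	lvmcache_save_vg(vg, 1);	/* precommitted ("new") copy */

	/* vg_commit() sends LCK_VG_COMMIT, which marks the saved VG committed. */
	lvmcache_commit_metadata(vg->name);

	/* Resume can then reuse the newest saved copy. */
	latest = lvmcache_get_saved_vg_latest(vgid);
	(void) latest;

	/* Drop the stash when the metadata is invalidated. */
	lvmcache_drop_saved_vgid(vgid);
}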


@ -1053,7 +1053,7 @@ static int _init_dev_cache(struct cmd_context *cmd)
return 1;
}
#define MAX_FILTERS 9
#define MAX_FILTERS 10
static struct dev_filter *_init_lvmetad_filter_chain(struct cmd_context *cmd)
{
@ -1814,7 +1814,7 @@ out:
}
/* Entry point */
struct cmd_context *create_toolcontext(unsigned is_long_lived,
struct cmd_context *create_toolcontext(unsigned is_clvmd,
const char *system_dir,
unsigned set_buffering,
unsigned threaded,
@ -1841,7 +1841,8 @@ struct cmd_context *create_toolcontext(unsigned is_long_lived,
log_error("Failed to allocate command context");
return NULL;
}
cmd->is_long_lived = is_long_lived;
cmd->is_long_lived = is_clvmd;
cmd->is_clvmd = is_clvmd;
cmd->threaded = threaded ? 1 : 0;
cmd->handles_missing_pvs = 0;
cmd->handles_unknown_segments = 0;
@ -1970,6 +1971,10 @@ struct cmd_context *create_toolcontext(unsigned is_long_lived,
if (!_init_formats(cmd))
goto_out;
if (!lvmcache_init(cmd))
goto_out;
/* FIXME: move into lvmcache_init */
if (!init_lvmcache_orphans(cmd))
goto_out;
@ -2191,6 +2196,9 @@ int refresh_toolcontext(struct cmd_context *cmd)
if (!_init_formats(cmd))
return_0;
if (!lvmcache_init(cmd))
return_0;
if (!init_lvmcache_orphans(cmd))
return_0;


@ -236,7 +236,7 @@ struct cmd_context {
* system_dir may be NULL to use the default value.
* The environment variable LVM_SYSTEM_DIR always takes precedence.
*/
struct cmd_context *create_toolcontext(unsigned is_long_lived,
struct cmd_context *create_toolcontext(unsigned is_clvmd,
const char *system_dir,
unsigned set_buffering,
unsigned threaded,

lib/device/bcache-utils.c (new file)

@ -0,0 +1,229 @@
/*
* Copyright (C) 2018 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "bcache.h"
// FIXME: need to define this in a common place (that doesn't pull in deps)
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
//----------------------------------------------------------------
static void byte_range_to_block_range(struct bcache *cache, uint64_t start, size_t len,
block_address *bb, block_address *be)
{
block_address block_size = bcache_block_sectors(cache) << SECTOR_SHIFT;
*bb = start / block_size;
*be = (start + len + block_size - 1) / block_size;
}
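// Worked example (illustrative numbers): with 4KiB blocks (block_size =
// 4096), start = 6000 and len = 3000 cover bytes [6000, 9000), giving
// *bb = 6000 / 4096 = 1 and *be = (9000 + 4095) / 4096 = 3, i.e. the
// half-open block range [1, 3).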
static uint64_t _min(uint64_t lhs, uint64_t rhs)
{
if (rhs < lhs)
return rhs;
return lhs;
}
//----------------------------------------------------------------
void bcache_prefetch_bytes(struct bcache *cache, int fd, uint64_t start, size_t len)
{
block_address bb, be;
byte_range_to_block_range(cache, start, len, &bb, &be);
while (bb < be) {
bcache_prefetch(cache, fd, bb);
bb++;
}
}
//----------------------------------------------------------------
bool bcache_read_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data)
{
struct block *b;
block_address bb, be;
uint64_t block_size = bcache_block_sectors(cache) << SECTOR_SHIFT;
uint64_t block_offset = start % block_size;
bcache_prefetch_bytes(cache, fd, start, len);
byte_range_to_block_range(cache, start, len, &bb, &be);
for (; bb != be; bb++) {
if (!bcache_get(cache, fd, bb, 0, &b, NULL))
return false;
size_t blen = _min(block_size - block_offset, len);
memcpy(data, ((unsigned char *) b->data) + block_offset, blen);
bcache_put(b);
block_offset = 0;
len -= blen;
data = ((unsigned char *) data) + blen;
}
return true;
}
//----------------------------------------------------------------
// Writing bytes and zeroing bytes are very similar, so we factor out
// this common code.
struct updater;
typedef bool (*partial_update_fn)(struct updater *u, int fd, block_address bb, uint64_t offset, size_t len);
typedef bool (*whole_update_fn)(struct updater *u, int fd, block_address bb, block_address be);
struct updater {
struct bcache *cache;
partial_update_fn partial_fn;
whole_update_fn whole_fn;
void *data;
};
static bool _update_bytes(struct updater *u, int fd, uint64_t start, size_t len)
{
struct bcache *cache = u->cache;
block_address bb, be;
uint64_t block_size = bcache_block_sectors(cache) << SECTOR_SHIFT;
uint64_t block_offset = start % block_size;
uint64_t nr_whole;
byte_range_to_block_range(cache, start, len, &bb, &be);
// If the last block is partial, we will require a read, so let's
// prefetch it.
if ((start + len) % block_size)
bcache_prefetch(cache, fd, (start + len) / block_size);
// First block may be partial
if (block_offset) {
size_t blen = _min(block_size - block_offset, len);
if (!u->partial_fn(u, fd, bb, block_offset, blen))
return false;
len -= blen;
if (!len)
return true;
bb++;
}
// Now we write out a set of whole blocks
nr_whole = len / block_size;
if (!u->whole_fn(u, fd, bb, bb + nr_whole))
return false;
bb += nr_whole;
len -= nr_whole * block_size;
if (!len)
return true;
// Finally we write a partial end block
return u->partial_fn(u, fd, bb, 0, len);
}
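// Continuing the example above: updating bytes [6000, 9000) with 4KiB
// blocks means a partial head of 2192 bytes in block 1 (block_offset
// 1904), no whole blocks (nr_whole = 0), and a partial tail of 808
// bytes at the start of block 2.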
//----------------------------------------------------------------
static bool _write_partial(struct updater *u, int fd, block_address bb,
uint64_t offset, size_t len)
{
struct block *b;
if (!bcache_get(u->cache, fd, bb, GF_DIRTY, &b, NULL))
return false;
memcpy(((unsigned char *) b->data) + offset, u->data, len);
u->data = ((unsigned char *) u->data) + len;
bcache_put(b);
return true;
}
static bool _write_whole(struct updater *u, int fd, block_address bb, block_address be)
{
struct block *b;
uint64_t block_size = bcache_block_sectors(u->cache) << SECTOR_SHIFT;
for (; bb != be; bb++) {
// We don't need to read the block since we are overwriting
// it completely.
if (!bcache_get(u->cache, fd, bb, GF_ZERO, &b, NULL))
return false;
memcpy(b->data, u->data, block_size);
u->data = ((unsigned char *) u->data) + block_size;
bcache_put(b);
}
return true;
}
bool bcache_write_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data)
{
struct updater u;
u.cache = cache;
u.partial_fn = _write_partial;
u.whole_fn = _write_whole;
u.data = data;
return _update_bytes(&u, fd, start, len);
}
//----------------------------------------------------------------
static bool _zero_partial(struct updater *u, int fd, block_address bb, uint64_t offset, size_t len)
{
struct block *b;
if (!bcache_get(u->cache, fd, bb, GF_DIRTY, &b, NULL))
return false;
memset(((unsigned char *) b->data) + offset, 0, len);
bcache_put(b);
return true;
}
static bool _zero_whole(struct updater *u, int fd, block_address bb, block_address be)
{
struct block *b;
for (; bb != be; bb++) {
if (!bcache_get(u->cache, fd, bb, GF_ZERO, &b, NULL))
return false;
bcache_put(b);
}
return true;
}
bool bcache_zero_bytes(struct bcache *cache, int fd, uint64_t start, size_t len)
{
struct updater u;
u.cache = cache;
u.partial_fn = _zero_partial;
u.whole_fn = _zero_whole;
u.data = NULL;
return _update_bytes(&u, fd, start, len);
}
//----------------------------------------------------------------
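A minimal usage sketch of the byte-level API above. Everything here is illustrative: the device path and block geometry are assumptions, and bcache_flush() comes from bcache.h rather than this file:

#include <fcntl.h>
#include <unistd.h>

#include "bcache.h"

/* Sketch: zero part of one cache block on a device. */
static bool example_zero_region(const char *path)
{
	struct io_engine *engine = create_async_io_engine();
	struct bcache *cache;
	int fd;
	bool ok;

	if (!engine)
		return false;

	/* 4KiB cache blocks (8 sectors), 64 of them, as in the unit tests;
	 * the engine is assumed to be owned by the cache from here on. */
	if (!(cache = bcache_create(8, 64, engine)))
		return false;

	/* The async engine works on O_DIRECT file descriptors. */
	if ((fd = open(path, O_RDWR | O_DIRECT)) < 0) {
		bcache_destroy(cache);
		return false;
	}

	/* Bytes [4096, 5120) end mid-block, so _update_bytes() takes the
	 * partial-tail path: _zero_partial() reads block 1, clears its
	 * first 1024 bytes and marks it dirty. */
	ok = bcache_zero_bytes(cache, fd, 4096, 1024) &&
	     bcache_flush(cache);	/* assumed API: write back dirty blocks */

	bcache_destroy(cache);
	close(fd);
	return ok;
}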


@ -14,6 +14,10 @@
#define _GNU_SOURCE
#include "bcache.h"
#include "dm-logging.h"
#include "log.h"
#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
@ -28,10 +32,6 @@
#include <sys/ioctl.h>
#include <sys/user.h>
#include "bcache.h"
#include "dm-logging.h"
#include "log.h"
#define SECTOR_SHIFT 9L
//----------------------------------------------------------------
@ -863,6 +863,11 @@ void bcache_destroy(struct bcache *cache)
dm_free(cache);
}
sector_t bcache_block_sectors(struct bcache *cache)
{
return cache->block_sectors;
}
unsigned bcache_nr_cache_blocks(struct bcache *cache)
{
return cache->nr_cache_blocks;
@ -1029,150 +1034,5 @@ bool bcache_invalidate_fd(struct bcache *cache, int fd)
return r;
}
static void byte_range_to_block_range(struct bcache *cache, off_t start, size_t len,
block_address *bb, block_address *be)
{
block_address block_size = cache->block_sectors << SECTOR_SHIFT;
*bb = start / block_size;
*be = (start + len + block_size - 1) / block_size;
}
void bcache_prefetch_bytes(struct bcache *cache, int fd, off_t start, size_t len)
{
block_address bb, be;
byte_range_to_block_range(cache, start, len, &bb, &be);
while (bb < be) {
bcache_prefetch(cache, fd, bb);
bb++;
}
}
static off_t _min(off_t lhs, off_t rhs)
{
if (rhs < lhs)
return rhs;
return lhs;
}
// These functions are all utilities, they should only use the public
// interface to bcache.
// FIXME: there's common code that can be factored out of these 3
bool bcache_read_bytes(struct bcache *cache, int fd, off_t start, size_t len, void *data)
{
struct block *b;
block_address bb, be, i;
unsigned char *udata = data;
off_t block_size = cache->block_sectors << SECTOR_SHIFT;
int errors = 0;
byte_range_to_block_range(cache, start, len, &bb, &be);
for (i = bb; i < be; i++)
bcache_prefetch(cache, fd, i);
for (i = bb; i < be; i++) {
if (!bcache_get(cache, fd, i, 0, &b, NULL)) {
log_error("bcache_read_bytes failed to get block %u fd %d bb %u be %u",
(uint32_t)i, fd, (uint32_t)bb, (uint32_t)be);
errors++;
continue;
}
if (i == bb) {
off_t block_offset = start % block_size;
size_t blen = _min(block_size - block_offset, len);
memcpy(udata, ((unsigned char *) b->data) + block_offset, blen);
len -= blen;
udata += blen;
} else {
size_t blen = _min(block_size, len);
memcpy(udata, b->data, blen);
len -= blen;
udata += blen;
}
bcache_put(b);
}
return errors ? false : true;
}
bool bcache_write_bytes(struct bcache *cache, int fd, off_t start, size_t len, void *data)
{
struct block *b;
block_address bb, be, i;
unsigned char *udata = data;
off_t block_size = cache->block_sectors << SECTOR_SHIFT;
int errors = 0;
byte_range_to_block_range(cache, start, len, &bb, &be);
for (i = bb; i < be; i++)
bcache_prefetch(cache, fd, i);
for (i = bb; i < be; i++) {
if (!bcache_get(cache, fd, i, GF_DIRTY, &b, NULL)) {
log_error("bcache_write_bytes failed to get block %u fd %d bb %u be %u",
(uint32_t)i, fd, (uint32_t)bb, (uint32_t)be);
errors++;
continue;
}
if (i == bb) {
off_t block_offset = start % block_size;
size_t blen = _min(block_size - block_offset, len);
memcpy(((unsigned char *) b->data) + block_offset, udata, blen);
len -= blen;
udata += blen;
} else {
size_t blen = _min(block_size, len);
memcpy(b->data, udata, blen);
len -= blen;
udata += blen;
}
bcache_put(b);
}
return errors ? false : true;
}
bool bcache_write_zeros(struct bcache *cache, int fd, off_t start, size_t len)
{
struct block *b;
block_address bb, be, i;
off_t block_size = cache->block_sectors << SECTOR_SHIFT;
int errors = 0;
byte_range_to_block_range(cache, start, len, &bb, &be);
for (i = bb; i < be; i++)
bcache_prefetch(cache, fd, i);
for (i = bb; i < be; i++) {
if (!bcache_get(cache, fd, i, GF_DIRTY, &b, NULL)) {
log_error("bcache_write_bytes failed to get block %u fd %d bb %u be %u",
(uint32_t)i, fd, (uint32_t)bb, (uint32_t)be);
errors++;
continue;
}
if (i == bb) {
off_t block_offset = start % block_size;
size_t blen = _min(block_size - block_offset, len);
memset(((unsigned char *) b->data) + block_offset, 0, blen);
len -= blen;
} else {
size_t blen = _min(block_size, len);
memset(b->data, 0, blen);
len -= blen;
}
bcache_put(b);
}
return errors ? false : true;
}
//----------------------------------------------------------------


@ -15,12 +15,12 @@
#ifndef BCACHE_H
#define BCACHE_H
#include "libdevmapper.h"
#include <linux/fs.h>
#include <stdint.h>
#include <stdbool.h>
#include "libdevmapper.h"
/*----------------------------------------------------------------*/
// FIXME: move somewhere more sensible
@ -97,6 +97,7 @@ enum bcache_get_flags {
GF_DIRTY = (1 << 1)
};
sector_t bcache_block_sectors(struct bcache *cache);
unsigned bcache_nr_cache_blocks(struct bcache *cache);
unsigned bcache_max_prefetches(struct bcache *cache);
@ -150,18 +151,18 @@ bool bcache_invalidate(struct bcache *cache, int fd, block_address index);
*/
bool bcache_invalidate_fd(struct bcache *cache, int fd);
/*
* Prefetches the blocks necessary to satisfy a byte range.
*/
void bcache_prefetch_bytes(struct bcache *cache, int fd, off_t start, size_t len);
/*
* Reads and writes the bytes. Returns false if errors occur.
*/
bool bcache_read_bytes(struct bcache *cache, int fd, off_t start, size_t len, void *data);
bool bcache_write_bytes(struct bcache *cache, int fd, off_t start, size_t len, void *data);
bool bcache_write_zeros(struct bcache *cache, int fd, off_t start, size_t len);
//----------------------------------------------------------------
// The next four functions are utilities written in terms of the above api.
// Prefetches the blocks necessary to satisfy a byte range.
void bcache_prefetch_bytes(struct bcache *cache, int fd, uint64_t start, size_t len);
/*----------------------------------------------------------------*/
// Reads, writes and zeroes bytes. Returns false if errors occur.
bool bcache_read_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data);
bool bcache_write_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data);
bool bcache_zero_bytes(struct bcache *cache, int fd, uint64_t start, size_t len);
//----------------------------------------------------------------
#endif


@ -988,7 +988,7 @@ bool dev_write_zeros(struct device *dev, off_t start, size_t len)
}
}
if (!bcache_write_zeros(scan_bcache, dev->bcache_fd, start, len)) {
if (!bcache_zero_bytes(scan_bcache, dev->bcache_fd, start, len)) {
log_error("dev_write_zeros %s at %u bcache write failed invalidate fd %d",
dev_name(dev), (uint32_t)start, dev->bcache_fd);
label_scan_invalidate(dev);


@ -4599,7 +4599,7 @@ struct volume_group *vg_read_by_vgid(struct cmd_context *cmd,
if (!(vgname = lvmcache_vgname_from_vgid(cmd->mem, vgid))) {
log_debug_metadata("Reading VG by vgid %.8s no VG name found, retrying.", vgid);
lvmcache_destroy(cmd, 0, 0);
lvmcache_destroy(cmd, 1, 0);
label_scan_destroy(cmd);
lvmcache_label_scan(cmd);
warn_flags |= SKIP_RESCAN;
@ -4630,7 +4630,7 @@ struct volume_group *vg_read_by_vgid(struct cmd_context *cmd,
return vg;
scan:
lvmcache_destroy(cmd, 0, 0);
lvmcache_destroy(cmd, 1, 0);
label_scan_destroy(cmd);
lvmcache_label_scan(cmd);
warn_flags |= SKIP_RESCAN;


@ -3200,6 +3200,27 @@ static int _raid_remove_images(struct logical_volume *lv, int yes,
return 1;
}
/* Check if single SubLV @slv is degraded. */
static int _sublv_is_degraded(const struct logical_volume *slv)
{
return !slv || lv_is_partial(slv) || lv_is_virtual(slv);
}
/* Return failed component SubLV count for @lv. */
static uint32_t _lv_get_nr_failed_components(const struct logical_volume *lv)
{
uint32_t r = 0, s;
struct lv_segment *seg = first_seg(lv);
for (s = 0; s < seg->area_count; s++)
if (_sublv_is_degraded(seg_lv(seg, s)) ||
(seg->meta_areas &&
_sublv_is_degraded(seg_metalv(seg, s))))
r++;
return r;
}
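/* Illustration: in the 2-legged raid1 from the new test below, disabling
 * the first PV leaves that leg's rimage (and rmeta) SubLV partial, so this
 * returns 1 and _lv_raid_change_image_count() refuses "-m" changes until
 * "lvconvert --repair" has replaced the failed leg. */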
/*
* _lv_raid_change_image_count
* new_count: The absolute count of images (e.g. '2' for a 2-way mirror)
@ -3215,12 +3236,25 @@ static int _lv_raid_change_image_count(struct logical_volume *lv, int yes, uint3
struct dm_list *allocate_pvs, struct dm_list *removal_lvs,
int commit, int use_existing_area_len)
{
int r;
uint32_t old_count = lv_raid_image_count(lv);
/* If there's failed component SubLVs, require repair first! */
if (lv_is_raid(lv) &&
_lv_get_nr_failed_components(lv) &&
new_count >= old_count) {
log_error("Can't change number of mirrors of degraded %s.",
display_lvname(lv));
log_error("Please run \"lvconvert --repair %s\" first.",
display_lvname(lv));
r = 0;
} else
r = 1;
if (old_count == new_count) {
log_warn("WARNING: %s already has image count of %d.",
display_lvname(lv), new_count);
return 1;
return r;
}
/*
@ -6755,10 +6789,8 @@ static int _lv_raid_rebuild_or_replace(struct logical_volume *lv,
return 0;
}
if (lv_is_virtual(seg_lv(raid_seg, s)) ||
lv_is_virtual(seg_metalv(raid_seg, s)) ||
lv_is_partial(seg_lv(raid_seg, s)) ||
lv_is_partial(seg_metalv(raid_seg, s)) ||
if (_sublv_is_degraded(seg_lv(raid_seg, s)) ||
_sublv_is_degraded(seg_metalv(raid_seg, s)) ||
lv_is_on_pvs(seg_lv(raid_seg, s), remove_pvs) ||
lv_is_on_pvs(seg_metalv(raid_seg, s), remove_pvs)) {
match_count++;
@ -7088,10 +7120,8 @@ static int _partial_raid_lv_is_redundant(const struct logical_volume *lv)
if (!(i % copies))
rebuilds_per_group = 0;
if (lv_is_partial(seg_lv(raid_seg, s)) ||
lv_is_partial(seg_metalv(raid_seg, s)) ||
lv_is_virtual(seg_lv(raid_seg, s)) ||
lv_is_virtual(seg_metalv(raid_seg, s)))
if (_sublv_is_degraded(seg_lv(raid_seg, s)) ||
_sublv_is_degraded(seg_metalv(raid_seg, s)))
rebuilds_per_group++;
if (rebuilds_per_group >= copies) {
@ -7104,14 +7134,7 @@ static int _partial_raid_lv_is_redundant(const struct logical_volume *lv)
return 1; /* Redundant */
}
for (s = 0; s < raid_seg->area_count; s++) {
if (lv_is_partial(seg_lv(raid_seg, s)) ||
lv_is_partial(seg_metalv(raid_seg, s)) ||
lv_is_virtual(seg_lv(raid_seg, s)) ||
lv_is_virtual(seg_metalv(raid_seg, s)))
failed_components++;
}
failed_components = _lv_get_nr_failed_components(lv);
if (failed_components == raid_seg->area_count) {
log_verbose("All components of raid LV %s have failed.",
display_lvname(lv));


@ -23,7 +23,6 @@
#define _REENTRANT
#define _GNU_SOURCE
#define _FILE_OFFSET_BITS 64
/*
* Symbol export control macros


@ -442,11 +442,12 @@ endif
.LIBPATTERNS = lib%.so lib%.a
DEPFLAGS=-MT $@ -MMD -MP -MF $*.d
# still needed in 2018 for 32bit builds
DEFS+=-D_FILE_OFFSET_BITS=64
%.o: %.c
@echo " [CC] $<"
$(Q) $(CC) $(DEPFLAGS) -c $(INCLUDES) $(VALGRIND_CFLAGS) $(PROGS_CFLAGS) $(DEFS) $(DEFS_$@) $(WFLAGS) $(WCFLAGS) $(CFLAGS) $(CFLAGS_$@) $< -o $@
$(Q) $(CC) -c $(INCLUDES) $(VALGRIND_CFLAGS) $(PROGS_CFLAGS) $(DEFS) $(DEFS_$@) $(WFLAGS) $(WCFLAGS) $(CFLAGS) $(CFLAGS_$@) $< -o $@
%.o: %.cpp
@echo " [CXX] $<"
@ -506,9 +507,18 @@ $(LIB_STATIC): $(OBJECTS)
$(Q) $(RM) $@
$(Q) $(AR) rsv $@ $(OBJECTS) > /dev/null
%.d: $(INC_LNS)
.PRECIOUS: %.d
%.d: %.c $(INC_LNS)
@echo " [DEP] $<"
$(Q) $(MKDIR_P) $(dir $@); \
set -e; \
FILE=`echo $@ | sed 's/\\//\\\\\\//g;s/\\.d//g'`; \
DEPS=`echo $(DEPS) | sed -e 's/\\//\\\\\\//g'`; \
$(CC) -MM $(INCLUDES) $(VALGRIND_CFLAGS) $(PROGS_CFLAGS) $(DEFS) -o $@ $<; \
sed -i "s/\(.*\)\.o[ :]*/$$FILE.o $$FILE.d $$FILE.pot: $$DEPS /g" $@; \
DEPLIST=`sed 's/ \\\\//;s/.*://;' < $@`; \
echo $$DEPLIST | fmt -1 | sed 's/ //g;s/\(.*\)/\1:/' >> $@; \
[ -s $@ ] || $(RM) $@
%.mo: %.po
@echo " [MSGFMT] $<"
$(Q) $(MSGFMT) -o $@ $<


@ -1492,10 +1492,6 @@ have_thin() {
}
have_raid() {
test "$RAID" = shared -o "$RAID" = internal || {
echo "Raid is not built-in." >&2
return 1;
}
target_at_least dm-raid "$@"
# some kernels have broken mdraid bitmaps, don't use them!


@ -0,0 +1,47 @@
#!/usr/bin/env bash
# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
SKIP_WITH_LVMLOCKD=1
SKIP_WITH_LVMPOLLD=1
. lib/inittest
aux have_raid 1 3 0 || skip
aux lvmconf 'activation/raid_fault_policy = "warn"'
aux prepare_vg 3 32
get_devs
# Create 2-legged RAID1 and wait for it to complete initial resync
lvcreate --type raid1 -m 1 -l 4 -n $lv $vg "$dev1" "$dev2"
aux wait_for_sync $vg $lv
# Disable first PV thus erroring first leg
aux disable_dev "$dev1"
# Reduce VG by missing PV
vgreduce --force --removemissing $vg
check raid_leg_status $vg $lv "DA"
# Conversion to 2 legs must fail on degraded 2-legged raid1 LV
not lvconvert -y -m1 $vg/$lv
check raid_leg_status $vg $lv "DA"
# Repair has to succeed
lvconvert -y --repair $vg/$lv
aux wait_for_sync $vg $lv
check raid_leg_status $vg $lv "AA"
lvremove -ff $vg/$lv
vgremove -ff $vg


@ -12,6 +12,7 @@
UNIT_SOURCE=\
test/unit/bcache_t.c \
test/unit/bcache_utils_t.c \
test/unit/bitset_t.c \
test/unit/config_t.c \
test/unit/dmlist_t.c \

test/unit/bcache_utils_t.c (new file)

@ -0,0 +1,352 @@
/*
* Copyright (C) 2018 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License v.2.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include "bcache.h"
#include "framework.h"
#include "units.h"
//----------------------------------------------------------------
#define T_BLOCK_SIZE 4096
#define NR_BLOCKS 64
#define INIT_PATTERN 123
struct fixture {
int fd;
char fname[32];
struct bcache *cache;
};
static inline uint8_t _pattern_at(uint8_t pat, uint8_t byte)
{
return pat + byte;
}
static uint64_t byte(block_address b, uint64_t offset)
{
return b * T_BLOCK_SIZE + offset;
}
static void *_fix_init(void)
{
uint8_t buffer[T_BLOCK_SIZE];
struct io_engine *engine;
struct fixture *f = malloc(sizeof(*f));
unsigned b, i;
T_ASSERT(f);
snprintf(f->fname, sizeof(f->fname), "unit-test-XXXXXX");
f->fd = mkostemp(f->fname, O_RDWR | O_CREAT | O_EXCL);
T_ASSERT(f->fd >= 0);
for (b = 0; b < NR_BLOCKS; b++) {
for (i = 0; i < sizeof(buffer); i++)
buffer[i] = _pattern_at(INIT_PATTERN, byte(b, i));
T_ASSERT(write(f->fd, buffer, T_BLOCK_SIZE) > 0);
}
close(f->fd);
// reopen with O_DIRECT
f->fd = open(f->fname, O_RDWR | O_DIRECT);
T_ASSERT(f->fd >= 0);
engine = create_async_io_engine();
T_ASSERT(engine);
f->cache = bcache_create(T_BLOCK_SIZE / 512, NR_BLOCKS, engine);
T_ASSERT(f->cache);
return f;
}
static void _fix_exit(void *fixture)
{
struct fixture *f = fixture;
bcache_destroy(f->cache);
close(f->fd);
unlink(f->fname);
free(f);
}
//----------------------------------------------------------------
static void _verify_bytes(struct block *b, uint64_t base,
uint64_t offset, uint64_t len, uint8_t pat)
{
unsigned i;
for (i = 0; i < len; i++)
T_ASSERT_EQUAL(((uint8_t *) b->data)[offset + i], _pattern_at(pat, base + offset + i));
}
static void _zero_bytes(struct block *b, uint64_t offset, uint64_t len)
{
memset(((uint8_t *) b->data) + offset, 0, len);
}
static uint64_t _min(uint64_t lhs, uint64_t rhs)
{
return rhs < lhs ? rhs : lhs;
}
static void _verify(struct fixture *f, uint64_t byte_b, uint64_t byte_e, uint8_t pat)
{
int err;
struct block *b;
block_address bb = byte_b / T_BLOCK_SIZE;
block_address be = (byte_e + T_BLOCK_SIZE - 1) / T_BLOCK_SIZE;
uint64_t offset = byte_b % T_BLOCK_SIZE;
uint64_t blen, len = byte_e - byte_b;
// Verify via bcache_read_bytes
{
unsigned i;
size_t len2 = byte_e - byte_b;
uint8_t *buffer = malloc(len2);
T_ASSERT(bcache_read_bytes(f->cache, f->fd, byte_b, len2, buffer));
for (i = 0; i < len; i++)
T_ASSERT_EQUAL(buffer[i], _pattern_at(pat, byte_b + i));
}
// Verify again, driving bcache directly
for (; bb != be; bb++) {
T_ASSERT(bcache_get(f->cache, f->fd, bb, 0, &b, &err));
blen = _min(T_BLOCK_SIZE - offset, len);
_verify_bytes(b, bb * T_BLOCK_SIZE, offset, blen, pat);
offset = 0;
len -= blen;
bcache_put(b);
}
}
static void _verify_zeroes(struct fixture *f, uint64_t byte_b, uint64_t byte_e)
{
int err;
unsigned i;
struct block *b;
block_address bb = byte_b / T_BLOCK_SIZE;
block_address be = (byte_e + T_BLOCK_SIZE - 1) / T_BLOCK_SIZE;
uint64_t offset = byte_b % T_BLOCK_SIZE;
uint64_t blen, len = byte_e - byte_b;
for (; bb != be; bb++) {
T_ASSERT(bcache_get(f->cache, f->fd, bb, 0, &b, &err));
blen = _min(T_BLOCK_SIZE - offset, len);
for (i = 0; i < blen; i++)
T_ASSERT(((uint8_t *) b->data)[offset + i] == 0);
offset = 0;
len -= blen;
bcache_put(b);
}
}
static void _do_write(struct fixture *f, uint64_t byte_b, uint64_t byte_e, uint8_t pat)
{
unsigned i;
size_t len = byte_e - byte_b;
uint8_t *buffer = malloc(len);
T_ASSERT(buffer);
for (i = 0; i < len; i++)
buffer[i] = _pattern_at(pat, byte_b + i);
T_ASSERT(bcache_write_bytes(f->cache, f->fd, byte_b, i, buffer));
free(buffer);
}
static void _do_zero(struct fixture *f, uint64_t byte_b, uint64_t byte_e)
{
int err;
struct block *b;
block_address bb = byte_b / T_BLOCK_SIZE;
block_address be = (byte_e + T_BLOCK_SIZE - 1) / T_BLOCK_SIZE;
uint64_t offset = byte_b % T_BLOCK_SIZE;
uint64_t blen, len = byte_e - byte_b;
for (; bb != be; bb++) {
T_ASSERT(bcache_get(f->cache, f->fd, bb, GF_DIRTY, &b, &err));
blen = _min(T_BLOCK_SIZE - offset, len);
_zero_bytes(b, offset, blen);
offset = 0;
len -= blen;
bcache_put(b);
}
}
static void _reopen(struct fixture *f)
{
struct io_engine *engine;
bcache_destroy(f->cache);
engine = create_async_io_engine();
T_ASSERT(engine);
f->cache = bcache_create(T_BLOCK_SIZE / 512, NR_BLOCKS, engine);
T_ASSERT(f->cache);
}
//----------------------------------------------------------------
static uint8_t _random_pattern(void)
{
return random();
}
static uint64_t _max_byte(void)
{
return T_BLOCK_SIZE * NR_BLOCKS;
}
static void _rwv_cycle(struct fixture *f, uint64_t b, uint64_t e)
{
uint8_t pat = _random_pattern();
_verify(f, b, e, INIT_PATTERN);
_do_write(f, b, e, pat);
_reopen(f);
_verify(f, b < 128 ? 0 : b - 128, b, INIT_PATTERN);
_verify(f, b, e, pat);
_verify(f, e, _min(e + 128, _max_byte()), INIT_PATTERN);
}
static void _test_rw_first_block(void *fixture)
{
_rwv_cycle(fixture, byte(0, 0), byte(0, T_BLOCK_SIZE));
}
static void _test_rw_last_block(void *fixture)
{
uint64_t last_block = NR_BLOCKS - 1;
_rwv_cycle(fixture, byte(last_block, 0),
byte(last_block, T_BLOCK_SIZE));
}
static void _test_rw_several_whole_blocks(void *fixture)
{
_rwv_cycle(fixture, byte(5, 0), byte(10, 0));
}
static void _test_rw_within_single_block(void *fixture)
{
_rwv_cycle(fixture, byte(7, 3), byte(7, T_BLOCK_SIZE / 2));
}
static void _test_rw_cross_one_boundary(void *fixture)
{
_rwv_cycle(fixture, byte(13, 43), byte(14, 43));
}
static void _test_rw_many_boundaries(void *fixture)
{
_rwv_cycle(fixture, byte(13, 13), byte(23, 13));
}
//----------------------------------------------------------------
static void _zero_cycle(struct fixture *f, uint64_t b, uint64_t e)
{
_verify(f, b, e, INIT_PATTERN);
_do_zero(f, b, e);
_reopen(f);
_verify(f, b < 128 ? 0 : b - 128, b, INIT_PATTERN);
_verify_zeroes(f, b, e);
_verify(f, e, _min(e + 128, _max_byte()), INIT_PATTERN);
}
static void _test_zero_first_block(void *fixture)
{
_zero_cycle(fixture, byte(0, 0), byte(0, T_BLOCK_SIZE));
}
static void _test_zero_last_block(void *fixture)
{
uint64_t last_block = NR_BLOCKS - 1;
_zero_cycle(fixture, byte(last_block, 0), byte(last_block, T_BLOCK_SIZE));
}
static void _test_zero_several_whole_blocks(void *fixture)
{
_zero_cycle(fixture, byte(5, 0), byte(10, 0));
}
static void _test_zero_within_single_block(void *fixture)
{
_zero_cycle(fixture, byte(7, 3), byte(7, T_BLOCK_SIZE / 2));
}
static void _test_zero_cross_one_boundary(void *fixture)
{
_zero_cycle(fixture, byte(13, 43), byte(14, 43));
}
static void _test_zero_many_boundaries(void *fixture)
{
_zero_cycle(fixture, byte(13, 13), byte(23, 13));
}
//----------------------------------------------------------------
#define T(path, desc, fn) register_test(ts, "/base/device/bcache/utils/" path, desc, fn)
static struct test_suite *_tests(void)
{
struct test_suite *ts = test_suite_create(_fix_init, _fix_exit);
if (!ts) {
fprintf(stderr, "out of memory\n");
exit(1);
}
T("rw-first-block", "read/write/verify the first block", _test_rw_first_block);
T("rw-last-block", "read/write/verify the last block", _test_rw_last_block);
T("rw-several-blocks", "read/write/verify several whole blocks", _test_rw_several_whole_blocks);
T("rw-within-single-block", "read/write/verify within single block", _test_rw_within_single_block);
T("rw-cross-one-boundary", "read/write/verify across one boundary", _test_rw_cross_one_boundary);
T("rw-many-boundaries", "read/write/verify many boundaries", _test_rw_many_boundaries);
T("zero-first-block", "zero the first block", _test_zero_first_block);
T("zero-last-block", "zero the last block", _test_zero_last_block);
T("zero-several-blocks", "zero several whole blocks", _test_zero_several_whole_blocks);
T("zero-within-single-block", "zero within single block", _test_zero_within_single_block);
T("zero-cross-one-boundary", "zero across one boundary", _test_zero_cross_one_boundary);
T("zero-many-boundaries", "zero many boundaries", _test_zero_many_boundaries);
return ts;
}
void bcache_utils_tests(struct dm_list *all_tests)
{
dm_list_add(all_tests, &_tests()->list);
}


@ -72,6 +72,45 @@ static void test_matching(void *fixture)
T_ASSERT_EQUAL(dm_regex_match(scanner, nonprint[i].str), nonprint[i].expected - 1);
}
static void test_kabi_query(void *fixture)
{
// Remember, matches regexes from last to first.
static const char *_patterns[] = {
".*", ".*/dev/md.*", "loop"
};
static struct {
const char *input;
int r;
} _cases[] = {
{"foo", 0},
{"/dev/mapper/vg-lvol1", 0},
{"/dev/mapper/vglvol1", 0},
{"/dev/md1", 1},
{"loop", 2},
};
int r;
unsigned i;
struct dm_pool *mem = fixture;
struct dm_regex *scanner;
scanner = dm_regex_create(mem, _patterns, DM_ARRAY_SIZE(_patterns));
T_ASSERT(scanner != NULL);
for (i = 0; i < DM_ARRAY_SIZE(_cases); i++) {
r = dm_regex_match(scanner, _cases[i].input);
if (r != _cases[i].r) {
test_fail("'%s' expected to match '%s', but matched %s",
_cases[i].input,
_cases[i].r >= DM_ARRAY_SIZE(_patterns) ? "<nothing>" : _patterns[_cases[i].r],
r >= DM_ARRAY_SIZE(_patterns) ? "<nothing>" : _patterns[r]);
}
}
}
#define T(path, desc, fn) register_test(ts, "/base/regex/" path, desc, fn)
void regex_tests(struct dm_list *all_tests)
@ -84,6 +123,7 @@ void regex_tests(struct dm_list *all_tests)
T("fingerprints", "not sure", test_fingerprints);
T("matching", "test the matcher with a variety of regexes", test_matching);
T("kabi-query", "test the matcher with some specific patterns", test_kabi_query);
dm_list_add(all_tests, &ts->list);
}


@ -21,6 +21,7 @@
// Declare the function that adds tests suites here ...
void bcache_tests(struct dm_list *suites);
void bcache_utils_tests(struct dm_list *suites);
void bitset_tests(struct dm_list *suites);
void config_tests(struct dm_list *suites);
void dm_list_tests(struct dm_list *suites);
@ -34,6 +35,7 @@ void string_tests(struct dm_list *suites);
static inline void register_all_tests(struct dm_list *suites)
{
bcache_tests(suites);
bcache_utils_tests(suites);
bitset_tests(suites);
config_tests(suites);
dm_list_tests(suites);


@ -19,7 +19,6 @@
#define _LVM_TOOL_H
#define _GNU_SOURCE
#define _FILE_OFFSET_BITS 64
#include "configure.h"


@ -111,7 +111,7 @@ int become_daemon(struct cmd_context *cmd, int skip_lvm)
if (!skip_lvm) {
reset_locking();
lvmcache_destroy(cmd, 1, 1);
if (!lvmcache_init())
if (!lvmcache_init(cmd))
/* FIXME Clean up properly here */
_exit(ECMD_FAILED);
}
@ -4622,7 +4622,7 @@ int process_each_pv(struct cmd_context *cmd,
log_verbose("Some PVs were not found in first search, retrying.");
lvmcache_destroy(cmd, 0, 0);
if (!lvmcache_init()) {
if (!lvmcache_init(cmd)) {
log_error("Failed to initalize lvm cache.");
ret_max = ECMD_FAILED;
goto out;