/*
* Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2018 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib/misc/lib.h"
#include "dev_manager.h"
#include "lib/misc/lvm-string.h"
#include "fs.h"
#include "lib/config/defaults.h"
#include "lib/metadata/segtype.h"
#include "lib/display/display.h"
#include "lib/commands/toolcontext.h"
#include "lib/activate/targets.h"
#include "lib/config/config.h"
#include "lib/activate/activate.h"
#include "lib/misc/lvm-exec.h"
#include "lib/datastruct/str_list.h"
#include "lib/misc/lvm-signal.h"
#include <limits.h>
#include <dirent.h>
#define MAX_TARGET_PARAMSIZE 50000
#define LVM_UDEV_NOSCAN_FLAG DM_SUBSYSTEM_UDEV_FLAG0
#define CRYPT_TEMP "CRYPT-TEMP"
#define CRYPT_SUBDEV "CRYPT-SUBDEV"
#define STRATIS "stratis-"
typedef enum {
PRELOAD,
ACTIVATE,
DEACTIVATE,
SUSPEND,
SUSPEND_WITH_LOCKFS,
CLEAN
} action_t;
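/*
 * Sketch of how these actions map onto device-mapper operations
 * (informal summary, not authoritative): PRELOAD loads new tables into
 * the inactive slot ahead of a suspend/resume cycle, ACTIVATE resumes
 * devices so inactive tables become live, DEACTIVATE removes nodes,
 * and the SUSPEND variants suspend I/O; SUSPEND_WITH_LOCKFS also
 * freezes a mounted filesystem before suspending.
 */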
/* This list must match lib/misc/lvm-string.c:build_dm_uuid(). */
static const char * const _uuid_suffix_list[] = { "pool", "cdata", "cmeta", "cvol", "tdata", "tmeta", "vdata", "vpool", "imeta", NULL};
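/*
 * build_dm_uuid() composes "LVM-" + VG uuid + LV uuid, optionally
 * followed by one of the suffixes above, so a layered thin-pool
 * device gets a DM uuid of the form (placeholders, not real ids):
 *   LVM-<32-char vgid><32-char lvid>-pool
 */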
struct dlid_list {
struct dm_list list;
const char *dlid;
const struct logical_volume *lv;
};
struct dev_manager {
struct dm_pool *mem;
struct cmd_context *cmd;
void *target_state;
uint32_t pvmove_mirror_count;
int flush_required;
int activation; /* building activation tree */
int suspend; /* building suspend tree */
unsigned track_pending_delete;
unsigned track_pvmove_deps;
const char *vg_name;
};
struct lv_layer {
const struct logical_volume *lv;
const char *old_name;
int visible_component;
};
int read_only_lv(const struct logical_volume *lv, const struct lv_activate_opts *laopts, const char *layer)
{
if (layer && lv_is_cow(lv))
return 0; /* Keep snapshot's COW volume writable */
if (lv_is_raid_image(lv) || lv_is_raid_metadata(lv))
return 0; /* Keep RAID SubLvs writable */
if (!layer) {
if (lv_is_thin_pool(lv) || lv_is_vdo_pool(lv))
return 1;
}
return (laopts->read_only || !(lv->status & LVM_WRITE));
}
/*
* Low level device-layer operations.
*
* Unless task is DM_DEVICE_TARGET_MSG, also calls dm_task_run()
*/
static struct dm_task *_setup_task_run(int task, struct dm_info *info,
const char *name, const char *uuid,
uint32_t *event_nr,
uint32_t major, uint32_t minor,
int with_open_count,
int with_flush,
int query_inactive)
{
char vsn[80];
unsigned maj, min;
struct dm_task *dmt;
if (!(dmt = dm_task_create(task)))
return_NULL;
if (name && !dm_task_set_name(dmt, name))
goto_out;
if (uuid && *uuid && !dm_task_set_uuid(dmt, uuid))
goto_out;
if (event_nr && !dm_task_set_event_nr(dmt, *event_nr))
goto_out;
if (major && !dm_task_set_major_minor(dmt, major, minor, 1))
goto_out;
if (activation_checks() && !dm_task_enable_checks(dmt))
goto_out;
if (query_inactive && !dm_task_query_inactive_table(dmt)) {
log_error("Failed to set query_inactive_table.");
goto out;
}
if (!with_open_count && !dm_task_no_open_count(dmt))
log_warn("WARNING: Failed to disable open_count.");
if (!with_flush && !dm_task_no_flush(dmt))
log_warn("WARNING: Failed to set no_flush.");
switch (task) {
case DM_DEVICE_TARGET_MSG:
return dmt; /* TARGET_MSG needs more local tweaking before task_run() */
case DM_DEVICE_LIST:
/* Use 'newuuid' only with DM version that supports it */
if (driver_version(vsn, sizeof(vsn)) &&
(sscanf(vsn, "%u.%u", &maj, &min) == 2) &&
(maj == 4 ? min >= 19 : maj > 4) &&
!dm_task_set_newuuid(dmt, " ")) // new uuid has no meaning here
log_warn("WARNING: Failed to query uuid with LIST.");
break;
default:
break;
}
if (!dm_task_run(dmt))
goto_out;
if (info && !dm_task_get_info(dmt, info))
goto_out;
return dmt;
out:
dm_task_destroy(dmt);
return NULL;
}
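/*
 * A typical call, sketched from the signature above: query device info
 * by dlid, without open_count, with flushing allowed and no
 * inactive-table query. (Illustration only; the real callers follow.)
 *
 *   struct dm_info info;
 *   struct dm_task *dmt;
 *
 *   if ((dmt = _setup_task_run(DM_DEVICE_INFO, &info, NULL, dlid, NULL,
 *                              0, 0, 0, 1, 0)))
 *           dm_task_destroy(dmt);
 */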
/* Read info from DM VDO 'stats' message */
static int _vdo_pool_message_stats(struct dm_pool *mem,
const struct logical_volume *lv,
struct lv_status_vdo *status)
{
const char *response;
const char *dlid;
struct dm_task *dmt = NULL;
int r = 0;
unsigned i;
const char *p;
struct vdo_msg_elem {
const char *name;
uint64_t *val;
} const vme[] = { /* list of properties lvm2 wants to parse */
{ "dataBlocksUsed", &status->data_blocks_used },
{ "logicalBlocksUsed", &status->logical_blocks_used }
};
for (i = 0; i < DM_ARRAY_SIZE(vme); ++i)
*vme[i].val = ULLONG_MAX;
if (!(dlid = build_dm_uuid(mem, lv, lv_layer(lv))))
return_0;
if (!(dmt = _setup_task_run(DM_DEVICE_TARGET_MSG, NULL, NULL, dlid, 0, 0, 0, 0, 0, 0)))
return_0;
if (!dm_task_set_message(dmt, "stats"))
goto_out;
if (!dm_task_run(dmt))
goto_out;
log_debug_activation("Checking VDO pool stats message for LV %s.",
display_lvname(lv));
if ((response = dm_task_get_message_response(dmt))) {
for (i = 0; i < DM_ARRAY_SIZE(vme); ++i) {
errno = 0;
if (!(p = strstr(response, vme[i].name)) ||
!(p = strchr(p, ':')) ||
((*vme[i].val = strtoul(p + 1, NULL, 10)) == ULLONG_MAX) || errno) {
log_debug("Cannot parse %s in VDO DM stats message.", vme[i].name);
*vme[i].val = ULLONG_MAX;
goto out;
}
if (*vme[i].val != ULLONG_MAX)
log_debug("VDO property %s = " FMTu64, vme[i].name, *vme[i].val);
}
}
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
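/*
 * The parser above only requires each property name to be followed by
 * a colon and a decimal value somewhere in the message response, e.g.
 * (values invented for illustration):
 *   dataBlocksUsed : 1880
 *   logicalBlocksUsed : 4096
 */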
static int _get_segment_status_from_target_params(const char *target_name,
const char *params,
const struct dm_info *dminfo,
struct lv_seg_status *seg_status)
{
const struct lv_segment *seg = seg_status->seg;
const struct segment_type *segtype = seg->segtype;
seg_status->type = SEG_STATUS_UNKNOWN; /* Parsing failed */
/* Switch to snapshot segtype status logic for merging origin */
/* This is a 'dynamic' decision; both states are valid */
if (lv_is_merging_origin(seg->lv)) {
if (!strcmp(target_name, TARGET_NAME_SNAPSHOT_ORIGIN)) {
seg_status->type = SEG_STATUS_NONE;
return 1; /* Merge has not yet started */
}
if (!strcmp(target_name, TARGET_NAME_SNAPSHOT_MERGE) &&
!(segtype = get_segtype_from_string(seg->lv->vg->cmd, TARGET_NAME_SNAPSHOT)))
return_0;
/* Merging, parse 'snapshot' status of merge progress */
}
if (!params) {
log_warn("WARNING: Cannot find matching %s segment for %s.",
segtype->name, display_lvname(seg_status->seg->lv));
return 0;
}
/* Validate target_name segtype from DM table with lvm2 metadata segtype */
if (!lv_is_locked(seg->lv) &&
strcmp(segtype->name, target_name) &&
/* If the kernel's type isn't an exact match, is it compatible? */
(!segtype->ops->target_status_compatible ||
!segtype->ops->target_status_compatible(target_name))) {
log_warn("WARNING: Detected %s segment type does not match expected type %s for %s.",
target_name, segtype->name, display_lvname(seg_status->seg->lv));
return 0;
}
/* TODO: move into segtype method */
if (segtype_is_cache(segtype)) {
if (!dm_get_status_cache(seg_status->mem, params, &(seg_status->cache)))
return_0;
seg_status->type = SEG_STATUS_CACHE;
} else if (segtype_is_raid(segtype)) {
if (!dm_get_status_raid(seg_status->mem, params, &seg_status->raid))
return_0;
seg_status->type = SEG_STATUS_RAID;
} else if (segtype_is_thin_volume(segtype)) {
if (!dm_get_status_thin(seg_status->mem, params, &seg_status->thin))
return_0;
seg_status->type = SEG_STATUS_THIN;
} else if (segtype_is_thin_pool(segtype)) {
if (!dm_get_status_thin_pool(seg_status->mem, params, &seg_status->thin_pool))
return_0;
seg_status->type = SEG_STATUS_THIN_POOL;
} else if (segtype_is_snapshot(segtype)) {
if (!dm_get_status_snapshot(seg_status->mem, params, &seg_status->snapshot))
return_0;
seg_status->type = SEG_STATUS_SNAPSHOT;
} else if (segtype_is_vdo_pool(segtype)) {
if (!_vdo_pool_message_stats(seg_status->mem, seg->lv, &seg_status->vdo_pool))
stack;
if (!parse_vdo_pool_status(seg_status->mem, seg->lv, params, dminfo, &seg_status->vdo_pool))
return_0;
seg_status->type = SEG_STATUS_VDO_POOL;
} else if (segtype_is_writecache(segtype)) {
if (!dm_get_status_writecache(seg_status->mem, params, &(seg_status->writecache)))
return_0;
seg_status->type = SEG_STATUS_WRITECACHE;
} else if (segtype_is_integrity(segtype)) {
if (!dm_get_status_integrity(seg_status->mem, params, &(seg_status->integrity)))
return_0;
seg_status->type = SEG_STATUS_INTEGRITY;
} else
/*
* TODO: Add support for other segment types too!
* Status not supported
*/
seg_status->type = SEG_STATUS_NONE;
return 1;
}
typedef enum {
INFO, /* DM_DEVICE_INFO ioctl */
STATUS, /* DM_DEVICE_STATUS ioctl */
} info_type_t;
/* Return length of segment depending on type and reshape_len */
static uint32_t _seg_len(const struct lv_segment *seg)
{
uint32_t reshape_len = seg_is_raid(seg) ? ((seg->area_count - seg->segtype->parity_devs) * seg->reshape_len) : 0;
return seg->len - reshape_len;
}
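/*
 * Worked example: a raid5 segment with area_count = 5, parity_devs = 1
 * and seg->reshape_len = 2 extents hides (5 - 1) * 2 = 8 extents of
 * out-of-place reshape space, so _seg_len() returns seg->len - 8.
 */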
static int _info_run(const char *dlid, struct dm_info *dminfo,
uint32_t *read_ahead,
struct lv_seg_status *seg_status,
const char *name_check,
int with_open_count, int with_read_ahead,
uint32_t major, uint32_t minor)
{
int r = 0;
struct dm_task *dmt;
int dmtask;
int with_flush; /* TODO: arg for _info_run */
void *target = NULL;
uint64_t target_start, target_length, start, extent_size, length, length_crop = 0;
char *target_name, *target_params;
const char *devname;
if (seg_status) {
dmtask = DM_DEVICE_STATUS;
with_flush = 0;
} else {
dmtask = DM_DEVICE_INFO;
with_flush = 1; /* irrelevant for DM_DEVICE_INFO */
}
if (!(dmt = _setup_task_run(dmtask, dminfo, NULL, dlid, 0, major, minor,
with_open_count, with_flush, 0)))
return_0;
if (name_check && dminfo->exists &&
(devname = dm_task_get_name(dmt)) &&
(strcmp(name_check, devname) != 0))
dminfo->exists = 0; /* mismatching name -> device does not exist */
if (with_read_ahead && read_ahead && dminfo->exists) {
if (!dm_task_get_read_ahead(dmt, read_ahead))
goto_out;
} else if (read_ahead)
*read_ahead = DM_READ_AHEAD_NONE;
/* Query status only for active device */
if (seg_status && dminfo->exists) {
extent_size = length = seg_status->seg->lv->vg->extent_size;
start = extent_size * seg_status->seg->le;
length *= _seg_len(seg_status->seg);
/* Uses max DM_THIN_MAX_METADATA_SIZE sectors for metadata device */
if (lv_is_thin_pool_metadata(seg_status->seg->lv) &&
(length > DM_THIN_MAX_METADATA_SIZE))
length_crop = DM_THIN_MAX_METADATA_SIZE;
/* Uses virtual size with headers for VDO pool device */
if (lv_is_vdo_pool(seg_status->seg->lv))
length = get_vdo_pool_virtual_size(seg_status->seg);
if (lv_is_integrity(seg_status->seg->lv))
length = seg_status->seg->integrity_data_sectors;
do {
target = dm_get_next_target(dmt, target, &target_start,
&target_length, &target_name, &target_params);
if ((start == target_start) &&
((length == target_length) ||
((lv_is_vdo_pool(seg_status->seg->lv)) && /* should fit within extent size */
(length < target_length) && ((length + extent_size) > target_length)) ||
(length_crop && (length_crop == target_length))))
break; /* Keep target_params when matching segment is found */
target_params = NULL; /* Marking this target_params unusable */
} while (target);
if (!target_name ||
!_get_segment_status_from_target_params(target_name, target_params, dminfo, seg_status))
stack;
}
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
/*
* ignore_blocked_mirror_devices
* @dev
* @start
* @length
* @mirror_status_str
*
* When a DM 'mirror' target is created with 'block_on_error' or
* 'handle_errors', it will block I/O if there is a device failure
* until the mirror is reconfigured. Thus, LVM should never attempt
* to read labels from a mirror that has a failed device. (LVM
* commands are issued to repair mirrors; and if LVM is blocked
* attempting to read a mirror, a circular dependency would be created.)
*
* This function is a slimmed-down version of lib/mirror/mirrored.c:
* _mirrored_transient_status().
*
* If a failed device is detected in the status string, then it must be
* determined if 'block_on_error' or 'handle_errors' was used when
* creating the mirror. This info can only be determined from the mirror
* table. The 'dev', 'start', 'length' trio allow us to correlate the
* 'mirror_status_str' with the correct device table in order to check
* for blocking.
*
* Returns: 1 if mirror should be ignored, 0 if safe to use
*/
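/*
 * For illustration, a mirror status string as handed to
 * dm_get_status_mirror() below; image health characters are
 * 'A' (alive) or 'D' (dead), here with a failed second image:
 *   2 253:6 253:7 400/400 1 AD 3 disk 253:5 A
 */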
static int _ignore_blocked_mirror_devices(struct cmd_context *cmd,
struct device *dev,
uint64_t start, uint64_t length,
char *mirror_status_str)
{
struct dm_pool *mem;
struct dm_status_mirror *sm;
unsigned i, check_for_blocking = 0;
uint64_t s,l;
char *p, *params, *target_type = NULL;
void *next = NULL;
struct dm_task *dmt = NULL;
int r = 0;
struct device *tmp_dev;
char buf[16];
if (!(mem = dm_pool_create("blocked_mirrors", 128)))
return_0;
if (!dm_get_status_mirror(mem, mirror_status_str, &sm))
goto_out;
for (i = 0; i < sm->dev_count; ++i)
if (sm->devs[i].health != DM_STATUS_MIRROR_ALIVE) {
log_debug_activation("%s: Mirror image %d marked as failed.",
dev_name(dev), i);
check_for_blocking = 1;
}
if (!check_for_blocking && sm->log_count) {
if (sm->logs[0].health != DM_STATUS_MIRROR_ALIVE) {
log_debug_activation("%s: Mirror log device marked as failed.",
dev_name(dev));
check_for_blocking = 1;
} else {
if (dm_snprintf(buf, sizeof(buf), "%u:%u",
sm->logs[0].major, sm->logs[0].minor) < 0)
goto_out;
if (!(tmp_dev = dev_create_file(buf, NULL, NULL, 0)))
goto_out;
tmp_dev->dev = MKDEV(sm->logs[0].major, sm->logs[0].minor);
if (dm_device_is_usable(cmd, tmp_dev, (struct dev_usable_check_params)
{ .check_empty = 1,
.check_blocked = 1,
.check_suspended = ignore_suspended_devices(),
.check_error_target = 1,
.check_reserved = 0 }, NULL))
goto out; /* safe to use */
stack;
}
}
if (!check_for_blocking) {
r = 1;
goto out;
}
/*
* We avoid another system call if we can, but if a device is
* dead, we have no choice but to look up the table too.
*/
if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, NULL, NULL, NULL, NULL,
MAJOR(dev->dev), MINOR(dev->dev), 0, 1, 0)))
goto_out;
do {
next = dm_get_next_target(dmt, next, &s, &l,
&target_type, &params);
if ((s == start) && (l == length) &&
target_type && params) {
if (strcmp(target_type, TARGET_NAME_MIRROR))
goto_out;
if (((p = strstr(params, " block_on_error")) &&
(p[15] == '\0' || p[15] == ' ')) ||
((p = strstr(params, " handle_errors")) &&
(p[14] == '\0' || p[14] == ' '))) {
log_debug_activation("%s: I/O blocked to mirror device.",
dev_name(dev));
goto out;
}
}
} while (next);
r = 1;
out:
if (dmt)
dm_task_destroy(dmt);
dm_pool_destroy(mem);
return r;
}
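/*
 * Check whether the DM device with the given major:minor exists
 * and currently has its I/O suspended.
 */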
static int _device_is_suspended(int major, int minor)
{
struct dm_task *dmt;
struct dm_info info;
if (!(dmt = _setup_task_run(DM_DEVICE_INFO, &info,
NULL, NULL, NULL,
major, minor, 0, 0, 0)))
return_0;
dm_task_destroy(dmt);
return (info.exists && info.suspended);
}
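/*
 * Walk the device's table and, for snapshot and snapshot-origin
 * targets, parse the origin/COW device numbers from the params
 * and report whether any of those components is suspended.
 */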
static int _ignore_suspended_snapshot_component(struct device *dev)
{
struct dm_task *dmt;
void *next = NULL;
char *params, *target_type = NULL;
uint64_t start, length;
int major1, minor1, major2, minor2;
int r = 0;
if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, NULL,
NULL, NULL, NULL,
MAJOR(dev->dev), MINOR(dev->dev), 0, 1, 0)))
return_0;
do {
next = dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
if (!target_type)
continue;
if (!strcmp(target_type, TARGET_NAME_SNAPSHOT)) {
if (!params || sscanf(params, "%d:%d %d:%d", &major1, &minor1, &major2, &minor2) != 4) {
log_warn("WARNING: Incorrect snapshot table found for %u:%u.",
MAJOR(dev->dev), MINOR(dev->dev));
goto out;
}
r = r || _device_is_suspended(major1, minor1) || _device_is_suspended(major2, minor2);
} else if (!strcmp(target_type, TARGET_NAME_SNAPSHOT_ORIGIN)) {
if (!params || sscanf(params, "%d:%d", &major1, &minor1) != 2) {
log_warn("WARNING: Incorrect snapshot-origin table found for %u:%u.",
MAJOR(dev->dev), MINOR(dev->dev));
goto out;
}
r = r || _device_is_suspended(major1, minor1);
}
} while (next);
out:
dm_task_destroy(dmt);
return r;
}
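/*
 * Find the thin-pool behind a thin device from its table line,
 * then query the pool status; a pool that is read-only or out of
 * data space makes the thin device unusable for scanning.
 */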
static int _ignore_unusable_thins(struct device *dev)
{
/* TODO make function for thin testing */
struct dm_pool *mem;
struct dm_status_thin_pool *status;
struct dm_task *dmt = NULL;
void *next = NULL;
uint64_t start, length;
char *target_type = NULL;
char *params;
int minor, major;
int r = 0;
if (!(mem = dm_pool_create("unusable_thins", 128)))
return_0;
if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, NULL, NULL, NULL, NULL,
MAJOR(dev->dev), MINOR(dev->dev), 0, 1, 0)))
goto_out;
dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
if (!params || sscanf(params, "%d:%d", &major, &minor) != 2) {
log_warn("WARNING: Cannot get thin-pool major:minor for thin device %u:%u.",
MAJOR(dev->dev), MINOR(dev->dev));
goto out;
}
dm_task_destroy(dmt);
if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, NULL, NULL, NULL, NULL,
major, minor, 0, 0, 0)))
goto_out;
dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
if (!dm_get_status_thin_pool(mem, params, &status))
goto_out;
if (status->read_only || status->out_of_data_space) {
log_warn("WARNING: %s: Thin's thin-pool needs inspection.",
dev_name(dev));
goto out;
}
r = 1;
out:
if (dmt)
dm_task_destroy(dmt);
dm_pool_destroy(mem);
return r;
}
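/* Report whether the snapshot status params mark the snapshot as invalid. */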
static int _ignore_invalid_snapshot(const char *params)
{
struct dm_status_snapshot *s;
struct dm_pool *mem;
int r = 0;
if (!(mem = dm_pool_create("invalid snapshots", 128)))
return_0;
if (!dm_get_status_snapshot(mem, params, &s))
stack;
else
r = s->invalid;
dm_pool_destroy(mem);
return r;
}
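/* Report whether the raid status params show a "frozen" sync action. */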
static int _ignore_frozen_raid(struct device *dev, const char *params)
{
struct dm_status_raid *s;
struct dm_pool *mem;
int r = 0;
if (!(mem = dm_pool_create("frozen raid", 128)))
return_0;
if (!dm_get_status_raid(mem, params, &s))
stack;
else if (s->sync_action && !strcmp(s->sync_action, "frozen")) {
log_warn("WARNING: %s frozen raid device (%u:%u) needs inspection.",
dev_name(dev), MAJOR(dev->dev), MINOR(dev->dev));
r = 1;
}
dm_pool_destroy(mem);
return r;
}
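/*
 * Filter out devices with reserved UUIDs: internal LVM- devices
 * (a uuid with a private suffix, or a reserved LV name or layer),
 * cryptsetup temporary/sub devices and stratis devices.
 * Optionally recognize (and skip) any LVM- prefixed device when
 * LVs themselves are not wanted.
 */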
static int _is_usable_uuid(const struct device *dev, const char *name, const char *uuid, int check_reserved, int check_lv, int *is_lv)
{
char *vgname, *lvname, *layer;
char vg_name[NAME_LEN];
if (!check_reserved && !check_lv)
return 1;
if (!strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1)) { /* with LVM- prefix */
if (check_reserved) {
/* Check internal lvm devices */
if (strlen(uuid) > (sizeof(UUID_PREFIX) + 2 * ID_LEN)) { /* 68 with suffix */
log_debug_activation("%s: Reserved uuid %s on internal LV device %s not usable.",
dev_name(dev), uuid, name);
return 0;
}
/* Recognize some older reserved LVs just from the LV name (snapshot, pvmove...) */
vgname = vg_name;
if (!_dm_strncpy(vg_name, name, sizeof(vg_name)) ||
!dm_split_lvm_name(NULL, NULL, &vgname, &lvname, &layer))
return_0;
/* FIXME: fails to handle dev aliases i.e. /dev/dm-5, replace with UUID suffix */
if (lvname && (is_reserved_lvname(lvname) || *layer)) {
log_debug_activation("%s: Reserved internal LV device %s/%s%s%s not usable.",
dev_name(dev), vgname, lvname, *layer ? "-" : "", layer);
return 0;
}
}
if (check_lv) {
/* Skip LVs */
if (is_lv)
*is_lv = 1;
return 0;
}
}
if (check_reserved &&
(!strncmp(uuid, CRYPT_TEMP, sizeof(CRYPT_TEMP) - 1) ||
!strncmp(uuid, CRYPT_SUBDEV, sizeof(CRYPT_SUBDEV) - 1) ||
!strncmp(uuid, STRATIS, sizeof(STRATIS) - 1))) {
/* Skip private crypto devices */
log_debug_activation("%s: Reserved uuid %s on %s device %s not usable.",
dev_name(dev), uuid,
uuid[0] == 'C' ? "crypto" : "stratis",
name);
return 0;
}
return 1;
}
/*
 * dm_device_is_usable
 * @cmd
 * @dev
 * @check - which usability checks to perform
 * @is_lv - optionally set to 1 when the device is recognized as an LV
 *
 * A device is considered not usable if it is:
 * 1) An empty device (no targets)
 * 2) A blocked mirror (i.e. a mirror with a failure and block_on_error set)
 * 3) ignore_suspended_devices is set and
 * a) the device is suspended
 * b) it is a snapshot origin
 * 4) an error target
 * 5) the LV name is a reserved name
 *
 * Returns: 1 if usable, 0 otherwise
 */
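/*
 * Usage sketch (illustrative only; 'cmd' and 'dev' are assumed to be
 * the caller's cmd_context and dev-cache device):
 *
 *     struct dev_usable_check_params p = {
 *             .check_empty = 1,
 *             .check_blocked = 1,
 *             .check_suspended = ignore_suspended_devices(),
 *             .check_error_target = 1,
 *             .check_reserved = 1,
 *     };
 *     if (dm_device_is_usable(cmd, dev, p, NULL))
 *             ... safe to read labels from the device ...
 */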
int dm_device_is_usable(struct cmd_context *cmd, struct device *dev, struct dev_usable_check_params check, int *is_lv)
{
struct dm_task *dmt;
struct dm_info info;
const char *name, *uuid;
const struct dm_active_device *dm_dev;
uint64_t start, length;
char *target_type = NULL;
char *params;
void *next = NULL;
int only_error_or_zero_target = 1;
int r = 0;
if (dev_cache_use_dm_uuid_cache() &&
/* With cache we can avoid status calls for unusable UUIDs */
(dm_dev = dev_cache_get_dm_dev_by_devno(cmd, dev->dev)) &&
!_is_usable_uuid(dev, dm_dev->name, dm_dev->uuid, check.check_reserved, check.check_lv, is_lv))
return 0;
if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, NULL, NULL,
MAJOR(dev->dev), MINOR(dev->dev), 0, 0, 0)))
return_0;
if (!info.exists)
goto out;
name = dm_task_get_name(dmt);
uuid = dm_task_get_uuid(dmt);
if (check.check_empty && !info.target_count) {
log_debug_activation("%s: Empty device %s not usable.", dev_name(dev), name);
goto out;
}
if (check.check_suspended && info.suspended) {
log_debug_activation("%s: Suspended device %s not usable.", dev_name(dev), name);
goto out;
}
if (uuid &&
!_is_usable_uuid(dev, name, uuid, check.check_reserved, check.check_lv, is_lv))
goto out;
/* FIXME Also check for mpath no paths */
do {
next = dm_get_next_target(dmt, next, &start, &length,
&target_type, &params);
if (!target_type)
continue;
if (check.check_blocked && !strcmp(target_type, TARGET_NAME_MIRROR)) {
if (ignore_lvm_mirrors()) {
log_debug_activation("%s: Scanning mirror devices is disabled.", dev_name(dev));
goto out;
}
if (!_ignore_blocked_mirror_devices(cmd, dev, start,
length, params)) {
log_debug_activation("%s: Mirror device %s not usable.",
dev_name(dev), name);
goto out;
}
}
/*
* FIXME: A snapshot origin could be sitting on top of a mirror
* which could be blocking I/O. We should add a check for the
* stack here and see if there's a blocked mirror underneath.
* Currently, mirrors used as origin or snapshot are no longer
* supported, and in general using mirrors in a stack is
* disabled by default (with a warning that, if enabled,
* it could cause various deadlocks).
* A similar situation can happen with RAID devices where
* a RAID device can be snapshotted.
* If one of the RAID legs is down and we're doing
* lvconvert --repair, there's a time period in which
* snapshot components are (besides other devs) suspended.
* See also https://bugzilla.redhat.com/show_bug.cgi?id=1219222
* for an example where this causes problems.
*
* This is a quick check for now, but it should be replaced by a
* more robust check that walks the stack correctly - not just
* snapshots but any combination possible in a stack - using a
* proper dm tree.
*/
if (check.check_suspended &&
(!strcmp(target_type, TARGET_NAME_SNAPSHOT) || !strcmp(target_type, TARGET_NAME_SNAPSHOT_ORIGIN)) &&
_ignore_suspended_snapshot_component(dev)) {
log_debug_activation("%s: %s device %s not usable.", dev_name(dev), target_type, name);
goto out;
}
if (!strcmp(target_type, TARGET_NAME_SNAPSHOT) &&
_ignore_invalid_snapshot(params)) {
log_debug_activation("%s: Invalid %s device %s not usable.", dev_name(dev), target_type, name);
goto out;
}
if (!strncmp(target_type, TARGET_NAME_RAID, 4) && _ignore_frozen_raid(dev, params)) {
log_debug_activation("%s: Frozen %s device %s not usable.",
dev_name(dev), target_type, name);
goto out;
}
/* TODO: extend check struct ? */
if (!strcmp(target_type, TARGET_NAME_THIN) &&
!_ignore_unusable_thins(dev)) {
log_debug_activation("%s: %s device %s not usable.", dev_name(dev), target_type, name);
goto out;
}
if (only_error_or_zero_target &&
strcmp(target_type, TARGET_NAME_ERROR) &&
strcmp(target_type, TARGET_NAME_ZERO))
only_error_or_zero_target = 0;
} while (next);
/* Skip devices consisting entirely of error or zero targets. */
/* FIXME Deal with device stacked above error targets? */
if (check.check_error_target && only_error_or_zero_target) {
log_debug_activation("%s: Error device %s not usable.",
dev_name(dev), name);
goto out;
}
/* FIXME Also check dependencies? */
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
/* Read UUID from a given DM device into uuid_buf */
int devno_dm_uuid(struct cmd_context *cmd, int major, int minor,
char *uuid_buf, size_t uuid_buf_size)
{
struct dm_task *dmt;
struct dm_info info;
const struct dm_active_device *dm_dev;
const char *uuid;
int r = 0;
if (dev_cache_use_dm_uuid_cache()) {
if ((dm_dev = dev_cache_get_dm_dev_by_devno(cmd, MKDEV(major, minor)))) {
dm_strncpy(uuid_buf, dm_dev->uuid, uuid_buf_size);
return 1;
}
uuid_buf[0] = 0;
return 0;
}
if (!(dmt = _setup_task_run(DM_DEVICE_INFO, &info, NULL, NULL, NULL,
major, minor, 0, 0, 0)))
return_0;
if (info.exists && (uuid = dm_task_get_uuid(dmt)))
r = dm_strncpy(uuid_buf, uuid, uuid_buf_size);
dm_task_destroy(dmt);
return r;
}
int dev_dm_uuid(struct cmd_context *cmd, struct device *dev,
char *uuid_buf, size_t uuid_buf_size)
{
return devno_dm_uuid(cmd, MAJOR(dev->dev), MINOR(dev->dev),
uuid_buf, uuid_buf_size);
}
/*
* If active LVs were activated by a version of LVM2 before 2.02.00 we must
* perform additional checks to find them because they do not have the LVM-
* prefix on their dm uuids.
* As of 2.02.150, we've chosen to disable this compatibility arbitrarily if
* we're running kernel version 3 or above.
*/
#define MIN_KERNEL_MAJOR 3
static int _original_uuid_format_check_required(struct cmd_context *cmd)
{
static int _kernel_major = 0;
if (!_kernel_major) {
if ((sscanf(cmd->kernel_vsn, "%d", &_kernel_major) == 1) &&
(_kernel_major >= MIN_KERNEL_MAJOR))
log_debug_activation("Skipping checks for old devices without " UUID_PREFIX
" dm uuid prefix (kernel vsn %d >= %d).", _kernel_major, MIN_KERNEL_MAJOR);
else
_kernel_major = -1;
}
return (_kernel_major == -1);
}
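/*
 * Look up device info by dlid, falling back through the historical
 * uuid formats: the current dlid, the dlid without the private
 * suffix (pre-2.02.106), and - on old kernels only - the dlid
 * without the LVM- prefix (pre-2.02.00).
 */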
static int _info(struct cmd_context *cmd,
const char *name, const char *dlid,
int with_open_count, int with_read_ahead, int with_name_check,
struct dm_info *dminfo, uint32_t *read_ahead,
struct lv_seg_status *seg_status)
{
char old_style_dlid[sizeof(UUID_PREFIX) + 2 * ID_LEN];
const char *suffix, *suffix_position;
const char *name_check = (with_name_check) ? name : NULL;
unsigned i = 0;
log_debug_activation("Getting device info for %s [%s].", name, dlid);
/* Check for dlid */
if (!_info_run(dlid, dminfo, read_ahead, seg_status, name_check,
with_open_count, with_read_ahead, 0, 0))
return_0;
if (dminfo->exists)
return 1;
/* Check for original version of dlid before the suffixes got added in 2.02.106 */
if ((suffix_position = strrchr(dlid, '-'))) {
while ((suffix = _uuid_suffix_list[i++])) {
if (strcmp(suffix_position + 1, suffix))
continue;
dm_strncpy(old_style_dlid, dlid, sizeof(old_style_dlid));
if (!_info_run(old_style_dlid, dminfo, read_ahead, seg_status,
name_check, with_open_count, with_read_ahead,
0, 0))
return_0;
if (dminfo->exists)
return 1;
}
}
/* Must we still check for the pre-2.02.00 dm uuid format? */
if (!_original_uuid_format_check_required(cmd))
return 1;
/* Check for dlid before UUID_PREFIX was added */
if (!_info_run(dlid + sizeof(UUID_PREFIX) - 1, dminfo, read_ahead, seg_status,
name_check, with_open_count, with_read_ahead, 0, 0))
return_0;
return 1;
}
int dev_manager_remove_dm_major_minor(uint32_t major, uint32_t minor)
{
struct dm_task *dmt;
int r = 0;
log_verbose("Removing dm dev %u:%u", major, minor);
if (!(dmt = dm_task_create(DM_DEVICE_REMOVE)))
return_0;
if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
log_error("Failed to set device number for remove %u:%u", major, minor);
goto out;
}
r = dm_task_run(dmt);
out:
dm_task_destroy(dmt);
return r;
}
static int _info_by_dev(uint32_t major, uint32_t minor, struct dm_info *info)
{
return _info_run(NULL, info, NULL, NULL, NULL, 0, 0, major, minor);
}
int dev_manager_check_prefix_dm_major_minor(uint32_t major, uint32_t minor, const char *prefix)
{
struct dm_task *dmt;
const char *uuid;
int r = 1;
if (!(dmt = _setup_task_run(DM_DEVICE_INFO, NULL, NULL, NULL, 0, major, minor, 0, 0, 0)))
return_0;
if (!(uuid = dm_task_get_uuid(dmt)) || strncasecmp(uuid, prefix, strlen(prefix)))
r = 0;
dm_task_destroy(dmt);
return r;
}
/*
* Get a list of active dm devices from the kernel.
* The 'devs' list contains a struct dm_active_device.
*/
int dev_manager_get_dm_active_devices(const char *prefix, struct dm_list **devs, unsigned *devs_features)
{
struct dm_task *dmt;
int r = 1;
if (!(dmt = _setup_task_run(DM_DEVICE_LIST, NULL, NULL, NULL, 0, 0, 0, 0, 0, 0)))
return_0;
if (!dm_task_get_device_list(dmt, devs, devs_features)) {
r = 0;
goto_out;
}
out:
dm_task_destroy(dmt);
return r;
}
int dev_manager_info(struct cmd_context *cmd,
const struct logical_volume *lv, const char *layer,
int with_open_count, int with_read_ahead, int with_name_check,
struct dm_info *dminfo, uint32_t *read_ahead,
struct lv_seg_status *seg_status)
{
char old_style_dlid[sizeof(UUID_PREFIX) + 2 * ID_LEN];
char *dlid, *name;
int r = 0;
if (!(name = dm_build_dm_name(cmd->mem, lv->vg->name, lv->name, layer)))
return_0;
if (!(dlid = build_dm_uuid(cmd->mem, lv, layer)))
goto_out;
dm_strncpy(old_style_dlid, dlid, sizeof(old_style_dlid));
if (dev_cache_use_dm_uuid_cache() &&
!dev_cache_get_dm_dev_by_uuid(cmd, dlid) &&
!dev_cache_get_dm_dev_by_uuid(cmd, old_style_dlid)) {
log_debug("Cached as inactive %s.", name);
if (dminfo)
memset(dminfo, 0, sizeof(*dminfo));
r = 1;
goto out;
}
if (!(r = _info(cmd, name, dlid,
with_open_count, with_read_ahead, with_name_check,
dminfo, read_ahead, seg_status)))
stack;
out:
dm_pool_free(cmd->mem, name);
return r;
}
static struct dm_tree_node *_cached_dm_tree_node(struct dm_pool *mem,
struct dm_tree *dtree,
const struct logical_volume *lv,
const char *layer)
{
struct dm_tree_node *dnode;
char *dlid;
if (!(dlid = build_dm_uuid(mem, lv, layer)))
return_NULL;
dnode = dm_tree_find_node_by_uuid(dtree, dlid);
dm_pool_free(mem, dlid);
return dnode;
}
static const struct dm_info *_cached_dm_info(struct dm_pool *mem,
struct dm_tree *dtree,
const struct logical_volume *lv,
const char *layer)
{
const struct dm_tree_node *dnode;
const struct dm_info *dinfo = NULL;
if (!(dnode = _cached_dm_tree_node(mem, dtree, lv, layer)))
return NULL;
if (!(dinfo = dm_tree_node_get_info(dnode))) {
log_warn("WARNING: Cannot get info from tree node for %s.",
display_lvname(lv));
return NULL;
}
if (!dinfo->exists)
dinfo = NULL;
return dinfo;
}
int lv_has_target_type(struct dm_pool *mem, const struct logical_volume *lv,
const char *layer, const char *target_type)
{
int r = 0;
char *dlid;
struct dm_task *dmt;
struct dm_info info;
void *next = NULL;
uint64_t start, length;
char *type = NULL;
char *params = NULL;
if (!(dlid = build_dm_uuid(mem, lv, layer)))
return_0;
if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, 0, 0, 0, 0, 0, 0)))
goto_bad;
if (!info.exists)
goto_out;
/* If there is a preloaded table, use that in preference. */
if (info.inactive_table) {
dm_task_destroy(dmt);
if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, 0, 0, 0, 0, 0, 1)))
goto_bad;
if (!info.exists || !info.inactive_table)
goto_out;
}
do {
next = dm_get_next_target(dmt, next, &start, &length,
&type, &params);
if (type && !strncmp(type, target_type, strlen(target_type))) {
r = 1;
break;
}
} while (next);
out:
dm_task_destroy(dmt);
bad:
dm_pool_free(mem, dlid);
return r;
}
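/*
 * Compare the thin device_id in the kernel table line of the LV
 * (preferring a preloaded inactive table when one exists) with
 * the device_id expected from metadata.
 */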
static int _lv_has_thin_device_id(struct dm_pool *mem, const struct logical_volume *lv,
const char *layer, unsigned device_id)
{
char *dlid;
struct dm_task *dmt;
struct dm_info info;
void *next = NULL;
uint64_t start, length;
char *type = NULL;
char *params = NULL;
unsigned id = ~0;
if (!(dlid = build_dm_uuid(mem, lv, layer)))
return_0;
if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, &info, NULL, dlid, 0, 0, 0, 0, 1, 0)))
goto_bad;
if (!info.exists)
goto_out;
/* If there is a preloaded table, use that in preference. */
if (info.inactive_table) {
dm_task_destroy(dmt);
if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, &info, NULL, dlid, 0, 0, 0, 0, 1, 1)))
goto_bad;
if (!info.exists || !info.inactive_table)
goto_out;
}
(void) dm_get_next_target(dmt, next, &start, &length, &type, &params);
if (!type || strcmp(type, TARGET_NAME_THIN))
goto_out;
if (!params || sscanf(params, "%*u:%*u %u", &id) != 1)
goto_out;
log_debug_activation("%soaded thin volume %s with id %u is %smatching id %u.",
info.inactive_table ? "Prel" : "L",
display_lvname(lv), id,
(device_id != id) ? "not " : "", device_id);
out:
dm_task_destroy(dmt);
bad:
dm_pool_free(mem, dlid);
return (device_id == id);
}
int add_linear_area_to_dtree(struct dm_tree_node *node, uint64_t size, uint32_t extent_size,
int use_linear_target, const char *vgname, const char *lvname)
{
uint32_t page_size;
/*
* Use striped or linear target?
*/
if (!use_linear_target) {
page_size = lvm_getpagesize() >> SECTOR_SHIFT;
/*
* We'll use the extent size as the stripe size.
* Extent size and page size are always powers of 2.
* The striped target requires that the stripe size is
* divisible by the page size.
*/
if (extent_size >= page_size) {
/* Use striped target */
if (!dm_tree_node_add_striped_target(node, size, extent_size))
return_0;
return 1;
}
/* Some exotic cases are unsupported by striped. */
log_warn("WARNING: Using linear target for %s/%s: Striped requires extent size "
"(" FMTu32 " sectors) >= page size (" FMTu32 ").",
vgname, lvname, extent_size, page_size);
}
/*
* Use linear target.
*/
if (!dm_tree_node_add_linear_target(node, size))
return_0;
return 1;
}
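/*
 * Combine two per-segment percentages into one overall value:
 * merge failures and invalid values propagate, 0% and 100% are
 * only kept when both sides agree, and anything else is
 * recomputed from the accumulated numerator/denominator.
 */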
static dm_percent_range_t _combine_percent(dm_percent_t a, dm_percent_t b,
uint32_t numerator, uint32_t denominator)
{
if (a == LVM_PERCENT_MERGE_FAILED || b == LVM_PERCENT_MERGE_FAILED)
return LVM_PERCENT_MERGE_FAILED;
if (a == DM_PERCENT_INVALID || b == DM_PERCENT_INVALID)
return DM_PERCENT_INVALID;
if (a == DM_PERCENT_100 && b == DM_PERCENT_100)
return DM_PERCENT_100;
if (a == DM_PERCENT_0 && b == DM_PERCENT_0)
return DM_PERCENT_0;
return (dm_percent_range_t) dm_make_percent(numerator, denominator);
}
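/*
 * Walk the kernel status of a device (optionally waiting for an
 * event first), match each target line against the LV's segments
 * and ask the segment type to turn its status params into a
 * completion percentage, merging the per-segment results into
 * *overall_percent.
 */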
static int _percent_run(struct dev_manager *dm, const char *name,
const char *dlid,
const char *target_type, int wait,
const struct logical_volume *lv, dm_percent_t *overall_percent,
uint32_t *event_nr, int fail_if_percent_unsupported,
int *interrupted)
{
int r = 0;
struct dm_task *dmt;
struct dm_info info;
void *next = NULL;
uint64_t start, length;
char *type = NULL;
char *params = NULL;
const struct dm_list *segh = lv ? &lv->segments : NULL;
struct lv_segment *seg = NULL;
int first_time = 1;
dm_percent_t percent = DM_PERCENT_INVALID;
uint64_t total_numerator = 0, total_denominator = 0;
struct segment_type *segtype;
*overall_percent = percent;
if (!(segtype = get_segtype_from_string(dm->cmd, target_type)))
return_0;
if (wait)
sigint_allow();
if (!(dmt = _setup_task_run(wait ? DM_DEVICE_WAITEVENT : DM_DEVICE_STATUS, &info,
name, dlid, event_nr, 0, 0, 0, 0, 0)))
goto_bad;
if (!info.exists)
goto_out;
if (event_nr)
*event_nr = info.event_nr;
do {
next = dm_get_next_target(dmt, next, &start, &length, &type,
&params);
if (lv) {
if (!(segh = dm_list_next(&lv->segments, segh))) {
log_error("Number of segments in active LV %s "
"does not match metadata.",
display_lvname(lv));
goto out;
}
seg = dm_list_item(segh, struct lv_segment);
}
if (!type || !params)
continue;
if (strcmp(type, target_type)) {
/* If kernel's type isn't an exact match is it compatible? */
if (!segtype->ops->target_status_compatible ||
!segtype->ops->target_status_compatible(type))
continue;
}
if (!segtype->ops->target_percent)
continue;
if (!segtype->ops->target_percent(&dm->target_state,
&percent, dm->mem,
dm->cmd, seg, params,
&total_numerator,
&total_denominator))
goto_out;
if (first_time) {
*overall_percent = percent;
first_time = 0;
} else
*overall_percent =
_combine_percent(*overall_percent, percent,
total_numerator, total_denominator);
} while (next);
if (lv && dm_list_next(&lv->segments, segh)) {
log_error("Number of segments in active LV %s does not "
"match metadata.", display_lvname(lv));
goto out;
}
if (first_time) {
/* above ->target_percent() was not executed! */
/* FIXME: Why return DM_PERCENT_100 et al. in this case? */
*overall_percent = DM_PERCENT_100;
if (fail_if_percent_unsupported)
goto_out;
}
log_debug_activation("LV percent: %s",
display_percent(dm->cmd, *overall_percent));
r = 1;
out:
dm_task_destroy(dmt);
bad:
if (wait) {
sigint_restore();
if (sigint_caught()) {
*interrupted = 1;
return_0;
}
}
return r;
}
static int _percent(struct dev_manager *dm, const char *name, const char *dlid,
const char *target_type, int wait,
const struct logical_volume *lv, dm_percent_t *percent,
uint32_t *event_nr, int fail_if_percent_unsupported)
{
int interrupted = 0;
if (dlid && *dlid) {
if (_percent_run(dm, NULL, dlid, target_type, wait, lv, percent,
event_nr, fail_if_percent_unsupported, &interrupted))
return 1;
if (!interrupted &&
_original_uuid_format_check_required(dm->cmd) &&
_percent_run(dm, NULL, dlid + sizeof(UUID_PREFIX) - 1,
target_type, wait, lv, percent,
event_nr, fail_if_percent_unsupported, &interrupted))
return 1;
}
if (!interrupted && name &&
_percent_run(dm, name, NULL, target_type, wait, lv, percent,
event_nr, fail_if_percent_unsupported, &interrupted))
return 1;
return_0;
}
/* FIXME Merge with the percent function */
int dev_manager_transient(struct dev_manager *dm, const struct logical_volume *lv)
{
int r = 0;
struct dm_task *dmt;
struct dm_info info;
void *next = NULL;
uint64_t start, length;
char *type = NULL;
char *params = NULL;
char *dlid = NULL;
const char *layer = lv_layer(lv);
const struct dm_list *segh = &lv->segments;
struct lv_segment *seg = NULL;
if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
return_0;
if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, NULL, 0, 0, 0, 0, 0)))
return_0;
if (!info.exists)
goto_out;
do {
next = dm_get_next_target(dmt, next, &start, &length, &type,
&params);
if (!(segh = dm_list_next(&lv->segments, segh))) {
log_error("Number of segments in active LV %s "
"does not match metadata.", display_lvname(lv));
goto out;
}
seg = dm_list_item(segh, struct lv_segment);
if (!type || !params)
continue;
if (!seg) {
log_error(INTERNAL_ERROR "Segment is not selected.");
goto out;
}
if (seg->segtype->ops->check_transient_status &&
!seg->segtype->ops->check_transient_status(dm->mem, seg, params))
goto_out;
} while (next);
if (dm_list_next(&lv->segments, segh)) {
log_error("Number of segments in active LV %s does not "
"match metadata.", display_lvname(lv));
goto out;
}
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
/*
* dev_manager implementation.
*/
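/*
 * Lifecycle sketch (illustrative only; error handling is omitted
 * and 'cmd' and 'lv' are assumed to exist in the caller):
 *
 *     struct dev_manager *dm = dev_manager_create(cmd, lv->vg->name, 1);
 *     dm_percent_t percent;
 *
 *     if (dm && dev_manager_snapshot_percent(dm, lv, &percent))
 *             ... use percent ...
 *     if (dm)
 *             dev_manager_destroy(dm);
 */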
struct dev_manager *dev_manager_create(struct cmd_context *cmd,
const char *vg_name,
unsigned track_pvmove_deps)
{
struct dm_pool *mem;
struct dev_manager *dm;
if (!(mem = dm_pool_create("dev_manager", 16 * 1024)))
return_NULL;
if (!(dm = dm_pool_zalloc(mem, sizeof(*dm))))
goto_bad;
dm->cmd = cmd;
dm->mem = mem;
dm->vg_name = vg_name;
/*
* When we manipulate (normally suspend/resume) the PVMOVE
* device directly, there's no need to touch the LVs above.
*/
dm->track_pvmove_deps = track_pvmove_deps;
dm->target_state = NULL;
dm_udev_set_sync_support(cmd->current_settings.udev_sync);
return dm;
bad:
dm_pool_destroy(mem);
return NULL;
}
void dev_manager_destroy(struct dev_manager *dm)
{
dm_pool_destroy(dm->mem);
}
void dev_manager_release(void)
{
dm_lib_release();
}
void dev_manager_exit(void)
{
dm_lib_exit();
}
int dev_manager_snapshot_percent(struct dev_manager *dm,
const struct logical_volume *lv,
dm_percent_t *percent)
{
const struct logical_volume *snap_lv;
char *name;
const char *dlid;
int fail_if_percent_unsupported = 0;
if (lv_is_merging_origin(lv)) {
/*
* Set 'fail_if_percent_unsupported', otherwise passing
* unsupported LV types to _percent will lead to a default
* successful return with percent_range as PERCENT_100.
* - For a merging origin, this will result in a polldaemon
* that runs infinitely (because completion is PERCENT_0)
* - We unfortunately don't yet _know_ if a snapshot-merge
* target is active (activation is deferred if dev is open);
* so we can't short-circuit origin devices based purely on
* existing LVM LV attributes.
*/
fail_if_percent_unsupported = 1;
}
if (lv_is_merging_cow(lv)) {
/* must check percent of origin for a merging snapshot */
snap_lv = origin_from_cow(lv);
} else
snap_lv = lv;
/*
* Build a name for the top layer.
*/
if (!(name = dm_build_dm_name(dm->mem, snap_lv->vg->name, snap_lv->name, NULL)))
return_0;
if (!(dlid = build_dm_uuid(dm->mem, snap_lv, NULL)))
return_0;
/*
* Try to get some info on this device.
*/
if (!_percent(dm, name, dlid, TARGET_NAME_SNAPSHOT, 0, NULL, percent,
NULL, fail_if_percent_unsupported))
return_0;
/* If the snapshot isn't available, percent will be -1 */
return 1;
}
/* FIXME Merge with snapshot_percent, auto-detecting target type */
/* FIXME Cope with more than one target */
int dev_manager_mirror_percent(struct dev_manager *dm,
const struct logical_volume *lv, int wait,
dm_percent_t *percent, uint32_t *event_nr)
{
char *name;
const char *dlid;
const char *target_type = first_seg(lv)->segtype->name;
const char *layer = lv_layer(lv);
/*
* Build a name for the top layer.
*/
if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
return_0;
if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
return_0;
log_debug_activation("Getting device %s status percentage for %s.",
target_type, name);
if (!_percent(dm, name, dlid, target_type, wait, lv, percent, event_nr, 0))
return_0;
return 1;
}
int dev_manager_raid_status(struct dev_manager *dm,
const struct logical_volume *lv,
struct lv_status_raid **status, int *exists)
{
int r = 0;
const char *dlid;
struct dm_task *dmt;
struct dm_info info;
uint64_t start, length;
char *type = NULL;
char *params = NULL;
const char *layer = lv_layer(lv);
struct dm_status_raid *sr;
*exists = -1;
if (!(*status = dm_pool_zalloc(dm->mem, sizeof(struct lv_status_raid))))
return_0;
if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
return_0;
if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, 0, 0, 0, 0, 0, 0)))
return_0;
if (!(*exists = info.exists))
goto out;
log_debug_activation("Checking raid status for volume %s.",
display_lvname(lv));
dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
if (!type || strcmp(type, TARGET_NAME_RAID)) {
log_error("Expected %s segment type but got %s instead.",
TARGET_NAME_RAID, type ? type : "NULL");
goto out;
}
/* FIXME Check there's only one target */
if (!dm_get_status_raid(dm->mem, params, &sr))
goto_out;
(*status)->mem = dm->mem; /* User has to destroy this mem pool later */
(*status)->raid = sr;
(*status)->in_sync = dm_make_percent(sr->insync_regions, sr->total_regions);
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
int dev_manager_raid_message(struct dev_manager *dm,
const struct logical_volume *lv,
const char *msg)
{
int r = 0;
const char *dlid;
struct dm_task *dmt;
const char *layer = lv_layer(lv);
if (!lv_is_raid(lv)) {
log_error(INTERNAL_ERROR "%s is not a RAID logical volume.",
display_lvname(lv));
return 0;
}
/* These are the supported RAID messages for dm-raid v1.9.0 */
if (strcmp(msg, "idle") &&
strcmp(msg, "frozen") &&
strcmp(msg, "resync") &&
strcmp(msg, "recover") &&
strcmp(msg, "check") &&
strcmp(msg, "repair")) {
log_error(INTERNAL_ERROR "Unknown RAID message: %s.", msg);
return 0;
}
if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
return_0;
if (!(dmt = _setup_task_run(DM_DEVICE_TARGET_MSG, NULL, NULL, dlid, 0, 0, 0, 0, 1, 0)))
return_0;
if (!dm_task_set_message(dmt, msg))
goto_out;
if (!dm_task_run(dmt))
goto_out;
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
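/*
 * Pass a message string unchanged to the dm-writecache target of an
 * active writecache LV (e.g. "flush" to write back dirty blocks).
 */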
int dev_manager_writecache_message(struct dev_manager *dm,
const struct logical_volume *lv,
const char *msg)
{
int r = 0;
const char *dlid;
struct dm_task *dmt;
const char *layer = lv_layer(lv);
if (!lv_is_writecache(lv)) {
log_error(INTERNAL_ERROR "%s is not a writecache logical volume.",
display_lvname(lv));
return 0;
}
if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
return_0;
if (!(dmt = _setup_task_run(DM_DEVICE_TARGET_MSG, NULL, NULL, dlid, 0, 0, 0, 0, 1, 0)))
return_0;
if (!dm_task_set_message(dmt, msg))
goto_out;
if (!dm_task_run(dmt))
goto_out;
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
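/*
 * Read and parse the dm-cache status line of an active cache LV and
 * convert the raw block counters into data/metadata/dirty usage
 * percentages in the returned lv_status_cache structure.
 */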
int dev_manager_cache_status(struct dev_manager *dm,
const struct logical_volume *lv,
struct lv_status_cache **status, int *exists)
{
int r = 0;
const char *dlid;
struct dm_task *dmt;
struct dm_info info;
uint64_t start, length;
char *type = NULL;
char *params = NULL;
struct dm_status_cache *c;
*exists = -1;
if (!(dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
return_0;
if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, 0, 0, 0, 0, 0, 0)))
return_0;
if (!(*exists = info.exists))
goto out;
log_debug_activation("Checking status for cache volume %s.",
display_lvname(lv));
dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
if (!type || strcmp(type, TARGET_NAME_CACHE)) {
log_error("Expected %s segment type but got %s instead.",
TARGET_NAME_CACHE, type ? type : "NULL");
goto out;
}
/*
* FIXME:
* The ->target_percent() API is only able to transfer a single value.
* It needs to be able to pass the whole structure.
*/
if (!dm_get_status_cache(dm->mem, params, &c))
goto_out;
if (!(*status = dm_pool_zalloc(dm->mem, sizeof(struct lv_status_cache))))
goto_out;
(*status)->mem = dm->mem; /* User has to destroy this mem pool later */
(*status)->cache = c;
if (c->fail || c->error) {
(*status)->data_usage =
(*status)->metadata_usage =
(*status)->dirty_usage = DM_PERCENT_INVALID;
} else {
(*status)->data_usage = dm_make_percent(c->used_blocks,
c->total_blocks);
(*status)->metadata_usage = dm_make_percent(c->metadata_used_blocks,
c->metadata_total_blocks);
(*status)->dirty_usage = (c->used_blocks) ?
dm_make_percent(c->dirty_blocks,
c->used_blocks) : DM_PERCENT_0;
}
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
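/*
 * Read and parse the dm-thin-pool status line.  With 'flush' set the
 * kernel commits outstanding changes before reporting, so the data and
 * metadata usage is current at the cost of possibly blocking; without
 * it the values may be slightly stale.
 */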
int dev_manager_thin_pool_status(struct dev_manager *dm,
const struct logical_volume *lv, int flush,
struct lv_status_thin_pool **status, int *exists)
{
struct dm_status_thin_pool *dm_status;
const char *dlid;
struct dm_task *dmt;
struct dm_info info;
uint64_t start, length;
char *type = NULL;
char *params = NULL;
int r = 0;
*exists = -1;
if (!(*status = dm_pool_zalloc(dm->mem, sizeof(struct lv_status_thin_pool))))
return_0;
/* Build dlid for the thin pool layer */
if (!(dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
return_0;
if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, 0, 0, 0, 0, flush, 0)))
return_0;
if (!(*exists = info.exists))
goto out;
log_debug_activation("Checking thin pool status for LV %s.",
display_lvname(lv));
dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
if (!type || strcmp(type, TARGET_NAME_THIN_POOL)) {
log_error("Expected %s segment type but got %s instead.",
TARGET_NAME_THIN_POOL, type ? type : "NULL");
goto out;
}
if (!dm_get_status_thin_pool(dm->mem, params, &dm_status))
goto_out;
(*status)->mem = dm->mem;
(*status)->thin_pool = dm_status;
if (dm_status->fail || dm_status->error) {
(*status)->data_usage =
(*status)->metadata_usage = DM_PERCENT_INVALID;
} else {
(*status)->data_usage = dm_make_percent(dm_status->used_data_blocks,
dm_status->total_data_blocks);
(*status)->metadata_usage = dm_make_percent(dm_status->used_metadata_blocks,
dm_status->total_metadata_blocks);
}
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
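/*
 * Read and parse the dm-thin status line of a single thin volume and
 * express its mapped size as a percentage of the LV size rounded up
 * to the pool chunk size.
 */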
int dev_manager_thin_status(struct dev_manager *dm,
const struct logical_volume *lv, int flush,
struct lv_status_thin **status, int *exists)
{
struct dm_status_thin *dm_status;
const char *dlid;
struct dm_task *dmt;
struct dm_info info;
uint64_t start, length;
char *type = NULL;
char *params = NULL;
uint64_t csize;
int r = 0;
*exists = -1;
if (!(*status = dm_pool_zalloc(dm->mem, sizeof(struct lv_status_thin))))
return_0;
if (!(dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
return_0;
if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, 0, 0, 0, 0, flush, 0)))
return_0;
if (!(*exists = info.exists))
goto out;
log_debug_activation("Checking thin status for LV %s.",
display_lvname(lv));
dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
if (!type || strcmp(type, TARGET_NAME_THIN)) {
log_error("Expected %s segment type but got %s instead.",
TARGET_NAME_THIN, type ? type : "NULL");
goto out;
}
if (!dm_get_status_thin(dm->mem, params, &dm_status))
goto_out;
(*status)->mem = dm->mem;
(*status)->thin = dm_status;
if (dm_status->fail)
(*status)->usage = DM_PERCENT_INVALID;
else {
/* The pool allocates whole chunks, so round up to the nearest one */
csize = first_seg(first_seg(lv)->pool_lv)->chunk_size;
csize = ((lv->size + csize - 1) / csize) * csize;
if (dm_status->mapped_sectors > csize) {
log_warn("WARNING: LV %s maps %s while the size is only %s.",
display_lvname(lv),
display_size(dm->cmd, dm_status->mapped_sectors),
display_size(dm->cmd, csize));
/* Don't show nonsense numbers such as 1000% full */
dm_status->mapped_sectors = csize;
}
(*status)->usage = dm_make_percent(dm_status->mapped_sectors, csize);
}
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
/*
* Explore the state of the running DM table to obtain the currently used device id.
*/
int dev_manager_thin_device_id(struct dev_manager *dm,
const struct logical_volume *lv,
uint32_t *device_id, int *exists)
{
const char *dlid;
struct dm_task *dmt;
struct dm_info info;
uint64_t start, length;
char *params, *target_type = NULL;
const char *layer = lv_layer(lv);
int r = 0;
*exists = -1;
if (lv_is_merging_origin(lv) && !lv_info(lv->vg->cmd, lv, 1, NULL, 0, 0))
/* If the merge has already happened, the table
* can already be using the correct LV without the -real layer */
layer = NULL;
/* Build dlid for the thin layer */
if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
return_0;
if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, &info, NULL, dlid, 0, 0, 0, 0, 1, 0)))
return_0;
if (!(*exists = info.exists))
goto out;
log_debug_activation("Checking device id for LV %s.",
display_lvname(lv));
if (dm_get_next_target(dmt, NULL, &start, &length,
&target_type, &params)) {
log_error("More then one table line found for %s.",
display_lvname(lv));
goto out;
}
if (!target_type || strcmp(target_type, TARGET_NAME_THIN)) {
log_error("Unexpected target type %s found for thin %s.",
target_type, display_lvname(lv));
goto out;
}
if (!params || sscanf(params, "%*u:%*u %u", device_id) != 1) {
log_error("Cannot parse table like parameters %s for %s.",
params, display_lvname(lv));
goto out;
}
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
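/*
 * Read and parse the dm-vdo status line (and message based statistics)
 * of an active VDO pool LV.
 */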
int dev_manager_vdo_pool_status(struct dev_manager *dm,
const struct logical_volume *lv, int flush,
struct lv_status_vdo **status, int *exists)
{
const char *dlid;
struct dm_info info;
uint64_t start, length;
struct dm_task *dmt = NULL;
char *type = NULL;
char *params = NULL;
int r = 0;
*exists = -1;
if (!(*status = dm_pool_zalloc(dm->mem, sizeof(struct lv_status_vdo))))
return_0;
if (!(dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
return_0;
if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, 0, 0, 0, 0, flush, 0)))
return_0;
if (!(*exists = info.exists))
goto out;
log_debug_activation("Checking VDO pool status for LV %s.",
display_lvname(lv));
if (dm_get_next_target(dmt, NULL, &start, &length, &type, &params)) {
log_error("More then one table line found for %s.",
display_lvname(lv));
goto out;
}
if (!type || strcmp(type, TARGET_NAME_VDO)) {
log_error("Expected %s segment type but got %s instead.",
TARGET_NAME_VDO, type ? type : "NULL");
goto out;
}
if (!_vdo_pool_message_stats(dm->mem, lv, *status))
stack;
if (!parse_vdo_pool_status(dm->mem, lv, params, &info, *status))
goto_out;
(*status)->mem = dm->mem;
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
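/*
 * Read the live dm-vdo table line of an active VDO pool to recover the
 * size configuration (virtual size, physical size, block map cache size)
 * the kernel is actually using, e.g. as a basis for resize decisions.
 */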
int dev_manager_vdo_pool_size_config(struct dev_manager *dm,
const struct logical_volume *lv,
struct vdo_pool_size_config *cfg)
{
const char *dlid;
struct dm_info info;
uint64_t start, length;
struct dm_task *dmt = NULL;
char *type = NULL;
char *params = NULL;
int r = 0;
unsigned version = 0;
memset(cfg, 0, sizeof(*cfg));
if (!(dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
return_0;
if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, &info, NULL, dlid, 0, 0, 0, 0, 0, 0)))
return_0;
if (!info.exists)
goto inactive; /* VDO device is not active, should not happen here... */
log_debug_activation("Checking VDO pool table line for LV %s.",
display_lvname(lv));
if (dm_get_next_target(dmt, NULL, &start, &length, &type, &params)) {
log_error("More then one table line found for %s.",
display_lvname(lv));
goto out;
}
if (!type || strcmp(type, TARGET_NAME_VDO)) {
log_error("Expected %s segment type but got %s instead.",
TARGET_NAME_VDO, type ? type : "NULL");
goto out;
}
if (sscanf(params, "V%u %*s " FMTu64 " %*u " FMTu32,
&version, &cfg->physical_size, &cfg->block_map_cache_size_mb) != 3) {
log_error("Failed to parse VDO parameters %s for LV %s.",
params, display_lvname(lv));
goto out;
}
switch (version) {
case 2: break;
case 4: break;
default: log_warn("WARNING: Unknown VDO table line version %u.", version);
}
cfg->virtual_size = length;
cfg->physical_size *= 8; // From 4K unit to 512B
cfg->block_map_cache_size_mb /= 256; // From 4K unit to MiB
cfg->index_memory_size_mb = first_seg(lv)->vdo_params.index_memory_size_mb; // Preserved
inactive:
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
/*************************/
/* NEW CODE STARTS HERE */
/*************************/
static int _dev_manager_lv_mknodes(const struct logical_volume *lv)
{
char *name;
if (!(name = dm_build_dm_name(lv->vg->cmd->mem, lv->vg->name,
lv->name, NULL)))
return_0;
return fs_add_lv(lv, name);
}
static int _dev_manager_lv_rmnodes(const struct logical_volume *lv)
{
return fs_del_lv(lv);
}
static int _lv_has_mknode(const struct logical_volume *lv)
{
return (lv_is_visible(lv) &&
(!lv_is_thin_pool(lv) || lv_is_new_thin_pool(lv)));
}
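/*
 * Synchronize /dev content for a single LV: create the node and symlinks
 * when the device exists and should be visible, remove them otherwise.
 */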
int dev_manager_mknodes(const struct logical_volume *lv)
{
struct dm_info dminfo;
struct dm_task *dmt;
char *name;
int r = 0;
if (!(name = dm_build_dm_name(lv->vg->cmd->mem, lv->vg->name, lv->name, NULL)))
return_0;
if (!(dmt = _setup_task_run(DM_DEVICE_MKNODES, &dminfo, name, NULL, 0, 0, 0, 0, 0, 0)))
return_0;
if (dminfo.exists) {
/* read-only component LV is also made visible */
if (_lv_has_mknode(lv) || (dminfo.read_only && lv_is_component(lv)))
r = _dev_manager_lv_mknodes(lv);
else
r = 1;
} else
r = _dev_manager_lv_rmnodes(lv);
dm_task_destroy(dmt);
return r;
}
#ifdef UDEV_SYNC_SUPPORT
/*
* Until the DM_UEVENT_GENERATED_FLAG was introduced in kernel patch
* 856a6f1dbd8940e72755af145ebcd806408ecedd
* some operations could not be performed by udev, requiring our fallback code.
*/
static int _dm_driver_has_stable_udev_support(void)
{
char vsn[80];
unsigned maj, min, patchlevel;
return driver_version(vsn, sizeof(vsn)) &&
(sscanf(vsn, "%u.%u.%u", &maj, &min, &patchlevel) == 3) &&
(maj == 4 ? min >= 18 : maj > 4);
}
static int _check_udev_fallback(struct cmd_context *cmd)
{
struct config_info *settings = &cmd->current_settings;
if (settings->udev_fallback != -1)
goto out;
/*
* Use udev fallback automatically in case udev
* is disabled via DM_DISABLE_UDEV environment
* variable or udev rules are switched off.
*/
settings->udev_fallback = !settings->udev_rules ? 1 :
find_config_tree_bool(cmd, activation_verify_udev_operations_CFG, NULL);
/* Do not rely fully on udev if the udev support is known to be incomplete. */
if (!settings->udev_fallback && !_dm_driver_has_stable_udev_support()) {
log_very_verbose("Kernel driver has incomplete udev support so "
"LVM will check and perform some operations itself.");
settings->udev_fallback = 1;
}
out:
return settings->udev_fallback;
}
#else /* UDEV_SYNC_SUPPORT */
static int _check_udev_fallback(struct cmd_context *cmd)
{
/* We must use old node/symlink creation code if not compiled with udev support at all! */
return cmd->current_settings.udev_fallback = 1;
}
#endif /* UDEV_SYNC_SUPPORT */
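/*
 * Compute the udev flags attached to a dm device node; they decide which
 * groups of udev rules (dm, subsystem, /dev/disk, other) may process it,
 * based on the LV's visibility, role and the configured udev settings.
 */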
static uint16_t _get_udev_flags(struct dev_manager *dm, const struct logical_volume *lv,
const char *layer, int noscan, int temporary,
int visible_component)
{
uint16_t udev_flags = 0;
/*
* Also instruct libdevmapper to disable its udev
* fallback in accordance with the LVM2 settings.
*/
if (!_check_udev_fallback(dm->cmd))
udev_flags |= DM_UDEV_DISABLE_LIBRARY_FALLBACK;
/*
* Is this a top-level and visible device?
* If not, create just the /dev/mapper content.
*/
/* FIXME: add target's method for this */
if (lv_is_new_thin_pool(lv) || visible_component)
/* New thin-pool is regular LV with -tpool UUID suffix. */
udev_flags |= DM_UDEV_DISABLE_DISK_RULES_FLAG |
DM_UDEV_DISABLE_OTHER_RULES_FLAG;
else if (layer || !lv_is_visible(lv) || lv_is_thin_pool(lv) || lv_is_vdo_pool(lv))
udev_flags |= DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG |
DM_UDEV_DISABLE_DISK_RULES_FLAG |
DM_UDEV_DISABLE_OTHER_RULES_FLAG;
/*
* There's no need for other udev rules to touch special LVs with
* reserved names, even if they happen to be visible and top-level.
* We don't need to populate /dev/disk here either.
*/
else if (is_reserved_lvname(lv->name))
udev_flags |= DM_UDEV_DISABLE_DISK_RULES_FLAG |
DM_UDEV_DISABLE_OTHER_RULES_FLAG;
/*
* Snapshots and origins could have the same rule applied that would
* give their symlinks exactly the same name (e.g. a name based on
* filesystem UUID). We give preference to origins to make such
* naming deterministic (e.g. symlinks in /dev/disk/by-uuid).
*/
if (lv_is_cow(lv))
udev_flags |= DM_UDEV_LOW_PRIORITY_FLAG;
/*
* Finally, add flags to disable /dev/mapper and /dev/<vgname> content
* to be created by udev if it is requested by user's configuration.
* This is basically an explicit fallback to old node/symlink creation
* without udev.
*/
if (!dm->cmd->current_settings.udev_rules)
udev_flags |= DM_UDEV_DISABLE_DM_RULES_FLAG |
DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG;
/*
* LVM subsystem specific flags.
*/
if (noscan)
udev_flags |= DM_SUBSYSTEM_UDEV_FLAG0;
if (temporary)
udev_flags |= DM_UDEV_DISABLE_DISK_RULES_FLAG |
DM_UDEV_DISABLE_OTHER_RULES_FLAG;
return udev_flags;
}
static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
const struct logical_volume *lv, int origin_only);
static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
const struct logical_volume *lv,
struct lv_activate_opts *laopts,
const char *layer);
/*
* Check for device holders (ATM used only for removed pvmove targets)
* and add them into dtree structures.
* When 'laopts != NULL' add them as new nodes - which also corrects READ_AHEAD.
* Note: correct tables are already explicitly PRELOADED.
*/
static int _check_holder(struct dev_manager *dm, struct dm_tree *dtree,
const struct logical_volume *lv,
struct lv_activate_opts *laopts,
uint32_t major, const char *d_name)
{
const char *default_uuid_prefix = dm_uuid_prefix();
const size_t default_uuid_prefix_len = strlen(default_uuid_prefix);
const char *name;
const char *uuid;
struct dm_info info;
struct dm_task *dmt;
struct logical_volume *lv_det;
union lvid id;
int dev, r = 0;
errno = 0;
dev = strtoll(d_name + 3, NULL, 10);
if (errno) {
log_error("Failed to parse dm device minor number from %s.", d_name);
return 0;
}
if (!(dmt = _setup_task_run(DM_DEVICE_INFO, &info, NULL, NULL, NULL,
major, dev, 0, 0, 0)))
return_0;
if (info.exists) {
uuid = dm_task_get_uuid(dmt);
name = dm_task_get_name(dmt);
log_debug_activation("Checking holder of %s %s (" FMTu32 ":" FMTu32 ") %s.",
display_lvname(lv), uuid, info.major, info.minor,
name);
/* Skip common uuid prefix */
if (!strncmp(default_uuid_prefix, uuid, default_uuid_prefix_len))
uuid += default_uuid_prefix_len;
if (!memcmp(uuid, &lv->vg->id, ID_LEN) &&
!dm_tree_find_node_by_uuid(dtree, uuid)) {
/* trims any UUID suffix (e.g. -cow) */
dm_strncpy((char*)&id, uuid, 2 * sizeof(struct id) + 1);
/* If UUID is not yet in dtree, look for matching LV */
if (!(lv_det = find_lv_in_vg_by_lvid(lv->vg, &id))) {
log_error("Cannot find holder with device name %s in VG %s.",
name, lv->vg->name);
goto out;
}
if (lv_is_cow(lv_det))
lv_det = origin_from_cow(lv_det);
log_debug_activation("Found holder %s of %s.",
display_lvname(lv_det),
display_lvname(lv));
if (!laopts) {
if (!_add_lv_to_dtree(dm, dtree, lv_det, 0))
goto_out;
} else if (!_add_new_lv_to_dtree(dm, dtree, lv_det, laopts, 0))
goto_out;
}
}
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
/*
* Add existing devices which hold the given LV device open.
* This is used in cases when the metadata no longer contains this information,
* i.e. a PVMOVE is being finished and the final table is going to be resumed.
*/
static int _add_holders_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
const struct logical_volume *lv,
struct lv_activate_opts *laopts,
const struct dm_info *info)
{
const char *sysfs_dir = dm_sysfs_dir();
char sysfs_path[PATH_MAX];
struct dirent *dirent;
DIR *d;
int r = 0;
/* Sysfs path of holders */
if (dm_snprintf(sysfs_path, sizeof(sysfs_path), "%sblock/dm-" FMTu32
"/holders", sysfs_dir, info->minor) < 0) {
log_error("sysfs_path dm_snprintf failed.");
return 0;
}
if (!(d = opendir(sysfs_path))) {
log_sys_error("opendir", sysfs_path);
return 0;
}
while ((dirent = readdir(d)))
/* Expect the minor number to be appended to the 'dm-' prefix */
if (!strncmp(dirent->d_name, "dm-", 3) &&
!_check_holder(dm, dtree, lv, laopts, info->major, dirent->d_name))
goto_out;
r = 1;
out:
if (closedir(d))
log_sys_debug("closedir", "holders");
return r;
}
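/*
 * Add an active LV device (with an optional layer suffix) to the dtree,
 * verify any requested persistent major:minor assignment and track
 * its UUID for pending delete when requested.
 */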
static int _add_dev_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
const struct logical_volume *lv, const char *layer)
{
char *dlid, *name;
struct dm_info info, info2;
const struct dm_active_device *dm_dev;
if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
return_0;
if (!(dlid = build_dm_uuid(dm->track_pending_delete ? dm->cmd->pending_delete_mem : dm->mem, lv, layer)))
return_0;
if (dev_cache_use_dm_uuid_cache()) {
if (!(dm_dev = dev_cache_get_dm_dev_by_uuid(dm->cmd, dlid))) {
log_debug("Cached as not present %s.", name);
return 1;
}
info = (struct dm_info) {
.exists = 1,
.major = MAJOR(dm_dev->devno),
.minor = MINOR(dm_dev->devno),
};
log_debug("Cached as present %s %s (%d:%d).",
name, dlid, info.major, info.minor);
} else if (!_info(dm->cmd, name, dlid, 0, 0, 0, &info, NULL, NULL))
return_0;
/*
* For top-level volumes verify that the existing device matches the
* requested major/minor and that the major/minor pair is available for use.
*/
if (!layer && lv->major != -1 && lv->minor != -1) {
/*
* FIXME compare info.major with lv->major if multiple major support
*/
if (info.exists && ((int) info.minor != lv->minor)) {
log_error("Volume %s (%" PRIu32 ":%" PRIu32")"
" differs from already active device "
"(%" PRIu32 ":%" PRIu32").",
display_lvname(lv), lv->major, lv->minor,
info.major, info.minor);
return 0;
}
if (!info.exists && _info_by_dev(lv->major, lv->minor, &info2) &&
info2.exists) {
log_error("The requested major:minor pair "
"(%" PRIu32 ":%" PRIu32") is already used.",
lv->major, lv->minor);
return 0;
}
}
if (info.exists && !dm_tree_add_dev_with_udev_flags(dtree, info.major, info.minor,
_get_udev_flags(dm, lv, layer,
0, 0, 0))) {
log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree.",
info.major, info.minor);
return 0;
}
if (info.exists && dm->track_pending_delete) {
log_debug_activation("Tracking pending delete for %s%s%s (%s).",
display_lvname(lv), layer ? "-" : "", layer ? : "", dlid);
if (!str_list_add(dm->cmd->pending_delete_mem, &dm->cmd->pending_delete, dlid))
return_0;
}
/*
* Find holders of existing active LV where name starts with 'pvmove',
* but it is no longer a PVMOVE LV and also not a PVMOVE _mimage.
*/
if (info.exists && !lv_is_pvmove(lv) &&
!strchr(lv->name, '_') && !strncmp(lv->name, "pvmove", 6))
if (!_add_holders_to_dtree(dm, dtree, lv, NULL, &info))
return_0;
return 1;
}
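/* Context handed to _pool_callback() for checking pool metadata. */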
struct pool_cb_data {
struct dev_manager *dm;
const struct logical_volume *pool_lv;
int skip_zero; /* to skip zeroed device header (check first 64B) */
int exec; /* which binary to call */
int opts;
struct {
unsigned maj;
unsigned min;
unsigned patch;
} version;
const char *global;
};
/*
* Simple version of check function calling 'tool -V'
*
* Returns 1 if the tool's version is equal to or newer than the given one.
* Otherwise it returns 0.
*/
static int _check_tool_version(struct cmd_context *cmd, const char *tool,
unsigned maj, unsigned min, unsigned patch)
{
const char *argv[] = { tool, "-V", NULL };
struct pipe_data pdata;
FILE *f;
char buf[128] = { 0 };
char *nl;
unsigned v_maj, v_min, v_patch;
int ret = 0;
if (!(f = pipe_open(cmd, argv, 0, &pdata))) {
log_warn("WARNING: Cannot read output from %s.", argv[0]);
} else {
if (fgets(buf, sizeof(buf) - 1, f) &&
(sscanf(buf, "%u.%u.%u", &v_maj, &v_min, &v_patch) == 3)) {
if ((v_maj > maj) ||
((v_maj == maj) &&
((v_min > min) ||
(v_min == min && v_patch >= patch))))
ret = 1;
if ((nl = strchr(buf, '\n')))
nl[0] = 0; /* cut newline away */
log_verbose("Found version of %s %s is %s then requested %u.%u.%u.",
argv[0], buf, ret ? "better" : "older", maj, min, patch);
} else
log_warn("WARNING: Cannot parse output '%s' from %s.", buf, argv[0]);
(void) pipe_close(&pdata);
}
return ret;
}
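/*
 * dm tree node callback: run the configured metadata check tool
 * (e.g. thin_check or cache_check) on the pool's metadata device,
 * skipping the check when the tool is missing or too old, or when
 * skip_zero is set and the metadata header reads as all zeros.
 */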
static int _pool_callback(struct dm_tree_node *node,
dm_node_callback_t type, void *cb_data)
{
int ret, status = 0, fd;
const struct pool_cb_data *data = cb_data;
const struct logical_volume *pool_lv = data->pool_lv;
const struct logical_volume *mlv = first_seg(pool_lv)->metadata_lv;
struct cmd_context *cmd = pool_lv->vg->cmd;
long buf[64 / sizeof(long)]; /* buffer for short disk header (64B) */
int args = 0;
char *mpath;
const char *argv[DEFAULT_MAX_EXEC_ARGS + 7] = { /* Max supported 15 args */
find_config_tree_str_allow_empty(cmd, data->exec, NULL)
};
if (!argv[0] || !*argv[0]) /* *_check tool is unconfigured/disabled with "" setting */
return 1;
if (lv_is_cache_vol(pool_lv)) {
if (!(mpath = lv_dmpath_suffix_dup(data->dm->mem, pool_lv, "-cmeta"))) {
log_error("Failed to build device path for checking cachevol metadata %s.",
display_lvname(pool_lv));
return 0;
}
} else {
if (!(mpath = lv_dmpath_dup(data->dm->mem, mlv))) {
log_error("Failed to build device path for checking pool metadata %s.",
display_lvname(mlv));
return 0;
}
}
dev_cache_destroy_dm_uuids();
log_debug("Running check command on %s", mpath);
if (data->skip_zero) {
if ((fd = open(mpath, O_RDONLY)) < 0) {
log_sys_error("open", mpath);
return 0;
}
/* let's assume there is no problem reading 64 bytes */
if (read(fd, buf, sizeof(buf)) < (int)sizeof(buf)) {
log_sys_error("read", mpath);
if (close(fd))
log_sys_error("close", mpath);
return 0;
}
for (ret = 0; ret < (int) DM_ARRAY_SIZE(buf); ++ret)
if (buf[ret])
break;
if (close(fd))
log_sys_error("close", mpath);
if (ret == (int) DM_ARRAY_SIZE(buf)) {
log_debug_activation("Metadata checking skipped, detected empty disk header on %s.",
mpath);
return 1;
}
}
if (!prepare_exec_args(cmd, argv, &args, data->opts))
return_0;
argv[++args] = mpath;
if (!(ret = exec_cmd(cmd, (const char * const *)argv,
&status, 0))) {
if (status == ENOENT) {
log_warn("WARNING: Check is skipped, please install recommended missing binary %s!",
argv[0]);
return 1;
}
if ((data->version.maj || data->version.min || data->version.patch) &&
!_check_tool_version(cmd, argv[0],
data->version.maj, data->version.min, data->version.patch)) {
log_warn("WARNING: Check is skipped, please upgrade installed version of %s!",
argv[0]);
return 1;
}
switch (type) {
case DM_NODE_CALLBACK_PRELOADED:
log_err_once("Check of pool %s failed (status:%d). "
"Manual repair required!",
display_lvname(pool_lv), status);
break;
default:
log_warn("WARNING: Integrity check of metadata for pool "
"%s failed.", display_lvname(pool_lv));
}
/*
* FIXME: What should we do here??
*
* Maybe mark the node, so it's not activating
* as pool but as error/linear and let the
* dm tree resolve the issue.
*/
}
return ret;
}
static int _pool_register_callback(struct dev_manager *dm,
struct dm_tree_node *node,
const struct logical_volume *lv)
{
struct pool_cb_data *data;
/* Do not skip testing of metadata even for unused thin pools */
#if 0
/* Skip metadata testing for unused thin pool. */
if (lv_is_thin_pool(lv) &&
(!first_seg(lv)->transaction_id ||
((first_seg(lv)->transaction_id == 1) &&
pool_has_message(first_seg(lv), NULL, 0))))
return 1;
#endif
/* Skip validation of metadata for lvremove and vgremove */
if (!dm->activation &&
(!strcmp(dm->cmd->name, "lvremove") ||
!strcmp(dm->cmd->name, "vgremove"))) {
log_debug("Skipping %s callback registration for command %s.",
display_lvname(lv), dm->cmd->name);
return 1;
}
if (!(data = dm_pool_zalloc(dm->mem, sizeof(*data)))) {
log_error("Failed to allocated path for callback.");
return 0;
}
data->dm = dm;
if (lv_is_thin_pool(lv)) {
data->pool_lv = lv;
data->skip_zero = 1;
data->exec = global_thin_check_executable_CFG;
data->opts = global_thin_check_options_CFG;
data->global = "thin";
} else if (lv_is_cache(lv)) { /* cache pool */
data->pool_lv = first_seg(lv)->pool_lv;
data->skip_zero = 1; /* cheap read-error detection */
data->exec = global_cache_check_executable_CFG;
data->opts = global_cache_check_options_CFG;
data->global = "cache";
if (first_seg(first_seg(lv)->pool_lv)->cache_metadata_format > 1) {
data->version.maj = 0;
data->version.min = 7;
}
} else {
log_error(INTERNAL_ERROR "Registering unsupported pool callback.");
return 0;
}
dm_tree_node_set_callback(node, _pool_callback, data);
return 1;
}
static struct id _get_id_for_meta_or_data(const struct lv_segment *lvseg, int meta_or_data)
{
/* Use the ID when provided in the form of metadata_id or data_id, otherwise use the CVOL ID */
if (meta_or_data && lvseg->metadata_id)
return *lvseg->metadata_id;
if (!meta_or_data && lvseg->data_id)
return *lvseg->data_id;
return lvseg->pool_lv->lvid.id[1];
}
/* Add special devices _cmeta & _cdata on top of CacheVol to dm tree */
static int _add_cvol_subdev_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
const struct logical_volume *lv, int meta_or_data)
{
const char *layer = meta_or_data ? "cmeta" : "cdata";
struct dm_pool *mem = dm->track_pending_delete ? dm->cmd->pending_delete_mem : dm->mem;
struct lv_segment *lvseg = first_seg(lv);
const struct logical_volume *pool_lv = lvseg->pool_lv;
struct dm_info info;
char *name, *dlid;
union lvid lvid = { { lv->vg->id, _get_id_for_meta_or_data(lvseg, meta_or_data) } };
if (!(dlid = dm_build_dm_uuid(mem, UUID_PREFIX, (const char *)&lvid.s, layer)))
return_0;
/* Name is not really needed here, but aids debugging... */
if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, pool_lv->name, layer)))
return_0;
if (!_info(dm->cmd, name, dlid, 0, 0, 0, &info, NULL, NULL))
return_0;
if (info.exists) {
if (!dm_tree_add_dev_with_udev_flags(dtree, info.major, info.minor,
_get_udev_flags(dm, lv, layer, 0, 0, 0))) {
log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree.", info.major, info.minor);
return 0;
}
if (dm->track_pending_delete) {
log_debug_activation("Tracking pending delete for %s-%s (%s).",
display_lvname(pool_lv), layer, dlid);
if (!str_list_add(mem, &dm->cmd->pending_delete, dlid))
return_0;
}
}
return 1;
}
/* Declaration to resolve suspend tree and message passing for thin-pool */
static int _add_target_to_dtree(struct dev_manager *dm,
struct dm_tree_node *dnode,
struct lv_segment *seg,
struct lv_activate_opts *laopts);
/*
* Add LV and any known dependencies
*/
static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
const struct logical_volume *lv, int origin_only)
{
uint32_t s;
struct seg_list *sl;
struct dm_list *snh;
struct lv_segment *seg;
struct dm_tree_node *node;
const struct logical_volume *plv;
if (lv_is_pvmove(lv) && (dm->track_pvmove_deps == 2))
return 1; /* Avoid rechecking of already seen pvmove LV */
if (lv_is_cache_pool(lv)) {
if (!dm_list_empty(&lv->segs_using_this_lv)) {
if (!_add_lv_to_dtree(dm, dtree, seg_lv(first_seg(lv), 0), 0))
return_0;
if (!_add_lv_to_dtree(dm, dtree, first_seg(lv)->metadata_lv, 0))
return_0;
/* Cache pool does not have a real device node */
return 1;
}
/* Unused cache pool is activated as metadata */
}
if (!origin_only && !_add_dev_to_dtree(dm, dtree, lv, NULL))
return_0;
/* FIXME Can we avoid doing this every time? */
/* Reused also for lv_is_external_origin(lv) */
if (!_add_dev_to_dtree(dm, dtree, lv, "real"))
return_0;
if (!origin_only && !_add_dev_to_dtree(dm, dtree, lv, "cow"))
return_0;
if (origin_only && lv_is_thin_volume(lv)) {
if (!_add_dev_to_dtree(dm, dtree, lv, lv_layer(lv)))
return_0;
#if 0
/* ? Use origin_only to avoid 'deep' thin pool suspend ? */
/* FIXME Implement dm_tree_node_skip_childrens optimisation */
if (!(uuid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
return_0;
if ((node = dm_tree_find_node_by_uuid(dtree, uuid)))
dm_tree_node_skip_childrens(node, 1);
#endif
}
if (dm->activation && lv_is_external_origin(lv)) {
/* Find possible users of external origin lv */
dm_list_iterate_items(sl, &lv->segs_using_this_lv)
if (!_add_dev_to_dtree(dm, dtree, sl->seg->lv, lv_layer(sl->seg->lv)))
return_0;
}
if (lv_is_thin_pool(lv)) {
/*
* For both origin_only and !origin_only
* this skips the test for -tpool-real and -tpool-cow
*/
if (!_add_dev_to_dtree(dm, dtree, lv, lv_layer(lv)))
return_0;
/*
* TODO: change API and move this code
* It could be easier to handle this in _add_dev_to_dtree()
* and base it on info.exists?
*/
if (!dm->activation) {
if ((node = _cached_dm_tree_node(dm->mem, dtree, lv, lv_layer(lv)))) {
if (origin_only) {
struct lv_activate_opts laopts = {
.origin_only = 1,
.send_messages = 1 /* Node with messages */
};
/*
* Add messages only if the right node exists in the table,
* when building the SUSPEND tree for an origin-only thin-pool.
*
* TODO: Fix call of '_add_target_to_dtree()' to add message
* to thin-pool node as we already know the pool node exists
* in the table. Any better/cleaner API way ?
*
* Probably some 'new' target method to add messages for any node?
*/
if (dm->suspend &&
!dm_list_empty(&(first_seg(lv)->thin_messages)) &&
!_add_target_to_dtree(dm, node, first_seg(lv), &laopts))
return_0;
} else {
/* Setup callback for non-activation partial tree */
/* Activation gets own callback when needed */
/* TODO: extend _cached_dm_info() to return dnode */
if (!_pool_register_callback(dm, node, lv))
return_0;
}
}
}
}
if (lv_is_vdo_pool(lv)) {
/*
* For both origin_only and !origin_only
* this skips the test for -vpool-real and -vpool-cow
*/
if (!_add_dev_to_dtree(dm, dtree, lv, lv_layer(lv)))
return_0;
}
if (lv_is_cache(lv)) {
if (!origin_only && !dm->activation && !dm->track_pending_delete) {
/* Setup callback for non-activation partial tree */
/* Activation gets own callback when needed */
if ((node = _cached_dm_tree_node(dm->mem, dtree, lv, lv_layer(lv))) &&
!_pool_register_callback(dm, node, lv))
return_0;
}
}
if (lv_is_cache_vol(lv))
/* Cachevol with cache LV spans some extra layers -cdata, -cmeta */
dm_list_iterate_items(sl, &lv->segs_using_this_lv)
if (lv_is_cache(sl->seg->lv) &&
(!_add_cvol_subdev_to_dtree(dm, dtree, sl->seg->lv, 0) ||
!_add_cvol_subdev_to_dtree(dm, dtree, sl->seg->lv, 1) ||
!_add_dev_to_dtree(dm, dtree, lv, lv_layer(lv))))
return_0;
/* Add any snapshots of this LV */
if (!origin_only && lv_is_origin(lv))
dm_list_iterate(snh, &lv->snapshot_segs)
if (!_add_lv_to_dtree(dm, dtree, dm_list_struct_base(snh, struct lv_segment, origin_list)->cow, 0))
return_0;
if (dm->activation && !origin_only && lv_is_merging_origin(lv) &&
!_add_lv_to_dtree(dm, dtree, find_snapshot(lv)->lv, 1))
return_0;
/* Add any LVs referencing a PVMOVE LV unless told not to. */
if ((dm->track_pvmove_deps == 1) && lv_is_pvmove(lv)) {
dm->track_pvmove_deps = 2; /* Mark as already seen */
dm_list_iterate_items(sl, &lv->segs_using_this_lv) {
/* If LV is snapshot COW - whole snapshot needs reload */
plv = lv_is_cow(sl->seg->lv) ? origin_from_cow(sl->seg->lv) : sl->seg->lv;
if (!_add_lv_to_dtree(dm, dtree, plv, 0))
return_0;
}
dm->track_pvmove_deps = 1;
}
if (!dm->track_pending_delete)
dm_list_iterate_items(sl, &lv->segs_using_this_lv) {
if (lv_is_pending_delete(sl->seg->lv)) {
/* LV is referenced by a cache LV pending delete */
dm->track_pending_delete = 1;
if (!_cached_dm_tree_node(dm->mem, dtree, sl->seg->lv, lv_layer(sl->seg->lv)) &&
!_add_lv_to_dtree(dm, dtree, sl->seg->lv, 0))
return_0;
dm->track_pending_delete = 0;
}
}
/* Add any LVs used by segments in this LV */
dm_list_iterate_items(seg, &lv->segments) {
if (seg->external_lv &&
!_add_lv_to_dtree(dm, dtree, seg->external_lv,
/* For origin LV check for complete device tree */
lv_is_origin(seg->external_lv) ? 0 : 1)) /* stack */
return_0;
if (seg->log_lv &&
!_add_lv_to_dtree(dm, dtree, seg->log_lv, 0))
return_0;
if (seg->metadata_lv &&
!_add_lv_to_dtree(dm, dtree, seg->metadata_lv, 0))
return_0;
if (seg->writecache && seg_is_writecache(seg)) {
if (!_add_lv_to_dtree(dm, dtree, seg->writecache, dm->activation ? origin_only : 1))
return_0;
}
if (seg->integrity_meta_dev && seg_is_integrity(seg)) {
if (!_add_lv_to_dtree(dm, dtree, seg->integrity_meta_dev, dm->activation ? origin_only : 1))
return_0;
}
if (seg->pool_lv &&
/* When activating and not origin_only, detect linear 'overlay' over pool */
!_add_lv_to_dtree(dm, dtree, seg->pool_lv, dm->activation ? origin_only : 1))
return_0;
for (s = 0; s < seg->area_count; s++) {
if (seg_type(seg, s) == AREA_LV && seg_lv(seg, s) &&
/* origin only for cache without pending delete */
(!dm->track_pending_delete || !lv_is_cache(lv)) &&
!_add_lv_to_dtree(dm, dtree, seg_lv(seg, s),
lv_is_vdo_pool(seg_lv(seg, s)) ? 1 : 0))
return_0;
if (seg_is_raid_with_meta(seg) && seg->meta_areas && seg_metalv(seg, s) &&
!_add_lv_to_dtree(dm, dtree, seg_metalv(seg, s), 0))
return_0;
}
/* When activating, detect merging LV presence */
if (dm->activation && seg->merge_lv &&
!_add_lv_to_dtree(dm, dtree, seg->merge_lv, 1))
return_0;
}
return 1;
}
static struct dm_tree *_create_partial_dtree(struct dev_manager *dm, const struct logical_volume *lv, int origin_only)
{
struct dm_tree *dtree;
if (!(dtree = dm_tree_create())) {
log_debug_activation("Partial dtree creation failed for %s.",
display_lvname(lv));
return NULL;
}
dm_tree_set_optional_uuid_suffixes(dtree, (const char**)_uuid_suffix_list);
if (!_add_lv_to_dtree(dm, dtree, lv, (lv_is_origin(lv) || lv_is_thin_volume(lv) || lv_is_thin_pool(lv)) ? origin_only : 0))
goto_bad;
return dtree;
bad:
dm_tree_free(dtree);
return NULL;
}
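/*
 * A sketch of what the helper below produces (names and sizes are
 * illustrative, not taken from this code): for a missing area s=2 in
 * segment 0 of LV "lv" in VG "vg", it creates a device named
 * vg-lv-missing_0_2 whose whole table is a single parameter-less
 * error (or zero) target:
 *
 *   # dmsetup table vg-lv-missing_0_2
 *   0 204800 error
 *
 * With use_zero set, reads return zeroes instead of failing:
 *
 *   0 204800 zero
 */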
static char *_add_error_or_zero_device(struct dev_manager *dm, struct dm_tree *dtree,
struct lv_segment *seg, int s, int use_zero)
{
char *dlid, *name;
char errid[32];
struct dm_tree_node *node;
struct lv_segment *seg_i;
struct dm_info info;
int segno = -1, i = 0;
uint64_t size = (uint64_t) _seg_len(seg) * seg->lv->vg->extent_size;
dm_list_iterate_items(seg_i, &seg->lv->segments) {
if (seg == seg_i) {
segno = i;
break;
}
++i;
}
if (segno < 0) {
log_error(INTERNAL_ERROR "_add_error_or_zero_device called with bad segment.");
return NULL;
}
sprintf(errid, "missing_%d_%d", segno, s);
if (!(dlid = build_dm_uuid(dm->mem, seg->lv, errid)))
return_NULL;
if (!(name = dm_build_dm_name(dm->mem, seg->lv->vg->name,
seg->lv->name, errid)))
return_NULL;
if (!_info(dm->cmd, name, dlid, 0, 0, 0, &info, NULL, NULL))
return_NULL;
if (!info.exists) {
/* Create new node */
if (!(node = dm_tree_add_new_dev(dtree, name, dlid, 0, 0, 0, 0, 0)))
return_NULL;
if (use_zero) {
if (!dm_tree_node_add_zero_target(node, size))
return_NULL;
} else
if (!dm_tree_node_add_error_target(node, size))
return_NULL;
} else {
/* Already exists */
if (!dm_tree_add_dev(dtree, info.major, info.minor)) {
log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree.",
info.major, info.minor);
return NULL;
}
}
return dlid;
}
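/*
 * The stripe_filler checked below comes from lvm.conf
 * activation/missing_stripe_filler; a minimal sketch (values assumed,
 * "error" is the compiled-in default):
 *
 *   activation {
 *       missing_stripe_filler = "error"   # or "zero"
 *   }
 */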
static int _add_error_area(struct dev_manager *dm, struct dm_tree_node *node,
struct lv_segment *seg, int s)
{
char *dlid;
uint64_t extent_size = seg->lv->vg->extent_size;
int use_zero = !strcmp(dm->cmd->stripe_filler, TARGET_NAME_ZERO) ? 1 : 0;
if (!strcmp(dm->cmd->stripe_filler, TARGET_NAME_ERROR) || use_zero) {
/*
* FIXME, the tree pointer is first field of dm_tree_node, but
* we don't have the struct definition available.
*/
struct dm_tree **tree = (struct dm_tree **) node;
if (!(dlid = _add_error_or_zero_device(dm, *tree, seg, s, use_zero)))
return_0;
if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_le(seg, s)))
return_0;
} else
if (!dm_tree_node_add_target_area(node, dm->cmd->stripe_filler, NULL, UINT64_C(0)))
return_0;
return 1;
}
static int _bad_pv_area(struct lv_segment *seg, uint32_t s)
{
struct stat info;
const char *name;
struct device *dev;
if (!seg_pvseg(seg, s))
return 1;
if (!seg_pv(seg, s))
return 1;
if (!(dev = seg_dev(seg, s)))
return 1;
if (dm_list_empty(&dev->aliases))
return 1;
/* FIXME Avoid repeating identical stat in dm_tree_node_add_target_area */
name = dev_name(dev);
if (stat(name, &info) < 0)
return 1;
if (!S_ISBLK(info.st_mode))
return 1;
return 0;
}
int add_areas_line(struct dev_manager *dm, struct lv_segment *seg,
struct dm_tree_node *node, uint32_t start_area,
uint32_t areas)
{
struct cmd_context *cmd = seg->lv->vg->cmd;
uint64_t extent_size = seg->lv->vg->extent_size;
uint32_t s;
char *dlid;
const char *name;
unsigned num_error_areas = 0;
unsigned num_existing_areas = 0;
for (s = start_area; s < areas; s++) {
if (((seg_type(seg, s) == AREA_PV) && _bad_pv_area(seg, s)) ||
((seg_type(seg, s) == AREA_LV) && !seg_lv(seg, s))) {
if (!cmd->partial_activation) {
if (!cmd->degraded_activation ||
(!lv_is_raid_type(seg->lv) &&
!lv_is_integrity(seg->lv) &&
!lv_is_integrity_metadata(seg->lv) &&
!lv_is_integrity_origin(seg->lv))) {
log_error("Aborting. LV %s is incomplete and --activationmode partial was not specified.",
display_lvname(seg->lv));
return 0;
}
}
if (!_add_error_area(dm, node, seg, s))
return_0;
num_error_areas++;
} else if (seg_type(seg, s) == AREA_PV) {
struct device *dev = seg_dev(seg, s);
name = dm_list_empty(&dev->aliases) ? NULL : dev_name(dev);
if (!dm_tree_node_add_target_area(node, name, NULL,
(seg_pv(seg, s)->pe_start + (extent_size * seg_pe(seg, s)))))
return_0;
num_existing_areas++;
} else if (seg_is_raid(seg)) {
/*
* RAID can handle unassigned areas. It simply puts
* '- -' in for the metadata/data device pair. This
* is a valid way to indicate to the RAID target that
* the device is missing.
*
* If an image is marked as VISIBLE_LV and !LVM_WRITE,
* it means the device has temporarily been extracted
* from the array. It may come back at a future date,
* so the bitmap must track differences. Again, '- -'
* is used in the CTR table.
*/
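/*
 * Illustrative raid1 table line with the first metadata/data
 * pair missing (all numbers assumed, not computed here):
 *
 *   0 2097152 raid raid1 3 0 region_size 1024 2 - - 253:4 253:5
 */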
if ((seg_type(seg, s) == AREA_UNASSIGNED) ||
(lv_is_visible(seg_lv(seg, s)) &&
!(seg_lv(seg, s)->status & LVM_WRITE))) {
/* One each for metadata area and data area */
if (!dm_tree_node_add_null_area(node, 0) ||
!dm_tree_node_add_null_area(node, 0))
return_0;
continue;
}
if (seg->meta_areas && seg_metalv(seg, s)) {
if (!(dlid = build_dm_uuid(dm->mem, seg_metalv(seg, s), NULL)))
return_0;
if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_metale(seg, s)))
return_0;
} else if (!dm_tree_node_add_null_area(node, 0))
return_0;
if (!(dlid = build_dm_uuid(dm->mem, seg_lv(seg, s), NULL)))
return_0;
if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_le(seg, s)))
return_0;
} else if (seg_type(seg, s) == AREA_LV) {
if (!(dlid = build_dm_uuid(dm->mem, seg_lv(seg, s), NULL)))
return_0;
if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_le(seg, s)))
return_0;
} else {
log_error(INTERNAL_ERROR "Unassigned area found in LV %s.",
display_lvname(seg->lv));
return 0;
}
}
if (num_error_areas) {
/* Thins currently do not support partial activation */
if (lv_is_thin_type(seg->lv)) {
log_error("Cannot activate %s: pool incomplete.",
display_lvname(seg->lv));
return 0;
}
}
return 1;
}
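/*
 * Hedged usage sketch: whether missing areas abort activation above is
 * governed by the activation mode, e.g.:
 *
 *   vgchange -ay --activationmode complete vg   # refuse incomplete LVs
 *   vgchange -ay --activationmode degraded vg   # allow degraded RAID/integrity
 *   vgchange -ay --activationmode partial vg    # map missing areas to error/zero
 */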
static int _add_layer_target_to_dtree(struct dev_manager *dm,
struct dm_tree_node *dnode,
const struct logical_volume *lv)
{
const char *layer_dlid;
if (!(layer_dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
return_0;
/* Add linear mapping over layered LV */
/* From the VDO pool layer expose ONLY the vdo pool header; otherwise we would need to use the virtual size */
if (!add_linear_area_to_dtree(dnode, lv_is_vdo_pool(lv) ? 8 : lv->size,
lv->vg->extent_size,
lv->vg->cmd->use_linear_target,
lv->vg->name, lv->name) ||
!dm_tree_node_add_target_area(dnode, NULL, layer_dlid, 0))
return_0;
return 1;
}
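/*
 * A sketch of the table this generates (device numbers assumed): the
 * public LV becomes a single linear segment over its -layer device,
 * e.g. a used thin pool "vg/pool" layered over vg-pool-tpool:
 *
 *   # dmsetup table vg-pool
 *   0 2097152 linear 253:3 0
 *
 * For a VDO pool only the first 8 sectors (the header) are exposed.
 */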
static int _add_origin_target_to_dtree(struct dev_manager *dm,
struct dm_tree_node *dnode,
const struct logical_volume *lv)
{
const char *real_dlid;
if (!(real_dlid = build_dm_uuid(dm->mem, lv, "real")))
return_0;
if (!dm_tree_node_add_snapshot_origin_target(dnode, lv->size, real_dlid))
return_0;
return 1;
}
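/*
 * Sketch of the resulting origin table (device numbers assumed);
 * 253:2 would be the vg-origin-real device referenced above:
 *
 *   # dmsetup table vg-origin
 *   0 409600 snapshot-origin 253:2
 */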
static int _add_snapshot_merge_target_to_dtree(struct dev_manager *dm,
struct dm_tree_node *dnode,
const struct logical_volume *lv)
{
const char *origin_dlid, *cow_dlid, *merge_dlid;
struct lv_segment *merging_snap_seg = find_snapshot(lv);
if (!lv_is_merging_origin(lv)) {
log_error(INTERNAL_ERROR "LV %s is not merging snapshot.",
display_lvname(lv));
return 0;
}
if (!(origin_dlid = build_dm_uuid(dm->mem, lv, "real")))
return_0;
if (!(cow_dlid = build_dm_uuid(dm->mem, merging_snap_seg->cow, "cow")))
return_0;
if (!(merge_dlid = build_dm_uuid(dm->mem, merging_snap_seg->cow, NULL)))
return_0;
if (!dm_tree_node_add_snapshot_merge_target(dnode, lv->size, origin_dlid,
cow_dlid, merge_dlid,
merging_snap_seg->chunk_size))
return_0;
return 1;
}
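/*
 * Sketch of the merge table (devices assumed): snapshot-merge takes
 * the same <origin> <COW> <persistent> <chunksize> arguments as the
 * snapshot target, with "P" selecting the persistent exception store:
 *
 *   # dmsetup table vg-origin
 *   0 409600 snapshot-merge 253:2 253:3 P 16
 */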
static int _add_snapshot_target_to_dtree(struct dev_manager *dm,
struct dm_tree_node *dnode,
const struct logical_volume *lv,
struct lv_activate_opts *laopts)
{
const char *origin_dlid;
const char *cow_dlid;
struct lv_segment *snap_seg;
uint64_t size;
if (!(snap_seg = find_snapshot(lv))) {
log_error("Couldn't find snapshot for '%s'.",
display_lvname(lv));
return 0;
}
if (!(origin_dlid = build_dm_uuid(dm->mem, snap_seg->origin, "real")))
return_0;
if (!(cow_dlid = build_dm_uuid(dm->mem, snap_seg->cow, "cow")))
return_0;
size = (uint64_t) snap_seg->len * snap_seg->origin->vg->extent_size;
if (!laopts->no_merging && lv_is_merging_cow(lv)) {
/* cow is to be merged so load the error target */
if (!dm_tree_node_add_error_target(dnode, size))
return_0;
}
else if (!dm_tree_node_add_snapshot_target(dnode, size, origin_dlid,
cow_dlid, 1, snap_seg->chunk_size))
return_0;
return 1;
}
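/*
 * Sketch of a loaded snapshot table (devices assumed): 253:2 is the
 * -real origin, 253:4 the -cow device, P = persistent exception store,
 * and 16 the chunk size in sectors:
 *
 *   # dmsetup table vg-snap
 *   0 409600 snapshot 253:2 253:4 P 16
 */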
static int _add_target_to_dtree(struct dev_manager *dm,
struct dm_tree_node *dnode,
struct lv_segment *seg,
struct lv_activate_opts *laopts)
{
uint64_t extent_size = seg->lv->vg->extent_size;
if (!seg->segtype->ops->add_target_line) {
log_error(INTERNAL_ERROR "_emit_target cannot handle "
"segment type %s.", lvseg_name(seg));
return 0;
}
return seg->segtype->ops->add_target_line(dm, dm->mem, dm->cmd,
&dm->target_state, seg,
laopts, dnode,
lvconvert: add infrastructure for RaidLV reshaping support In order to support striped raid5/6/10 LV reshaping (change of LV type, stripesize or number of legs), this patch introduces infrastructure prerequisites to be used by raid_manip.c extensions in followup patches. This base is needed for allocation of out-of-place reshape space required by the MD raid personalities to avoid writing over data in-place when reading off the current RAID layout or number of legs and writing out the new layout or to a different number of legs (i.e. restripe) Changes: - add members reshape_len to 'struct lv_segment' to store out-of-place reshape length per component rimage - add member data_copies to struct lv_segment to support more than 2 raid10 data copies - make alloc_lv_segment() aware of both reshape_len and data_copies - adjust all alloc_lv_segment() callers to the new API - add functions to retrieve the current data offset (needed for out-of-place reshaping space allocation) and the devices count from the kernel - make libdm deptree code aware of reshape_len - add LV flags for disk add/remove reshaping - support import/export of the new 'struct lv_segment' members - enhance lv_extend/_lv_reduce to cope with reshape_len - add seg_is_*/segtype_is_* macros related to reshaping - add target version check for reshaping - grow rebuilds/writemostly bitmaps to 246 bit to support kernel maximal - enhance libdm deptree code to support data_offset (out-of-place reshaping) and delta_disk (legs add/remove reshaping) target arguments Related: rhbz834579 Related: rhbz1191935 Related: rhbz1191978
extent_size * _seg_len(seg),
&dm->pvmove_mirror_count);
}
static int _add_new_external_lv_to_dtree(struct dev_manager *dm,
struct dm_tree *dtree,
struct logical_volume *external_lv,
struct lv_activate_opts *laopts)
{
struct seg_list *sl;
struct dm_tree_node *dnode;
/* We've already processed this node if it already has a context ptr */
if ((dnode = _cached_dm_tree_node(dm->mem, dtree, external_lv, lv_layer(external_lv))) &&
dm_tree_node_get_context(dnode)) {
/* Skip repeated invocation of external lv processing */
log_debug_activation("Skipping users for already added external origin LV %s.",
display_lvname(external_lv));
return 1;
}
log_debug_activation("Adding external origin LV %s and all active users.",
display_lvname(external_lv));
/* If there is active origin LV, add whole origin device, otherwise only -layer */
if (!_add_new_lv_to_dtree(dm, dtree, external_lv, laopts,
(lv_is_origin(external_lv) &&
_cached_dm_info(dm->mem, dtree, external_lv, NULL))
? NULL : lv_layer(external_lv)))
return_0;
/*
* Add all ACTIVE LVs using this external origin LV. This is
* needed because an LV converted to a thin external origin
* could also have been an old-style snapshot origin.
*/
dm_list_iterate_items(sl, &external_lv->segs_using_this_lv)
/* Add only active layered devices (also avoids loop) */
if (_cached_dm_info(dm->mem, dtree, sl->seg->lv,
lv_layer(sl->seg->lv)) &&
!_add_new_lv_to_dtree(dm, dtree, sl->seg->lv,
laopts, lv_layer(sl->seg->lv)))
return_0;
log_debug_activation("Finished adding external origin LV %s and all active users.",
display_lvname(external_lv));
return 1;
}
static int _add_new_cvol_subdev_to_dtree(struct dev_manager *dm,
struct dm_tree *dtree,
const struct logical_volume *lv,
struct lv_activate_opts *laopts,
struct lv_layer *lvlayer,
int meta_or_data)
{
const char *layer = meta_or_data ? "cmeta" : "cdata";
const struct lv_segment *lvseg = first_seg(lv);
uint64_t size = meta_or_data ? lvseg->metadata_len : lvseg->data_len;
const struct logical_volume *pool_lv = lvseg->pool_lv;
struct dm_tree_node *dnode;
char *dlid, *dlid_pool, *name;
union lvid lvid = { { lv->vg->id, _get_id_for_meta_or_data(lvseg, meta_or_data) } };
if (!(dlid = dm_build_dm_uuid(dm->mem, UUID_PREFIX, (const char *)&lvid.s, layer)))
return_0;
if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, pool_lv->name, layer)))
return_0;
if (!(dnode = dm_tree_add_new_dev_with_udev_flags(dtree, name, dlid, -1, -1,
read_only_lv(lv, laopts, layer),
((lv->vg->status & PRECOMMITTED) | laopts->revert) ? 1 : 0,
lvlayer,
_get_udev_flags(dm, lv, layer, laopts->noscan,
laopts->temporary, 0))))
return_0;
if (dm->track_pending_delete) {
log_debug_activation("Using error for pending delete of %s-%s.",
display_lvname(lv), layer);
if (!dm_tree_node_add_error_target(dnode, size))
return_0;
} else {
/* add load_segment to meta dnode: linear, size of meta area */
if (!add_linear_area_to_dtree(dnode, size, lv->vg->extent_size,
lv->vg->cmd->use_linear_target,
lv->vg->name, lv->name))
return_0;
if (!(dlid_pool = build_dm_uuid(dm->mem, pool_lv, NULL)))
return_0;
/* add seg_area to prev load_seg: offset 0 maps to cachevol lv offset 0 */
if (!dm_tree_node_add_target_area(dnode, NULL, dlid_pool,
meta_or_data ? 0 : lvseg->metadata_len))
return_0;
}
return 1;
}
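/*
 * Resulting layout sketch (names assumed): the single cachevol LV is
 * carved into two linear subdevices, cmeta mapped at cachevol offset 0
 * and cdata starting right after the metadata area:
 *
 *   <vg>-<pool>-cmeta: 0 <metadata_len> linear <cachevol_dev> 0
 *   <vg>-<pool>-cdata: 0 <data_len>     linear <cachevol_dev> <metadata_len>
 */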
static int _add_segment_to_dtree(struct dev_manager *dm,
struct dm_tree *dtree,
struct dm_tree_node *dnode,
struct lv_segment *seg,
struct lv_activate_opts *laopts,
const char *layer)
{
uint32_t s;
struct lv_segment *seg_present;
const struct segment_type *segtype;
const char *target_name;
/* Ensure required device-mapper targets are loaded */
seg_present = find_snapshot(seg->lv) ? : seg;
segtype = seg_present->segtype;
target_name = (segtype->ops->target_name ?
segtype->ops->target_name(seg_present, laopts) :
segtype->name);
log_debug_activation("Checking kernel supports %s segment type for %s%s%s",
target_name, display_lvname(seg->lv),
layer ? "-" : "", layer ? : "");
if (segtype->ops->target_present &&
!segtype->ops->target_present(seg_present->lv->vg->cmd,
seg_present, NULL)) {
log_error("Can't process LV %s: %s target support missing "
"from kernel?", display_lvname(seg->lv), target_name);
return 0;
}
/* Add external origin layer */
if (seg->external_lv &&
!_add_new_external_lv_to_dtree(dm, dtree, seg->external_lv, laopts))
return_0;
/* Add mirror log */
if (seg->log_lv &&
!_add_new_lv_to_dtree(dm, dtree, seg->log_lv, laopts, NULL))
return_0;
/* Add pool metadata */
if (seg->metadata_lv &&
!_add_new_lv_to_dtree(dm, dtree, seg->metadata_lv, laopts, NULL))
return_0;
/* Add pool layer */
if (seg->pool_lv && !laopts->origin_only &&
!_add_new_lv_to_dtree(dm, dtree, seg->pool_lv, laopts,
lv_layer(seg->pool_lv)))
return_0;
if (seg->writecache && !laopts->origin_only &&
!_add_new_lv_to_dtree(dm, dtree, seg->writecache, laopts,
lv_layer(seg->writecache)))
return_0;
if (seg->integrity_meta_dev && !laopts->origin_only &&
!_add_new_lv_to_dtree(dm, dtree, seg->integrity_meta_dev, laopts,
lv_layer(seg->integrity_meta_dev)))
return_0;
/* Add any LVs used by this segment */
for (s = 0; s < seg->area_count; ++s) {
if ((seg_type(seg, s) == AREA_LV) &&
/* do not bring up tracked image */
!lv_is_raid_image_with_tracking(seg_lv(seg, s)) &&
/* origin only for cache without pending delete */
(!dm->track_pending_delete || !seg_is_cache(seg)) &&
!_add_new_lv_to_dtree(dm, dtree, seg_lv(seg, s),
laopts,
lv_is_vdo_pool(seg_lv(seg, s)) ?
lv_layer(seg_lv(seg, s)) : NULL))
return_0;
if (seg_is_raid_with_meta(seg) && seg->meta_areas && seg_metalv(seg, s) &&
!lv_is_raid_image_with_tracking(seg_lv(seg, s)) &&
!_add_new_lv_to_dtree(dm, dtree, seg_metalv(seg, s),
laopts, NULL))
return_0;
}
if (dm->track_pending_delete) {
/* Replace target and all its used devs with error mapping */
log_debug_activation("Using error for pending delete %s.",
display_lvname(seg->lv));
if (!dm_tree_node_add_error_target(dnode, (uint64_t)seg->lv->vg->extent_size * _seg_len(seg)))
return_0;
} else if (!_add_target_to_dtree(dm, dnode, seg, laopts))
return_0;
return 1;
}
static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
const struct logical_volume *lv, struct lv_activate_opts *laopts,
const char *layer)
{
struct lv_segment *seg;
struct lv_layer *lvlayer;
struct seg_list *sl;
struct dm_list *snh;
struct dm_tree_node *dnode;
const struct dm_info *dinfo;
char *name, *dlid;
uint32_t max_stripe_size = UINT32_C(0);
uint32_t read_ahead = lv->read_ahead;
uint32_t read_ahead_flags = UINT32_C(0);
int save_pending_delete = dm->track_pending_delete;
int merge_in_progress = 0;
if (!(lvlayer = dm_pool_alloc(dm->mem, sizeof(*lvlayer)))) {
log_error("_add_new_lv_to_dtree: pool alloc failed for %s %s.",
display_lvname(lv), layer);
return 0;
}
lvlayer->lv = lv;
lvlayer->visible_component = (laopts->component_lv == lv) ? 1 : 0;
log_debug_activation("Adding new LV %s%s%s to dtree", display_lvname(lv),
layer ? "-" : "", layer ? : "");
/* LV with pending delete is never newly loaded into a table */
if (lv_is_pending_delete(lv) && !_cached_dm_info(dm->mem, dtree, lv, NULL))
return 1; /* Replace with error only when already exists */
if (lv_is_cache_pool(lv) &&
!dm_list_empty(&lv->segs_using_this_lv)) {
/* cache pool is 'meta' LV and does not have a real device node */
if (!_add_new_lv_to_dtree(dm, dtree, seg_lv(first_seg(lv), 0), laopts, NULL))
return_0;
if (!_add_new_lv_to_dtree(dm, dtree, first_seg(lv)->metadata_lv, laopts, NULL))
return_0;
return 1;
}
/* FIXME Seek a simpler way to lay out the snapshot-merge tree. */
if (!layer && lv_is_merging_origin(lv)) {
seg = find_snapshot(lv);
/*
* Prevent merge if merge isn't currently possible:
* either origin or merging snapshot are open
* - for old snaps use "snapshot-merge" if it is already in use
* - open_count is always retrieved (as of dm-ioctl 4.7.0)
* so just use the tree's existing nodes' info
*/
if ((dinfo = _cached_dm_info(dm->mem, dtree, lv, NULL))) {
/* Merging origin LV is present; check if merging is already running. */
if ((seg_is_thin_volume(seg) && _lv_has_thin_device_id(dm->mem, lv, NULL, seg->device_id)) ||
(!seg_is_thin_volume(seg) && lv_has_target_type(dm->mem, lv, NULL, TARGET_NAME_SNAPSHOT_MERGE))) {
log_debug_activation("Merging of snapshot volume %s to origin %s is in progress.",
display_lvname(seg->lv), display_lvname(lv));
merge_in_progress = 1; /* Merge is already running */
} /* Merge is not yet running, so check if it can be started */
else if (laopts->resuming) {
log_debug_activation("Postponing pending snapshot merge for origin %s, "
"merge was not started before suspend.",
display_lvname(lv));
laopts->no_merging = 1; /* Cannot be reloaded in suspend */
} /* Non-resuming merge requires origin to be unused */
else if (dinfo->open_count) {
log_debug_activation("Postponing pending snapshot merge for origin %s, "
"origin volume is opened.",
display_lvname(lv));
laopts->no_merging = 1;
}
}
/* If the merge is still undecided, check the merging snapshot itself */
if (!merge_in_progress && !laopts->no_merging &&
(dinfo = _cached_dm_info(dm->mem, dtree,
seg_is_thin_volume(seg) ?
seg->lv : seg->cow, NULL))) {
if (seg_is_thin_volume(seg)) {
/* Active thin snapshot prevents merge */
log_debug_activation("Postponing pending snapshot merge for origin volume %s, "
"merging thin snapshot volume %s is active.",
display_lvname(lv), display_lvname(seg->lv));
laopts->no_merging = 1;
} else if (dinfo->open_count) {
log_debug_activation("Postponing pending snapshot merge for origin volume %s, "
"merging snapshot volume %s is opened.",
display_lvname(lv), display_lvname(seg->lv));
laopts->no_merging = 1;
}
}
}
if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
return_0;
/* Even unused thin-pool still needs to get layered UUID -suffix */
if (!layer && lv_is_new_thin_pool(lv))
layer = lv_layer(lv);
/* Adds -real to the dm uuid of wcorig LV. */
if (!layer && lv_is_writecache_origin(lv))
layer = lv_layer(lv); /* "real" */
/*
* FIXME: we would like to use -private suffixes for devices not processed by udev,
* however ATM we also sometimes want to provide /dev/vg/lv symlinks to such devices
* and to be able to correctly report their status with lvs.
*
* Until problems are resolved this code path needs to be disabled.
*/
if (0 && lvlayer->visible_component) {
/* Component LV will be public, do not add any layer suffixes */
if (!(dlid = dm_build_dm_uuid(dm->mem, UUID_PREFIX, lv->lvid.s, NULL)))
return_0;
} else if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
return_0;
/* We've already processed this node if it already has a context ptr */
if ((dnode = dm_tree_find_node_by_uuid(dtree, dlid)) &&
dm_tree_node_get_context(dnode))
return 1;
/*
* Add LV to dtree.
* If we're working with precommitted metadata, clear any
* existing inactive table left behind.
* Major/minor settings only apply to the visible layer.
*/
/* FIXME Move the clear from here until later, so we can leave
* identical inactive tables untouched. (For pvmove.)
*/
if (!(dnode = dm_tree_add_new_dev_with_udev_flags(dtree, name, dlid,
layer ? UINT32_C(0) : (uint32_t) lv->major,
layer ? UINT32_C(0) : (uint32_t) lv->minor,
read_only_lv(lv, laopts, layer),
((lv->vg->status & PRECOMMITTED) | laopts->revert) ? 1 : 0,
lvlayer,
_get_udev_flags(dm, lv, layer, laopts->noscan, laopts->temporary,
lvlayer->visible_component))))
return_0;
/* Store existing name so we can do rename later */
lvlayer->old_name = dm_tree_node_get_name(dnode);
/* Create table */
dm->pvmove_mirror_count = 0u;
if (lv_is_pending_delete(lv))
/* Handle LVs with pending delete */
/* For now used only by cache segtype, TODO snapshots */
dm->track_pending_delete = 1;
if (lv_is_cache_vol(lv))
dm_list_iterate_items(sl, &lv->segs_using_this_lv)
if (lv_is_cache(sl->seg->lv) &&
/* Cachevol is used by cache LV segment -> add cvol-cdata/cmeta extra layer */
(!_add_new_cvol_subdev_to_dtree(dm, dtree, sl->seg->lv, laopts, lvlayer, 0) ||
!_add_new_cvol_subdev_to_dtree(dm, dtree, sl->seg->lv, laopts, lvlayer, 1)))
return_0;
/* This is unused cache-pool - make metadata accessible */
if (lv_is_cache_pool(lv))
lv = first_seg(lv)->metadata_lv;
/* If this is a snapshot origin, add real LV */
/* If this is a snapshot origin + merging snapshot, add cow + real LV */
/* Snapshot origin could be also external origin */
if (lv_is_origin(lv) && !layer) {
if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts, "real"))
return_0;
if (!laopts->no_merging && lv_is_merging_origin(lv)) {
if (!_add_new_lv_to_dtree(dm, dtree,
find_snapshot(lv)->cow, laopts, "cow"))
return_0;
/*
* Must also add "real" LV for use when
* snapshot-merge target is added
*/
if (!_add_snapshot_merge_target_to_dtree(dm, dnode, lv))
return_0;
} else if (!_add_origin_target_to_dtree(dm, dnode, lv))
return_0;
/* Add any snapshots of this LV */
dm_list_iterate(snh, &lv->snapshot_segs)
if (!_add_new_lv_to_dtree(dm, dtree,
dm_list_struct_base(snh, struct lv_segment,
origin_list)->cow,
laopts, NULL))
return_0;
} else if (lv_is_cow(lv) && !layer) {
if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts, "cow"))
return_0;
if (!_add_snapshot_target_to_dtree(dm, dnode, lv, laopts))
return_0;
} else if (!layer && ((lv_is_thin_pool(lv) && !lv_is_new_thin_pool(lv)) ||
lv_is_vdo_pool(lv) ||
lv_is_external_origin(lv))) {
/* External origin or 'used' Thin pool or VDO pool is using layer */
if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts, lv_layer(lv)))
return_0;
if (!_add_layer_target_to_dtree(dm, dnode, lv))
return_0;
} else {
/* Add 'real' segments for LVs */
dm_list_iterate_items(seg, &lv->segments) {
if (!_add_segment_to_dtree(dm, dtree, dnode, seg, laopts, layer))
return_0;
if (max_stripe_size < seg->stripe_size * seg->area_count)
max_stripe_size = seg->stripe_size * seg->area_count;
}
if (!layer && lv_is_vdo_pool(lv) &&
!_add_layer_target_to_dtree(dm, dnode, lv))
return_0;
}
/* Setup thin pool callback */
if (lv_is_thin_pool(lv) && layer &&
!_pool_register_callback(dm, dnode, lv))
return_0;
if (lv_is_cache(lv) && !lv_is_cache_vol(first_seg(lv)->pool_lv) &&
/* Register callback only for layer activation or non-layered cache LV */
(layer || !lv_layer(lv)) &&
/* Register callback when metadata LV is NOT already active */
!_cached_dm_info(dm->mem, dtree, first_seg(first_seg(lv)->pool_lv)->metadata_lv, NULL) &&
!_pool_register_callback(dm, dnode, lv))
return_0;
if (lv_is_cache(lv) && lv_is_cache_vol(first_seg(lv)->pool_lv) &&
/* Register callback only for layer activation or non-layered cache LV */
(layer || !lv_layer(lv)) &&
/* Register callback when cachevol LV is NOT already active */
!_cached_dm_info(dm->mem, dtree, first_seg(lv)->pool_lv, NULL) &&
!_pool_register_callback(dm, dnode, lv))
return_0;
/*
* Update tables for ANY PVMOVE holders of an active LV whose name starts with 'pvmove',
* but which is no longer a PVMOVE LV and is not a PVMOVE _mimage LV.
* When resume happens, tables MUST be already preloaded with correct entries!
* (since we can't preload different table while devices are suspended)
*/
if (!lv_is_pvmove(lv) && !strncmp(lv->name, "pvmove", 6) && !strchr(lv->name, '_') &&
(dinfo = _cached_dm_info(dm->mem, dtree, lv, NULL)))
if (!_add_holders_to_dtree(dm, dtree, lv, laopts, dinfo))
return_0;
if (read_ahead == DM_READ_AHEAD_AUTO) {
/* we need RA at least twice a whole stripe - see the comment in md/raid0.c */
read_ahead = max_stripe_size * 2;
/* FIXME: layered device read-ahead */
if (!read_ahead)
lv_calculate_readahead(lv, &read_ahead);
read_ahead_flags = DM_READ_AHEAD_MINIMUM_FLAG;
}
dm_tree_node_set_read_ahead(dnode, read_ahead, read_ahead_flags);
/* Add any LVs referencing a PVMOVE LV unless told not to */
if (dm->track_pvmove_deps && lv_is_pvmove(lv))
dm_list_iterate_items(sl, &lv->segs_using_this_lv)
if (!_add_new_lv_to_dtree(dm, dtree, sl->seg->lv, laopts, NULL))
return_0;
dm->track_pending_delete = save_pending_delete; /* restore */
return 1;
}
/* FIXME: symlinks should be created/destroyed at the same time
* as the kernel devices but we can't do that from within libdevmapper
* at present so we must walk the tree twice instead. */
/*
* Create LV symlinks for children of supplied root node.
*/
static int _create_lv_symlinks(struct dev_manager *dm, struct dm_tree_node *root)
{
void *handle = NULL;
struct dm_tree_node *child;
struct lv_layer *lvlayer;
char *old_vgname, *old_lvname, *old_layer;
char *new_vgname, *new_lvname, *new_layer;
const char *name;
int r = 1;
/* Nothing to do if udev fallback is disabled. */
if (!_check_udev_fallback(dm->cmd)) {
fs_set_create();
return 1;
}
while ((child = dm_tree_next_child(&handle, root, 0))) {
if (!(lvlayer = dm_tree_node_get_context(child)))
continue;
/* Detect rename */
name = dm_tree_node_get_name(child);
if (name && lvlayer->old_name && *lvlayer->old_name && strcmp(name, lvlayer->old_name)) {
if (!dm_split_lvm_name(dm->mem, lvlayer->old_name, &old_vgname, &old_lvname, &old_layer)) {
log_error("_create_lv_symlinks: Couldn't split up old device name %s.", lvlayer->old_name);
return 0;
}
if (!dm_split_lvm_name(dm->mem, name, &new_vgname, &new_lvname, &new_layer)) {
log_error("_create_lv_symlinks: Couldn't split up new device name %s.", name);
return 0;
}
if (!fs_rename_lv(lvlayer->lv, name, old_vgname, old_lvname))
r = 0;
continue;
}
if (_lv_has_mknode(lvlayer->lv) || lvlayer->visible_component) {
if (!_dev_manager_lv_mknodes(lvlayer->lv))
r = 0;
continue;
}
if (!_dev_manager_lv_rmnodes(lvlayer->lv))
r = 0;
}
return r;
}
/*
* Remove LV symlinks for children of supplied root node.
*/
static int _remove_lv_symlinks(struct dev_manager *dm, struct dm_tree_node *root)
{
void *handle = NULL;
struct dm_tree_node *child;
char *vgname, *lvname, *layer;
int r = 1;
/* Nothing to do if udev fallback is disabled. */
if (!_check_udev_fallback(dm->cmd))
return 1;
while ((child = dm_tree_next_child(&handle, root, 0))) {
if (!dm_split_lvm_name(dm->mem, dm_tree_node_get_name(child), &vgname, &lvname, &layer)) {
r = 0;
continue;
}
if (!*vgname)
continue;
/* only top level layer has symlinks */
if (*layer)
continue;
fs_del_lv_byname(dm->cmd->dev_dir, vgname, lvname,
dm->cmd->current_settings.udev_rules);
}
return r;
}
static int _clean_tree(struct dev_manager *dm, struct dm_tree_node *root, const char *non_toplevel_tree_dlid)
{
void *handle = NULL;
struct dm_tree_node *child;
char *vgname, *lvname, *layer;
const char *name, *uuid;
struct dm_str_list *dl;
while ((child = dm_tree_next_child(&handle, root, 0))) {
if (!(name = dm_tree_node_get_name(child)))
continue;
if (!(uuid = dm_tree_node_get_uuid(child)))
continue;
if (!dm_split_lvm_name(dm->mem, name, &vgname, &lvname, &layer)) {
log_error("_clean_tree: Couldn't split up device name %s.", name);
return 0;
}
/* Not meant to be top level? */
if (!*layer)
continue;
/* If operation was performed on a partial tree, don't remove it */
if (non_toplevel_tree_dlid && !strcmp(non_toplevel_tree_dlid, uuid))
continue;
if (!(uuid = dm_pool_strdup(dm->cmd->pending_delete_mem, uuid))) {
log_error("_clean_tree: Failed to duplicate uuid.");
return 0;
}
if (!str_list_add(dm->cmd->pending_delete_mem, &dm->cmd->pending_delete, uuid))
return_0;
}
/* Deactivate any tracked pending delete nodes */
if (!dm_list_empty(&dm->cmd->pending_delete) && !dm_get_suspended_counter()) {
fs_unlock();
dm_tree_set_cookie(root, fs_get_cookie());
dm_list_iterate_items(dl, &dm->cmd->pending_delete) {
log_debug_activation("Deleting tracked UUID %s.", dl->str);
if (!dm_tree_deactivate_children(root, dl->str, strlen(dl->str)))
return_0;
}
dm_list_init(&dm->cmd->pending_delete);
dm_pool_empty(dm->cmd->pending_delete_mem);
}
return 1;
}
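/*
 * Build a partial device tree for the LV and walk it according to the
 * requested action: preload, activate, deactivate, suspend or clean.
 */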
static int _tree_action(struct dev_manager *dm, const struct logical_volume *lv,
struct lv_activate_opts *laopts, action_t action)
{
static const char _action_names[][24] = {
"PRELOAD", "ACTIVATE", "DEACTIVATE", "SUSPEND", "SUSPEND_WITH_LOCKFS", "CLEAN"
};
const size_t DLID_SIZE = ID_LEN + sizeof(UUID_PREFIX) - 1;
struct dm_tree *dtree;
struct dm_tree_node *root;
char *dlid;
int r = 0;
if (action < DM_ARRAY_SIZE(_action_names))
log_debug_activation("Creating %s%s tree for %s.",
_action_names[action],
(laopts->origin_only) ? " origin-only" : "",
display_lvname(lv));
	/* Some LVs cannot be used as a top-level tree. */
/* TODO: add more.... */
if (lv_is_cache_pool(lv) && !dm_list_empty(&lv->segs_using_this_lv)) {
log_error(INTERNAL_ERROR "Cannot create tree for %s.",
display_lvname(lv));
return 0;
}
	/* Some targets may build a bigger tree for activation. */
dm->activation = ((action == PRELOAD) || (action == ACTIVATE));
dm->suspend = (action == SUSPEND_WITH_LOCKFS) || (action == SUSPEND);
	/* Drop any cached DM uuid entries before DM table manipulation within the locked section.
	 * TODO: check if it makes sense to manage the cache within the lock. */
dev_cache_destroy_dm_uuids();
dtree = _create_partial_dtree(dm, lv, laopts->origin_only);
if (!dtree)
return_0;
if (!(root = dm_tree_find_node(dtree, 0, 0))) {
log_error("Lost dependency tree root node.");
goto out_no_root;
2005-10-17 22:21:05 +04:00
}
/* Restore fs cookie */
dm_tree_set_cookie(root, fs_get_cookie());
if (!(dlid = build_dm_uuid(dm->mem, lv, laopts->origin_only ? lv_layer(lv) : NULL)))
goto_out;
/* Only process nodes with uuid of "LVM-" plus VG id. */
switch(action) {
case CLEAN:
if (retry_deactivation())
dm_tree_retry_remove(root);
/* Deactivate any unused non-toplevel nodes */
if (!_clean_tree(dm, root, laopts->origin_only ? dlid : NULL))
goto_out;
break;
case DEACTIVATE:
if (retry_deactivation())
dm_tree_retry_remove(root);
/* Deactivate LV and all devices it references that nothing else has open. */
if (!dm_tree_deactivate_children(root, dlid, DLID_SIZE))
goto_out;
if (!_remove_lv_symlinks(dm, root))
log_warn("Failed to remove all device symlinks associated with %s.",
display_lvname(lv));
break;
case SUSPEND:
dm_tree_skip_lockfs(root);
if (!dm->flush_required)
dm_tree_use_no_flush_suspend(root);
/* Fall through */
case SUSPEND_WITH_LOCKFS:
if (!dm_tree_suspend_children(root, dlid, DLID_SIZE))
goto_out;
break;
case PRELOAD:
case ACTIVATE:
/* Add all required new devices to tree */
if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts,
(lv_is_origin(lv) && laopts->origin_only) ? "real" :
(laopts->origin_only &&
(lv_is_thin_pool(lv) ||
lv_is_vdo_pool(lv))) ?
lv_layer(lv) : NULL))
goto_out;
/* Preload any devices required before any suspensions */
if (!dm_tree_preload_children(root, dlid, DLID_SIZE))
goto_out;
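		/* A negative result indicates some device size is being reduced;
		 * reductions always require a flush. */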
if ((dm_tree_node_size_changed(root) < 0))
dm->flush_required = 1;
		/* For now, keep requiring a flush for any size change of
		 * anything other than a thin or VDO pool/volume. */
else if (!lv_is_thin_volume(lv) &&
!lv_is_thin_pool(lv) &&
!lv_is_vdo(lv) &&
!lv_is_vdo_pool(lv) &&
dm_tree_node_size_changed(root))
dm->flush_required = 1;
if (action == ACTIVATE) {
if (!dm_tree_activate_children(root, dlid, DLID_SIZE))
goto_out;
if (!_create_lv_symlinks(dm, root))
log_warn("Failed to create symlinks for %s.",
display_lvname(lv));
}
break;
default:
log_error(INTERNAL_ERROR "_tree_action: Action %u not supported.", action);
goto out;
}
r = 1;
out:
/* Save fs cookie for udev settle, do not wait here */
fs_set_cookie(dm_tree_get_cookie(root));
out_no_root:
dm_tree_free(dtree);
return r;
}
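/*
 * Activation runs as two passes: ACTIVATE resumes/creates the devices,
 * then CLEAN deactivates any leftover non-toplevel nodes.
 */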
/* origin_only may only be set if we are resuming (not activating) an origin LV */
int dev_manager_activate(struct dev_manager *dm, const struct logical_volume *lv,
struct lv_activate_opts *laopts)
{
if (!_tree_action(dm, lv, laopts, ACTIVATE))
return_0;
if (!_tree_action(dm, lv, laopts, CLEAN))
return_0;
return 1;
}
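/*
 * Preload propagates flush_required in both directions: the caller seeds
 * it, and tree preloading may raise it (e.g. on a size reduction).
 */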
/* origin_only may only be set if we are resuming (not activating) an origin LV */
int dev_manager_preload(struct dev_manager *dm, const struct logical_volume *lv,
struct lv_activate_opts *laopts, int *flush_required)
{
dm->flush_required = *flush_required;
if (!_tree_action(dm, lv, laopts, PRELOAD))
return_0;
*flush_required = dm->flush_required;
return 1;
}
int dev_manager_deactivate(struct dev_manager *dm, const struct logical_volume *lv)
{
struct lv_activate_opts laopts = { 0 };
if (!_tree_action(dm, lv, &laopts, DEACTIVATE))
return_0;
return 1;
}
int dev_manager_suspend(struct dev_manager *dm, const struct logical_volume *lv,
struct lv_activate_opts *laopts, int lockfs, int flush_required)
{
dm->flush_required = flush_required;
if (!_tree_action(dm, lv, laopts, lockfs ? SUSPEND_WITH_LOCKFS : SUSPEND))
return_0;
return 1;
}
/*
* Does device use VG somewhere in its construction?
* Returns 1 if uncertain.
*/
int dev_manager_device_uses_vg(struct device *dev,
struct volume_group *vg)
{
struct dm_tree *dtree;
struct dm_tree_node *root;
char dlid[sizeof(UUID_PREFIX) + sizeof(struct id) - 1] __attribute__((aligned(8)));
int r = 1;
if (!(dtree = dm_tree_create())) {
log_error("Failed to create partial dtree.");
return r;
}
dm_tree_set_optional_uuid_suffixes(dtree, (const char**)_uuid_suffix_list);
if (!dm_tree_add_dev(dtree, MAJOR(dev->dev), MINOR(dev->dev))) {
log_error("Failed to add device %s (%u:%u) to dtree.",
dev_name(dev), MAJOR(dev->dev), MINOR(dev->dev));
goto out;
}
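	/* Build the "LVM-" + VG uuid prefix shared by every dlid in this VG. */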
memcpy(dlid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1);
memcpy(dlid + sizeof(UUID_PREFIX) - 1, &vg->id.uuid[0], sizeof(vg->id));
if (!(root = dm_tree_find_node(dtree, 0, 0))) {
log_error("Lost dependency tree root node.");
goto out;
}
if (dm_tree_children_use_uuid(root, dlid, sizeof(UUID_PREFIX) + sizeof(vg->id) - 1))
goto_out;
r = 0;
out:
dm_tree_free(dtree);
return r;
}
/*
* crypt offset is usually the LUKS header size but can be larger.
* The LUKS header is usually 2MB for LUKS1 and 16MB for LUKS2.
* The offset needs to be subtracted from the LV size to get the
* size used to resize the crypt device.
*/
int get_crypt_table_offset(dev_t crypt_devt, uint32_t *offset_bytes)
{
struct dm_task *dmt;
uint64_t start, length;
char *target_type = NULL;
void *next = NULL;
char *params = NULL;
char offset_str[32] = { 0 };
int copy_offset = 0;
int spaces = 0;
unsigned i, i_off = 0;
lvresize: add new options and defaults for fs handling The new option "--fs String" for lvresize/lvreduce/lvextend controls the handling of file systems before/after resizing the LV. --resizefs is the same as --fs resize. The new option "--fsmode String" can be used to control mounting and unmounting of the fs during resizing. Possible --fs values: checksize Only applies to reducing size; does nothing for extend. Check the fs size and reduce the LV if the fs is not using the affected space, i.e. the fs does not need to be shrunk. Fail the command without reducing the fs or LV if the fs is using the affected space. resize Resize the fs using the fs-specific resize command. This may include mounting, unmounting, or running fsck. See --fsmode to control mounting behavior, and --nofsck to disable fsck. resize_fsadm Use the old method of calling fsadm to handle the fs (deprecated.) Warning: this option does not prevent lvreduce from destroying file systems that are unmounted (or mounted if prompts are skipped.) ignore Resize the LV without checking for or handling a file system. Warning: using ignore when reducing the LV size may destroy the file system. Possible --fsmode values: manage Mount or unmount the fs as needed to resize the fs, and attempt to restore the original mount state at the end. nochange Do not mount or unmount the fs. If mounting or unmounting is required to resize the fs, then do not resize the fs or the LV and fail the command. offline Unmount the fs if it is mounted, and resize the fs while it is unmounted. If mounting is required to resize the fs, then do not resize the fs or the LV and fail the command. Notes on lvreduce: When no --fs or --resizefs option is specified: . lvextend default behavior is fs ignore. . lvreduce default behavior is fs checksize (includes activating the LV.) With the exception of --fs resize_fsadm|ignore, lvreduce requires the recent libblkid fields FSLASTBLOCK and FSBLOCKSIZE. FSLASTBLOCK*FSBLOCKSIZE is the last byte used by the fs on the LV, which determines if reducing the fs is necessary.
if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, NULL, NULL, NULL, NULL,
MAJOR(crypt_devt), MINOR(crypt_devt), 0, 0, 0)))
return_0;
next = dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
if (!target_type || !params || strcmp(target_type, "crypt")) {
dm_task_destroy(dmt);
return_0;
}
/*
* get offset from params string:
* <cipher> <key> <iv_offset> <device> <offset> [<#opt_params> <opt_params>]
* <offset> is reported in 512 byte sectors.
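	 * A hypothetical example: "aes-xts-plain64 <key> 0 253:2 32768 ..."
	 * would yield offset 32768 sectors, i.e. 32768 * 512 = 16 MiB.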
*/
for (i = 0; params[i]; i++) {
if (params[i] == ' ') {
spaces++;
if (spaces == 4)
copy_offset = 1;
if (spaces == 5)
break;
continue;
}
if (!copy_offset)
continue;
offset_str[i_off++] = params[i];
if (i_off == sizeof(offset_str)) {
offset_str[0] = '\0';
break;
}
}
dm_task_destroy(dmt);
if (!offset_str[0])
return_0;
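	/* Convert the 512-byte sector count to bytes. */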
*offset_bytes = ((uint32_t)strtoul(offset_str, NULL, 0) * 512);
return 1;
}