mirror of git://sourceware.org/git/lvm2.git
synced 2025-10-05 07:33:15 +03:00

Compare commits: 36 commits, dev-dct-cm ... dev-dct-cm
Commits (SHA1):

6626f110ca
3959308276
4cfc5858ce
0e6691d39f
1677bfd0a8
b019246502
ce2aa95601
e056d4e16f
dc5c46d733
fe432fe19d
4c70f999e0
96c52f1b40
3736656601
d05f32cfa0
f82aefeb87
0a0d991bb5
f98b4f3dc2
46ad67b31c
6c0dbea70a
4e9ca81960
bb1f0db3a6
40ef88f0da
2700ee4217
bec892a24f
c590594aa5
56ffa0f0dd
c87d9704f8
f30977d4bf
abb408a22a
edda3f5202
78df117bdf
1e551f4c78
773ebc72bb
33760d3dcc
b020ce3b6e
6f25f2f719
WHATS_NEW (10 changed lines):
```diff
@@ -1,15 +1,5 @@
 Version 2.02.169 -
 =====================================
-  Support region size changes on existing RaidLVs
-  Avoid parallel usage of cpg_mcast_joined() in clvmd with corosync.
-  Support raid6_{ls,rs,la,ra}_6 segment types and conversions from/to it.
-  Support raid6_n_6 segment type and conversions from/to it.
-  Support raid5_n segment type and conversions from/to it.
-  Support new internal command _dmeventd_thin_command.
-  Introduce new dmeventd/thin_command configurable setting.
-  Use new default units 'r' for displaying sizes.
-  Also unmount mount point on top of MD device if using blkdeactivate -u.
-  Restore check preventing resize of cache type volumes (2.02.158).
   Add missing udev sync when flushing dirty cache content.
   vgchange -p accepts only uint32 numbers.
   Report thin LV date for merged LV when the merge is in progress.
```
WHATS_NEW_DM:

```diff
@@ -1,7 +1,5 @@
 Version 1.02.138 -
 =====================================
-  Support configurable command executed from dmeventd thin plugin.
-  Support new R|r human readable units output format.
   Thin dmeventd plugin reacts faster on lvextend failure path with umount.
   Add dm_stats_bind_from_fd() to bind a stats handle from a file descriptor.
   Do not try call callback when reverting activation on error path.
```
example.conf.in:

```diff
@@ -665,7 +665,7 @@ global {
 
 	# Configuration option global/units.
 	# Default value for --units argument.
-	units = "r"
+	units = "h"
 
 	# Configuration option global/si_unit_consistency.
 	# Distinguish between powers of 1024 and 1000 bytes.
```
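For context on the reverted default: the newer 'r' mode behaves like 'h' (human readable) but additionally flags rounded values, so a size that was rounded for display is shown with a '<' or '>' prefix. A minimal lvm.conf sketch keeping the newer default:

```
global {
	# "r" = human readable with rounding indicator; lvs would then show
	# e.g. "<931.51g" where the displayed value was rounded down.
	units = "r"
}
```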
```diff
@@ -1156,8 +1156,7 @@ activation {
 	# Configuration option activation/missing_stripe_filler.
 	# Method to fill missing stripes when activating an incomplete LV.
 	# Using 'error' will make inaccessible parts of the device return I/O
-	# errors on access. Using 'zero' will return success (and zero) on I/O
-	# You can instead use a device path, in which case,
+	# errors on access. You can instead use a device path, in which case,
 	# that device will be used in place of missing stripes. Using anything
 	# other than 'error' with mirrored or snapshotted volumes is likely to
 	# result in data corruption.
```
```diff
@@ -2049,14 +2048,6 @@ dmeventd {
 	# warning is repeated when 85%, 90% and 95% of the pool is filled.
 	thin_library = "libdevmapper-event-lvm2thin.so"
 
-	# Configuration option dmeventd/thin_command.
-	# The plugin runs command with each 5% increment when thin-pool data volume
-	# or metadata volume gets above 50%.
-	# Command which starts with 'lvm ' prefix is internal lvm command.
-	# You can write your own handler to customise behaviour in more details.
-	# This configuration option has an automatic default value.
-	# thin_command = "lvm lvextend --use-policies"
-
 	# Configuration option dmeventd/executable.
 	# The full path to the dmeventd binary.
 	# This configuration option has an automatic default value.
```
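The reverted option is easiest to read as a concrete setting. A minimal lvm.conf sketch, assuming the 2.02.169 option names from the comment block above; the handler path is hypothetical:

```
dmeventd {
	# Default behaviour (the 'lvm ' prefix marks an internal lvm command):
	# thin_command = "lvm lvextend --use-policies"

	# Hypothetical external handler instead; dmeventd invokes it at each
	# 5% step once data or metadata usage exceeds 50%:
	thin_command = "/usr/local/sbin/my-thin-handler"
}
```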
clvmd-corosync.c:

```diff
@@ -532,7 +532,6 @@ static int _cluster_fd_callback(struct local_client *fd, char *buf, int len,
 static int _cluster_send_message(const void *buf, int msglen, const char *csid,
 				 const char *errtext)
 {
-	static pthread_mutex_t _mutex = PTHREAD_MUTEX_INITIALIZER;
 	struct iovec iov[2];
 	cs_error_t err;
 	int target_node;
@@ -547,10 +546,7 @@ static int _cluster_send_message(const void *buf, int msglen, const char *csid,
 	iov[1].iov_base = (char *)buf;
 	iov[1].iov_len = msglen;
 
-	pthread_mutex_lock(&_mutex);
 	err = cpg_mcast_joined(cpg_handle, CPG_TYPE_AGREED, iov, 2);
-	pthread_mutex_unlock(&_mutex);
-
 	return cs_to_errno(err);
 }
```
dmeventd_lvm.c (dmeventd lvm2 plugin library):

```diff
@@ -121,7 +121,6 @@ int dmeventd_lvm2_run(const char *cmdline)
 int dmeventd_lvm2_command(struct dm_pool *mem, char *buffer, size_t size,
 			  const char *cmd, const char *device)
 {
-	static char _internal_prefix[] = "_dmeventd_";
 	char *vg = NULL, *lv = NULL, *layer;
 	int r;
 
@@ -136,21 +135,6 @@ int dmeventd_lvm2_command(struct dm_pool *mem, char *buffer, size_t size,
 	    (layer = strstr(lv, "_mlog")))
 		*layer = '\0';
 
-	if (!strncmp(cmd, _internal_prefix, sizeof(_internal_prefix) - 1)) {
-		dmeventd_lvm2_lock();
-		/* output of internal command passed via env var */
-		if (!dmeventd_lvm2_run(cmd))
-			cmd = NULL;
-		else if ((cmd = getenv(cmd)))
-			cmd = dm_pool_strdup(mem, cmd); /* copy with lock */
-		dmeventd_lvm2_unlock();
-
-		if (!cmd) {
-			log_error("Unable to find configured command.");
-			return 0;
-		}
-	}
-
 	r = dm_snprintf(buffer, size, "%s %s/%s", cmd, vg, lv);
 
 	dm_pool_free(mem, vg);
```
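The removed block implements the `_dmeventd_` prefix convention: the library runs the internal lvm command, which publishes the configured command line in an environment variable named after that internal command, and `getenv(cmd)` then retrieves it. A rough Python sketch of the lookup flow, with hypothetical names, just to illustrate:

```python
import os

def resolve_command(cmd, run_internal):
    """Mimic the removed C logic: for '_dmeventd_*' commands, run the
    internal lvm command and read the result back from an environment
    variable that is named after the command itself."""
    if cmd.startswith("_dmeventd_"):
        if not run_internal(cmd):       # e.g. runs 'lvm _dmeventd_thin_command'
            return None                 # internal command failed
        return os.environ.get(cmd)      # its output is passed via env var
    return cmd                          # ordinary external command

# Hypothetically, resolve_command("_dmeventd_thin_command", runner) would
# return the configured value, e.g. "lvm lvextend --use-policies".
```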
dmeventd_thin.c (dmeventd thin plugin):

```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011-2017 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2011-2016 Red Hat, Inc. All rights reserved.
  *
  * This file is part of LVM2.
  *
@@ -40,26 +40,17 @@
 
 #define UMOUNT_COMMAND "/bin/umount"
 
-#define MAX_FAILS (256)  /* ~42 mins between cmd call retry with 10s delay */
+#define MAX_FAILS (10)
 
 #define THIN_DEBUG 0
 
 struct dso_state {
 	struct dm_pool *mem;
 	int metadata_percent_check;
-	int metadata_percent;
-	int metadata_warn_once;
 	int data_percent_check;
-	int data_percent;
-	int data_warn_once;
 	uint64_t known_metadata_size;
 	uint64_t known_data_size;
 	unsigned fails;
-	unsigned max_fails;
-	int restore_sigset;
-	sigset_t old_sigset;
-	pid_t pid;
-	char **argv;
 	char cmd_str[1024];
 };
```
@@ -67,95 +58,259 @@ DM_EVENT_LOG_FN("thin")
|
||||
|
||||
#define UUID_PREFIX "LVM-"
|
||||
|
||||
static int _run_command(struct dso_state *state)
|
||||
/* Figure out device UUID has LVM- prefix and is OPEN */
|
||||
static int _has_unmountable_prefix(int major, int minor)
|
||||
{
|
||||
char val[3][36];
|
||||
char *env[] = { val[0], val[1], val[2], NULL };
|
||||
int i;
|
||||
struct dm_task *dmt;
|
||||
struct dm_info info;
|
||||
const char *uuid;
|
||||
int r = 0;
|
||||
|
||||
/* Mark for possible lvm2 command we are running from dmeventd
|
||||
* lvm2 will not try to talk back to dmeventd while processing it */
|
||||
(void) dm_snprintf(val[0], sizeof(val[0]), "LVM_RUN_BY_DMEVENTD=1");
|
||||
if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
|
||||
return_0;
|
||||
|
||||
if (state->data_percent) {
|
||||
/* Prepare some known data to env vars for easy use */
|
||||
(void) dm_snprintf(val[1], sizeof(val[1]), "DMEVENTD_THIN_POOL_DATA=%d",
|
||||
state->data_percent / DM_PERCENT_1);
|
||||
(void) dm_snprintf(val[2], sizeof(val[2]), "DMEVENTD_THIN_POOL_METADATA=%d",
|
||||
state->metadata_percent / DM_PERCENT_1);
|
||||
} else {
|
||||
/* For an error event it's for a user to check status and decide */
|
||||
env[1] = NULL;
|
||||
log_debug("Error event processing");
|
||||
if (!dm_task_set_major_minor(dmt, major, minor, 1))
|
||||
goto_out;
|
||||
|
||||
if (!dm_task_no_flush(dmt))
|
||||
stack;
|
||||
|
||||
if (!dm_task_run(dmt))
|
||||
goto out;
|
||||
|
||||
if (!dm_task_get_info(dmt, &info))
|
||||
goto out;
|
||||
|
||||
if (!info.exists || !info.open_count)
|
||||
goto out; /* Not open -> not mounted */
|
||||
|
||||
if (!(uuid = dm_task_get_uuid(dmt)))
|
||||
goto out;
|
||||
|
||||
/* Check it's public mountable LV
|
||||
* has prefix LVM- and UUID size is 68 chars */
|
||||
if (memcmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1) ||
|
||||
strlen(uuid) != 68)
|
||||
goto out;
|
||||
|
||||
#if THIN_DEBUG
|
||||
log_debug("Found logical volume %s (%u:%u).", uuid, major, minor);
|
||||
#endif
|
||||
r = 1;
|
||||
out:
|
||||
dm_task_destroy(dmt);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/* Get dependencies for device, and try to find matching device */
|
||||
static int _has_deps(const char *name, int tp_major, int tp_minor, int *dev_minor)
|
||||
{
|
||||
struct dm_task *dmt;
|
||||
const struct dm_deps *deps;
|
||||
struct dm_info info;
|
||||
int major, minor;
|
||||
int r = 0;
|
||||
|
||||
if (!(dmt = dm_task_create(DM_DEVICE_DEPS)))
|
||||
return 0;
|
||||
|
||||
if (!dm_task_set_name(dmt, name))
|
||||
goto out;
|
||||
|
||||
if (!dm_task_no_open_count(dmt))
|
||||
goto out;
|
||||
|
||||
if (!dm_task_run(dmt))
|
||||
goto out;
|
||||
|
||||
if (!dm_task_get_info(dmt, &info))
|
||||
goto out;
|
||||
|
||||
if (!(deps = dm_task_get_deps(dmt)))
|
||||
goto out;
|
||||
|
||||
if (!info.exists || deps->count != 1)
|
||||
goto out;
|
||||
|
||||
major = (int) MAJOR(deps->device[0]);
|
||||
minor = (int) MINOR(deps->device[0]);
|
||||
if ((major != tp_major) || (minor != tp_minor))
|
||||
goto out;
|
||||
|
||||
*dev_minor = info.minor;
|
||||
|
||||
if (!_has_unmountable_prefix(major, info.minor))
|
||||
goto out;
|
||||
|
||||
#if THIN_DEBUG
|
||||
{
|
||||
char dev_name[PATH_MAX];
|
||||
if (dm_device_get_name(major, minor, 0, dev_name, sizeof(dev_name)))
|
||||
log_debug("Found %s (%u:%u) depends on %s.",
|
||||
name, major, *dev_minor, dev_name);
|
||||
}
|
||||
#endif
|
||||
r = 1;
|
||||
out:
|
||||
dm_task_destroy(dmt);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/* Get all active devices */
|
||||
static int _find_all_devs(dm_bitset_t bs, int tp_major, int tp_minor)
|
||||
{
|
||||
struct dm_task *dmt;
|
||||
struct dm_names *names;
|
||||
unsigned next = 0;
|
||||
int minor, r = 1;
|
||||
|
||||
if (!(dmt = dm_task_create(DM_DEVICE_LIST)))
|
||||
return 0;
|
||||
|
||||
if (!dm_task_run(dmt)) {
|
||||
r = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
log_verbose("Executing command: %s", state->cmd_str);
|
||||
if (!(names = dm_task_get_names(dmt))) {
|
||||
r = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* TODO:
|
||||
* Support parallel run of 'task' and it's waitpid maintainence
|
||||
* ATM we can't handle signaling of SIGALRM
|
||||
* as signalling is not allowed while 'process_event()' is running
|
||||
*/
|
||||
if (!(state->pid = fork())) {
|
||||
/* child */
|
||||
(void) close(0);
|
||||
for (i = 3; i < 255; ++i) (void) close(i);
|
||||
execve(state->argv[0], state->argv, env);
|
||||
_exit(errno);
|
||||
} else if (state->pid == -1) {
|
||||
log_error("Can't fork command %s.", state->cmd_str);
|
||||
state->fails = 1;
|
||||
return 0;
|
||||
if (!names->dev)
|
||||
goto out;
|
||||
|
||||
do {
|
||||
names = (struct dm_names *)((char *) names + next);
|
||||
if (_has_deps(names->name, tp_major, tp_minor, &minor))
|
||||
dm_bit_set(bs, minor);
|
||||
next = names->next;
|
||||
} while (next);
|
||||
|
||||
out:
|
||||
dm_task_destroy(dmt);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static int _run(const char *cmd, ...)
|
||||
{
|
||||
va_list ap;
|
||||
int argc = 1; /* for argv[0], i.e. cmd */
|
||||
int i = 0;
|
||||
const char **argv;
|
||||
pid_t pid = fork();
|
||||
int status;
|
||||
|
||||
if (pid == 0) { /* child */
|
||||
va_start(ap, cmd);
|
||||
while (va_arg(ap, const char *))
|
||||
++argc;
|
||||
va_end(ap);
|
||||
|
||||
/* + 1 for the terminating NULL */
|
||||
argv = alloca(sizeof(const char *) * (argc + 1));
|
||||
|
||||
argv[0] = cmd;
|
||||
va_start(ap, cmd);
|
||||
while ((argv[++i] = va_arg(ap, const char *)));
|
||||
va_end(ap);
|
||||
|
||||
execvp(cmd, (char **)argv);
|
||||
log_sys_error("exec", cmd);
|
||||
exit(127);
|
||||
}
|
||||
|
||||
if (pid > 0) { /* parent */
|
||||
if (waitpid(pid, &status, 0) != pid)
|
||||
return 0; /* waitpid failed */
|
||||
if (!WIFEXITED(status) || WEXITSTATUS(status))
|
||||
return 0; /* the child failed */
|
||||
}
|
||||
|
||||
if (pid < 0)
|
||||
return 0; /* fork failed */
|
||||
|
||||
return 1; /* all good */
|
||||
}
|
||||
|
||||
struct mountinfo_s {
|
||||
const char *device;
|
||||
struct dm_info info;
|
||||
dm_bitset_t minors; /* Bitset for active thin pool minors */
|
||||
};
|
||||
|
||||
static int _umount_device(char *buffer, unsigned major, unsigned minor,
|
||||
char *target, void *cb_data)
|
||||
{
|
||||
struct mountinfo_s *data = cb_data;
|
||||
char *words[10];
|
||||
|
||||
if ((major == data->info.major) && dm_bit(data->minors, minor)) {
|
||||
if (dm_split_words(buffer, DM_ARRAY_SIZE(words), 0, words) < DM_ARRAY_SIZE(words))
|
||||
words[9] = NULL; /* just don't show device name */
|
||||
log_info("Unmounting thin %s (%d:%d) of thin pool %s (%u:%u) from mount point \"%s\".",
|
||||
words[9] ? : "", major, minor, data->device,
|
||||
data->info.major, data->info.minor,
|
||||
target);
|
||||
if (!_run(UMOUNT_COMMAND, "-fl", target, NULL))
|
||||
log_error("Failed to lazy umount thin %s (%d:%d) from %s: %s.",
|
||||
words[9], major, minor, target, strerror(errno));
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Find all thin pool LV users and try to umount them.
|
||||
* TODO: work with read-only thin pool support
|
||||
*/
|
||||
static void _umount(struct dm_task *dmt)
|
||||
{
|
||||
/* TODO: Convert to use hash to reduce memory usage */
|
||||
static const size_t MINORS = (1U << 20); /* 20 bit */
|
||||
struct mountinfo_s data = { NULL };
|
||||
|
||||
if (!dm_task_get_info(dmt, &data.info))
|
||||
return;
|
||||
|
||||
data.device = dm_task_get_name(dmt);
|
||||
|
||||
if (!(data.minors = dm_bitset_create(NULL, MINORS))) {
|
||||
log_error("Failed to allocate bitset. Not unmounting %s.", data.device);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!_find_all_devs(data.minors, data.info.major, data.info.minor)) {
|
||||
log_error("Failed to detect mounted volumes for %s.", data.device);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!dm_mountinfo_read(_umount_device, &data)) {
|
||||
log_error("Could not parse mountinfo file.");
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
if (data.minors)
|
||||
dm_bitset_destroy(data.minors);
|
||||
}
|
||||
|
||||
static int _use_policy(struct dm_task *dmt, struct dso_state *state)
|
||||
{
|
||||
#if THIN_DEBUG
|
||||
log_debug("dmeventd executes: %s.", state->cmd_str);
|
||||
#endif
|
||||
if (state->argv)
|
||||
return _run_command(state);
|
||||
|
||||
if (!dmeventd_lvm2_run_with_lock(state->cmd_str)) {
|
||||
log_error("Failed command for %s.", dm_task_get_name(dmt));
|
||||
state->fails = 1;
|
||||
log_error("Failed to extend thin pool %s.",
|
||||
dm_task_get_name(dmt));
|
||||
state->fails++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
state->fails = 0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Check if executed command has finished
|
||||
* Only 1 command may run */
|
||||
static int _wait_for_pid(struct dso_state *state)
|
||||
{
|
||||
int status = 0;
|
||||
|
||||
if (state->pid == -1)
|
||||
return 1;
|
||||
|
||||
if (!waitpid(state->pid, &status, WNOHANG))
|
||||
return 0;
|
||||
|
||||
/* Wait for finish */
|
||||
if (WIFEXITED(status)) {
|
||||
log_verbose("Child %d exited with status %d.",
|
||||
state->pid, WEXITSTATUS(status));
|
||||
state->fails = WEXITSTATUS(status) ? 1 : 0;
|
||||
} else {
|
||||
if (WIFSIGNALED(status))
|
||||
log_verbose("Child %d was terminated with status %d.",
|
||||
state->pid, WTERMSIG(status));
|
||||
state->fails = 1;
|
||||
}
|
||||
|
||||
state->pid = -1;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
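The removed `_run_command()` exports three environment variables to the external handler: `LVM_RUN_BY_DMEVENTD=1`, `DMEVENTD_THIN_POOL_DATA` and `DMEVENTD_THIN_POOL_METADATA` (whole percent values; on an error event only the first is set), and `dmeventd_lvm2_command()` appends the `vg/lv` name as an argument. A hypothetical thin_command handler sketched in Python, consuming exactly those inputs; the 90% policy is purely illustrative:

```python
#!/usr/bin/env python3
# Hypothetical dmeventd/thin_command handler. The env var names and the
# trailing vg/lv argument come from the removed code above.
import os
import sys

pool = sys.argv[1] if len(sys.argv) > 1 else "?"   # "vg/lv" appended by dmeventd
data = os.environ.get("DMEVENTD_THIN_POOL_DATA")
meta = os.environ.get("DMEVENTD_THIN_POOL_METADATA")

if data is None:
    # Error event: no usage values were exported; check status yourself.
    sys.exit(0)

if int(data) >= 90 or int(meta) >= 90:
    # React however you like: extend the pool, page an operator, ...
    print(f"thin pool {pool} above 90%: data={data}% metadata={meta}%")
sys.exit(0)
```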
```diff
@@ -164,6 +319,7 @@ void process_event(struct dm_task *dmt,
 		   void **user)
 {
 	const char *device = dm_task_get_name(dmt);
+	int percent;
 	struct dso_state *state = *user;
 	struct dm_status_thin_pool *tps = NULL;
 	void *next = NULL;
@@ -171,6 +327,7 @@ void process_event(struct dm_task *dmt,
 	char *target_type = NULL;
 	char *params;
 	int needs_policy = 0;
+	int needs_umount = 0;
 	struct dm_task *new_dmt = NULL;
 
 #if THIN_DEBUG
@@ -178,15 +335,14 @@ void process_event(struct dm_task *dmt,
 		  dm_percent_to_float(state->data_percent_check),
 		  dm_percent_to_float(state->metadata_percent_check));
 #endif
-	if (!_wait_for_pid(state)) {
-		log_warn("WARNING: Skipping event, child %d is still running (%s).",
-			 state->pid, state->cmd_str);
-		return;
-	}
-
+#if 0
+	/* No longer monitoring, waiting for remove */
+	if (!state->meta_percent_check && !state->data_percent_check)
+		return;
+#endif
 	if (event & DM_EVENT_DEVICE_ERROR) {
-		/* Error -> no need to check and do instant resize */
-		state->data_percent = state->metadata_percent = 0;
 		if (_use_policy(dmt, state))
 			goto out;
 
@@ -224,6 +380,7 @@ void process_event(struct dm_task *dmt,
 
 	if (!dm_get_status_thin_pool(state->mem, params, &tps)) {
 		log_error("Failed to parse status.");
+		needs_umount = 1;
 		goto out;
 	}
```
```diff
@@ -238,114 +395,72 @@ void process_event(struct dm_task *dmt,
 	if (state->known_metadata_size != tps->total_metadata_blocks) {
 		state->metadata_percent_check = CHECK_MINIMUM;
 		state->known_metadata_size = tps->total_metadata_blocks;
 		state->fails = 0;
 	}
 
 	if (state->known_data_size != tps->total_data_blocks) {
 		state->data_percent_check = CHECK_MINIMUM;
 		state->known_data_size = tps->total_data_blocks;
 		state->fails = 0;
 	}
 
-	/*
-	 * Trigger action when threshold boundary is exceeded.
-	 * Report 80% threshold warning when it's used above 80%.
-	 * Only 100% is exception as it cannot be surpased so policy
-	 * action is called for:  >50%, >55% ... >95%, 100%
-	 */
-	state->metadata_percent = dm_make_percent(tps->used_metadata_blocks, tps->total_metadata_blocks);
-	if (state->metadata_percent <= WARNING_THRESH)
-		state->metadata_warn_once = 0; /* Dropped bellow threshold, reset warn once */
-	else if (!state->metadata_warn_once++) /* Warn once when raised above threshold */
-		log_warn("WARNING: Thin pool %s metadata is now %.2f%% full.",
-			 device, dm_percent_to_float(state->metadata_percent));
-	if (state->metadata_percent > CHECK_MINIMUM) {
-		/* Run action when usage raised more than CHECK_STEP since the last time */
-		if (state->metadata_percent > state->metadata_percent_check)
-			needs_policy = 1;
-		state->metadata_percent_check = (state->metadata_percent / CHECK_STEP + 1) * CHECK_STEP;
-		if (state->metadata_percent_check == DM_PERCENT_100)
-			state->metadata_percent_check--; /* Can't get bigger then 100% */
-	} else
-		state->metadata_percent_check = CHECK_MINIMUM;
-
-	state->data_percent = dm_make_percent(tps->used_data_blocks, tps->total_data_blocks);
-	if (state->data_percent <= WARNING_THRESH)
-		state->data_warn_once = 0;
-	else if (!state->data_warn_once++)
-		log_warn("WARNING: Thin pool %s data is now %.2f%% full.",
-			 device, dm_percent_to_float(state->data_percent));
-	if (state->data_percent > CHECK_MINIMUM) {
-		/* Run action when usage raised more than CHECK_STEP since the last time */
-		if (state->data_percent > state->data_percent_check)
-			needs_policy = 1;
-		state->data_percent_check = (state->data_percent / CHECK_STEP + 1) * CHECK_STEP;
-		if (state->data_percent_check == DM_PERCENT_100)
-			state->data_percent_check--; /* Can't get bigger then 100% */
-	} else
-		state->data_percent_check = CHECK_MINIMUM;
-
-	/* Reduce number of _use_policy() calls by power-of-2 factor till frequency of MAX_FAILS is reached.
-	 * Avoids too high number of error retries, yet shows some status messages in log regularly.
-	 * i.e. PV could have been pvmoved and VG/LV was locked for a while...
-	 */
-	if (state->fails) {
-		if (state->fails++ <= state->max_fails) {
-			log_debug("Postponing frequently failing policy (%u <= %u).",
-				  state->fails - 1, state->max_fails);
-			return;
-		}
-		if (state->max_fails < MAX_FAILS)
-			state->max_fails <<= 1;
-		state->fails = needs_policy = 1; /* Retry failing command */
-	} else
-		state->max_fails = 1; /* Reset on success */
-
-	if (needs_policy)
-		_use_policy(dmt, state);
+	percent = dm_make_percent(tps->used_metadata_blocks, tps->total_metadata_blocks);
+	if (percent >= state->metadata_percent_check) {
+		/*
+		 * Usage has raised more than CHECK_STEP since the last
+		 * time. Run actions.
+		 */
+		state->metadata_percent_check = (percent / CHECK_STEP) * CHECK_STEP + CHECK_STEP;
+
+		/* FIXME: extension of metadata needs to be written! */
+		if (percent >= WARNING_THRESH) /* Print a warning to syslog. */
+			log_warn("WARNING: Thin pool %s metadata is now %.2f%% full.",
+				 device, dm_percent_to_float(percent));
+		needs_policy = 1;
+
+		if (percent >= UMOUNT_THRESH)
+			needs_umount = 1;
+	}
+
+	percent = dm_make_percent(tps->used_data_blocks, tps->total_data_blocks);
+	if (percent >= state->data_percent_check) {
+		/*
+		 * Usage has raised more than CHECK_STEP since
+		 * the last time. Run actions.
+		 */
+		state->data_percent_check = (percent / CHECK_STEP) * CHECK_STEP + CHECK_STEP;
+
+		if (percent >= WARNING_THRESH) /* Print a warning to syslog. */
+			log_warn("WARNING: Thin pool %s data is now %.2f%% full.",
+				 device, dm_percent_to_float(percent));
+		needs_policy = 1;
+
+		if (percent >= UMOUNT_THRESH)
+			needs_umount = 1;
+	}
+
+	if (needs_policy &&
+	    _use_policy(dmt, state))
+		needs_umount = 0; /* No umount when command was successful */
 out:
+	if (needs_umount) {
+		_umount(dmt);
+		/* Until something changes, do not retry any more actions */
+		state->data_percent_check = state->metadata_percent_check = (DM_PERCENT_1 * 101);
+	}
+
 	if (tps)
 		dm_pool_free(state->mem, tps);
 
 	if (state->fails >= MAX_FAILS) {
 		log_warn("WARNING: Dropping monitoring of %s. "
 			 "lvm2 command fails too often (%u times in row).",
 			 device, state->fails);
 		pthread_kill(pthread_self(), SIGALRM);
 	}
 
 	if (new_dmt)
 		dm_task_destroy(new_dmt);
 }
 
-/* Handle SIGCHLD for a thread */
-static void _sig_child(int signum __attribute__((unused)))
-{
-	/* empty SIG_IGN */;
-}
-
-/* Setup handler for SIGCHLD when executing external command
- * to get quick 'waitpid()' reaction
- * It will interrupt syscall just like SIGALRM and
- * invoke process_event().
- */
-static void _init_thread_signals(struct dso_state *state)
-{
-	struct sigaction act = { .sa_handler = _sig_child };
-	sigset_t my_sigset;
-
-	sigemptyset(&my_sigset);
-
-	if (sigaction(SIGCHLD, &act, NULL))
-		log_warn("WARNING: Failed to set SIGCHLD action.");
-	else if (sigaddset(&my_sigset, SIGCHLD))
-		log_warn("WARNING: Failed to add SIGCHLD to set.");
-	else if (pthread_sigmask(SIG_UNBLOCK, &my_sigset, &state->old_sigset))
-		log_warn("WARNING: Failed to unblock SIGCHLD.");
-	else
-		state->restore_sigset = 1;
-}
-
-static void _restore_thread_signals(struct dso_state *state)
-{
-	if (state->restore_sigset &&
-	    pthread_sigmask(SIG_SETMASK, &state->old_sigset, NULL))
-		log_warn("WARNING: Failed to block SIGCHLD.");
-}
-
 int register_device(const char *device,
 		    const char *uuid __attribute__((unused)),
 		    int major __attribute__((unused)),
```
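The two policies above step their check thresholds differently; a quick worked comparison in Python helps, assuming CHECK_STEP is 5 percentage points (matching the ">50%, >55% ... >95%, 100%" comment; the C code stores values scaled by DM_PERCENT_1):

```python
# Illustrative comparison of the two threshold-stepping rules above.
CHECK_STEP = 5

def next_check_removed(percent):
    # Removed 2.02.169-style rule: round *up* to the next step boundary,
    # capped just below 100 so the 100%-full event still triggers an action.
    check = (percent // CHECK_STEP + 1) * CHECK_STEP
    return check - 1 if check == 100 else check

def next_check_restored(percent):
    # Restored older rule: floor to the current step, then add one step.
    return (percent // CHECK_STEP) * CHECK_STEP + CHECK_STEP

for p in (52, 95, 99):
    print(p, next_check_removed(p), next_check_restored(p))
# 52 -> 55 / 55;  95 -> 99 / 100;  99 -> 99 / 100
```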
```diff
@@ -353,36 +468,20 @@ int register_device(const char *device,
 		    void **user)
 {
 	struct dso_state *state;
-	int maxcmd;
-	char *str;
 
 	if (!dmeventd_lvm2_init_with_pool("thin_pool_state", state))
 		goto_bad;
 
 	if (!dmeventd_lvm2_command(state->mem, state->cmd_str,
 				   sizeof(state->cmd_str),
-				   "_dmeventd_thin_command", device)) {
+				   "lvextend --use-policies",
+				   device)) {
 		dmeventd_lvm2_exit_with_pool(state);
 		goto_bad;
 	}
 
-	if (strncmp(state->cmd_str, "lvm ", 4)) {
-		maxcmd = 2; /* space for last NULL element */
-		for (str = state->cmd_str; *str; str++)
-			if (*str == ' ')
-				maxcmd++;
-		if (!(str = dm_pool_strdup(state->mem, state->cmd_str)) ||
-		    !(state->argv = dm_pool_zalloc(state->mem, maxcmd * sizeof(char *)))) {
-			log_error("Failed to allocate memory for command.");
-			goto bad;
-		}
-
-		dm_split_words(str, maxcmd - 1, 0, state->argv);
-		_init_thread_signals(state);
-	} else
-		memmove(state->cmd_str, state->cmd_str + 4, strlen(state->cmd_str + 4) + 1);
-
-	state->pid = -1;
 	state->metadata_percent_check = CHECK_MINIMUM;
 	state->data_percent_check = CHECK_MINIMUM;
 	*user = state;
 
 	log_info("Monitoring thin pool %s.", device);
```
```diff
@@ -401,28 +500,6 @@ int unregister_device(const char *device,
 		      void **user)
 {
 	struct dso_state *state = *user;
-	int i;
-
-	for (i = 0; !_wait_for_pid(state) && (i < 6); ++i) {
-		if (i == 0)
-			/* Give it 2 seconds, then try to terminate & kill it */
-			log_verbose("Child %d still not finished (%s) waiting.",
-				    state->pid, state->cmd_str);
-		else if (i == 3) {
-			log_warn("WARNING: Terminating child %d.", state->pid);
-			kill(state->pid, SIGINT);
-			kill(state->pid, SIGTERM);
-		} else if (i == 5) {
-			log_warn("WARNING: Killing child %d.", state->pid);
-			kill(state->pid, SIGKILL);
-		}
-		sleep(1);
-	}
-
-	if (state->pid != -1)
-		log_warn("WARNING: Cannot kill child %d!", state->pid);
-
-	_restore_thread_signals(state);
 
 	dmeventd_lvm2_exit_with_pool(state);
 	log_info("No longer monitoring thin pool %s.", device);
```
lvmdbusd cmdhandler.py:

```diff
@@ -295,7 +295,7 @@ def vg_lv_snapshot(vg_name, snapshot_options, name, size_bytes):
 	return call(cmd)
 
 
-def _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool):
+def vg_lv_create_linear(vg_name, create_options, name, size_bytes, thin_pool):
 	cmd = ['lvcreate']
 	cmd.extend(options_to_cli_args(create_options))
 
@@ -303,18 +303,20 @@ def vg_lv_create_linear(vg_name, create_options, name, size_bytes, thin_pool):
 		cmd.extend(['--size', str(size_bytes) + 'B'])
 	else:
 		cmd.extend(['--thin', '--size', str(size_bytes) + 'B'])
-	return cmd
-
-
-def vg_lv_create_linear(vg_name, create_options, name, size_bytes, thin_pool):
-	cmd = _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool)
 	cmd.extend(['--name', name, vg_name])
 	return call(cmd)
 
 
 def vg_lv_create_striped(vg_name, create_options, name, size_bytes,
 			 num_stripes, stripe_size_kb, thin_pool):
-	cmd = _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool)
+	cmd = ['lvcreate']
+	cmd.extend(options_to_cli_args(create_options))
+
+	if not thin_pool:
+		cmd.extend(['--size', str(size_bytes) + 'B'])
+	else:
+		cmd.extend(['--thin', '--size', str(size_bytes) + 'B'])
+
 	cmd.extend(['--stripes', str(num_stripes)])
 
 	if stripe_size_kb != 0:
```
lvmdbusd lv.py:

```diff
@@ -272,26 +272,6 @@ class LvCommon(AutomatedProperties):
 		self.state = object_state
 		self._move_pv = self._get_move_pv()
 
-	@staticmethod
-	def handle_execute(rc, out, err):
-		if rc == 0:
-			cfg.load()
-		else:
-			# Need to work on error handling, need consistent
-			raise dbus.exceptions.DBusException(
-				LV_INTERFACE,
-				'Exit code %s, stderr = %s' % (str(rc), err))
-
-	@staticmethod
-	def validate_dbus_object(lv_uuid, lv_name):
-		dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
-		if not dbo:
-			raise dbus.exceptions.DBusException(
-				LV_INTERFACE,
-				'LV with uuid %s and name %s not present!' %
-				(lv_uuid, lv_name))
-		return dbo
-
 	@property
 	def VolumeType(self):
 		type_map = {'C': 'Cache', 'm': 'mirrored',
@@ -428,10 +408,24 @@ class Lv(LvCommon):
 	@staticmethod
 	def _remove(lv_uuid, lv_name, remove_options):
 		# Make sure we have a dbus object representing it
-		LvCommon.validate_dbus_object(lv_uuid, lv_name)
-		# Remove the LV, if successful then remove from the model
-		rc, out, err = cmdhandler.lv_remove(lv_name, remove_options)
-		LvCommon.handle_execute(rc, out, err)
+		dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
+
+		if dbo:
+			# Remove the LV, if successful then remove from the model
+			rc, out, err = cmdhandler.lv_remove(lv_name, remove_options)
+
+			if rc == 0:
+				cfg.load()
+			else:
+				# Need to work on error handling, need consistent
+				raise dbus.exceptions.DBusException(
+					LV_INTERFACE,
+					'Exit code %s, stderr = %s' % (str(rc), err))
+		else:
+			raise dbus.exceptions.DBusException(
+				LV_INTERFACE,
+				'LV with uuid %s and name %s not present!' %
+				(lv_uuid, lv_name))
 		return '/'
 
 	@dbus.service.method(
@@ -449,11 +443,24 @@ class Lv(LvCommon):
 	@staticmethod
 	def _rename(lv_uuid, lv_name, new_name, rename_options):
 		# Make sure we have a dbus object representing it
-		LvCommon.validate_dbus_object(lv_uuid, lv_name)
-		# Rename the logical volume
-		rc, out, err = cmdhandler.lv_rename(lv_name, new_name,
-						    rename_options)
-		LvCommon.handle_execute(rc, out, err)
+		dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
+
+		if dbo:
+			# Rename the logical volume
+			rc, out, err = cmdhandler.lv_rename(lv_name, new_name,
+							    rename_options)
+			if rc == 0:
+				cfg.load()
+			else:
+				# Need to work on error handling, need consistent
+				raise dbus.exceptions.DBusException(
+					LV_INTERFACE,
+					'Exit code %s, stderr = %s' % (str(rc), err))
+		else:
+			raise dbus.exceptions.DBusException(
+				LV_INTERFACE,
+				'LV with uuid %s and name %s not present!' %
+				(lv_uuid, lv_name))
 		return '/'
 
 	@dbus.service.method(
@@ -493,21 +500,32 @@ class Lv(LvCommon):
 	def _snap_shot(lv_uuid, lv_name, name, optional_size,
 			snapshot_options):
 		# Make sure we have a dbus object representing it
-		dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
-		# If you specify a size you get a 'thick' snapshot even if
-		# it is a thin lv
-		if not dbo.IsThinVolume:
-			if optional_size == 0:
-				space = dbo.SizeBytes / 80
-				remainder = space % 512
-				optional_size = space + 512 - remainder
+		dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
 
-		rc, out, err = cmdhandler.vg_lv_snapshot(
-			lv_name, snapshot_options, name, optional_size)
-		LvCommon.handle_execute(rc, out, err)
-		full_name = "%s/%s" % (dbo.vg_name_lookup(), name)
-		return cfg.om.get_object_path_by_lvm_id(full_name)
+		if dbo:
+			# If you specify a size you get a 'thick' snapshot even if
+			# it is a thin lv
+			if not dbo.IsThinVolume:
+				if optional_size == 0:
+					space = dbo.SizeBytes / 80
+					remainder = space % 512
+					optional_size = space + 512 - remainder
+
+			rc, out, err = cmdhandler.vg_lv_snapshot(
+				lv_name, snapshot_options, name, optional_size)
+			if rc == 0:
+				cfg.load()
+				full_name = "%s/%s" % (dbo.vg_name_lookup(), name)
+				return cfg.om.get_object_path_by_lvm_id(full_name)
+			else:
+				raise dbus.exceptions.DBusException(
+					LV_INTERFACE,
+					'Exit code %s, stderr = %s' % (str(rc), err))
+		else:
+			raise dbus.exceptions.DBusException(
+				LV_INTERFACE,
+				'LV with uuid %s and name %s not present!' %
+				(lv_uuid, lv_name))
 
 	@dbus.service.method(
 		dbus_interface=LV_INTERFACE,
@@ -530,24 +548,38 @@ class Lv(LvCommon):
 			resize_options):
 		# Make sure we have a dbus object representing it
 		pv_dests = []
-		dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
+		dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
 
-		# If we have PVs, verify them
-		if len(pv_dests_and_ranges):
-			for pr in pv_dests_and_ranges:
-				pv_dbus_obj = cfg.om.get_object_by_path(pr[0])
-				if not pv_dbus_obj:
-					raise dbus.exceptions.DBusException(
-						LV_INTERFACE,
-						'PV Destination (%s) not found' % pr[0])
+		if dbo:
+			# If we have PVs, verify them
+			if len(pv_dests_and_ranges):
+				for pr in pv_dests_and_ranges:
+					pv_dbus_obj = cfg.om.get_object_by_path(pr[0])
+					if not pv_dbus_obj:
+						raise dbus.exceptions.DBusException(
+							LV_INTERFACE,
+							'PV Destination (%s) not found' % pr[0])
 
-				pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
+					pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
 
-		size_change = new_size_bytes - dbo.SizeBytes
-		rc, out, err = cmdhandler.lv_resize(dbo.lvm_id, size_change,
-						    pv_dests, resize_options)
-		LvCommon.handle_execute(rc, out, err)
-		return "/"
+			size_change = new_size_bytes - dbo.SizeBytes
+
+			rc, out, err = cmdhandler.lv_resize(dbo.lvm_id, size_change,
+							    pv_dests, resize_options)
+
+			if rc == 0:
+				# Refresh what's changed
+				cfg.load()
+				return "/"
+			else:
+				raise dbus.exceptions.DBusException(
+					LV_INTERFACE,
+					'Exit code %s, stderr = %s' % (str(rc), err))
+		else:
+			raise dbus.exceptions.DBusException(
+				LV_INTERFACE,
+				'LV with uuid %s and name %s not present!' %
+				(lv_uuid, lv_name))
 
 	@dbus.service.method(
 		dbus_interface=LV_INTERFACE,
@@ -580,11 +612,23 @@ class Lv(LvCommon):
 	def _lv_activate_deactivate(uuid, lv_name, activate, control_flags,
 					options):
 		# Make sure we have a dbus object representing it
-		LvCommon.validate_dbus_object(uuid, lv_name)
-		rc, out, err = cmdhandler.activate_deactivate(
-			'lvchange', lv_name, activate, control_flags, options)
-		LvCommon.handle_execute(rc, out, err)
-		return '/'
+		dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, lv_name)
+
+		if dbo:
+			rc, out, err = cmdhandler.activate_deactivate(
+				'lvchange', lv_name, activate, control_flags, options)
+			if rc == 0:
+				cfg.load()
+				return '/'
+			else:
+				raise dbus.exceptions.DBusException(
+					LV_INTERFACE,
+					'Exit code %s, stderr = %s' % (str(rc), err))
+		else:
+			raise dbus.exceptions.DBusException(
+				LV_INTERFACE,
+				'LV with uuid %s and name %s not present!' %
+				(uuid, lv_name))
 
 	@dbus.service.method(
 		dbus_interface=LV_INTERFACE,
@@ -616,11 +660,25 @@ class Lv(LvCommon):
 	@staticmethod
 	def _add_rm_tags(uuid, lv_name, tags_add, tags_del, tag_options):
 		# Make sure we have a dbus object representing it
-		LvCommon.validate_dbus_object(uuid, lv_name)
-		rc, out, err = cmdhandler.lv_tag(
-			lv_name, tags_add, tags_del, tag_options)
-		LvCommon.handle_execute(rc, out, err)
-		return '/'
+		dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, lv_name)
+
+		if dbo:
+
+			rc, out, err = cmdhandler.lv_tag(
+				lv_name, tags_add, tags_del, tag_options)
+			if rc == 0:
+				cfg.load()
+				return '/'
+			else:
+				raise dbus.exceptions.DBusException(
+					LV_INTERFACE,
+					'Exit code %s, stderr = %s' % (str(rc), err))
+
+		else:
+			raise dbus.exceptions.DBusException(
+				LV_INTERFACE,
+				'LV with uuid %s and name %s not present!' %
+				(uuid, lv_name))
 
 	@dbus.service.method(
 		dbus_interface=LV_INTERFACE,
@@ -678,13 +736,24 @@ class LvThinPool(Lv):
 	@staticmethod
 	def _lv_create(lv_uuid, lv_name, name, size_bytes, create_options):
 		# Make sure we have a dbus object representing it
-		dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
+		dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
 
-		rc, out, err = cmdhandler.lv_lv_create(
-			lv_name, create_options, name, size_bytes)
-		LvCommon.handle_execute(rc, out, err)
-		full_name = "%s/%s" % (dbo.vg_name_lookup(), name)
-		return cfg.om.get_object_path_by_lvm_id(full_name)
+		if dbo:
+			rc, out, err = cmdhandler.lv_lv_create(
+				lv_name, create_options, name, size_bytes)
+			if rc == 0:
+				full_name = "%s/%s" % (dbo.vg_name_lookup(), name)
+				cfg.load()
+				return cfg.om.get_object_path_by_lvm_id(full_name)
+			else:
+				raise dbus.exceptions.DBusException(
+					LV_INTERFACE,
+					'Exit code %s, stderr = %s' % (str(rc), err))
+		else:
+			raise dbus.exceptions.DBusException(
+				LV_INTERFACE,
+				'LV with uuid %s and name %s not present!' %
+				(lv_uuid, lv_name))
 
 	@dbus.service.method(
 		dbus_interface=THIN_POOL_INTERFACE,
@@ -721,13 +790,14 @@ class LvCachePool(Lv):
 
 	@staticmethod
 	def _cache_lv(lv_uuid, lv_name, lv_object_path, cache_options):
 
 		# Make sure we have a dbus object representing cache pool
-		dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
+		dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
 
 		# Make sure we have dbus object representing lv to cache
 		lv_to_cache = cfg.om.get_object_by_path(lv_object_path)
 
-		if lv_to_cache:
+		if dbo and lv_to_cache:
 			fcn = lv_to_cache.lv_full_name()
 			rc, out, err = cmdhandler.lv_cache_lv(
 				dbo.lv_full_name(), fcn, cache_options)
@@ -739,14 +809,22 @@ class LvCachePool(Lv):
 				cfg.load()
 
 				lv_converted = cfg.om.get_object_path_by_lvm_id(fcn)
-
 			else:
 				raise dbus.exceptions.DBusException(
 					LV_INTERFACE,
 					'Exit code %s, stderr = %s' % (str(rc), err))
 		else:
-			raise dbus.exceptions.DBusException(
-				LV_INTERFACE, 'LV to cache with object path %s not present!' %
-				lv_object_path)
+			msg = ""
+			if not dbo:
+				msg += 'CachePool LV with uuid %s and name %s not present!' % \
+					(lv_uuid, lv_name)
+
+			if not lv_to_cache:
+				msg += 'LV to cache with object path %s not present!' % \
+					(lv_object_path)
+
+			raise dbus.exceptions.DBusException(LV_INTERFACE, msg)
 		return lv_converted
 
 	@dbus.service.method(
@@ -777,25 +855,31 @@ class LvCacheLv(Lv):
 	@staticmethod
 	def _detach_lv(lv_uuid, lv_name, detach_options, destroy_cache):
 		# Make sure we have a dbus object representing cache pool
-		dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
+		dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
 
-		# Get current cache name
-		cache_pool = cfg.om.get_object_by_path(dbo.CachePool)
+		if dbo:
+			# Get current cache name
+			cache_pool = cfg.om.get_object_by_path(dbo.CachePool)
 
-		rc, out, err = cmdhandler.lv_detach_cache(
-			dbo.lv_full_name(), detach_options, destroy_cache)
-		if rc == 0:
-			# The cache pool gets removed as hidden and put back to
-			# visible, so lets delete
-			mt_remove_dbus_objects((cache_pool, dbo))
-			cfg.load()
+			rc, out, err = cmdhandler.lv_detach_cache(
+				dbo.lv_full_name(), detach_options, destroy_cache)
+			if rc == 0:
+				# The cache pool gets removed as hidden and put back to
+				# visible, so lets delete
+				mt_remove_dbus_objects((cache_pool, dbo))
+				cfg.load()
 
-			uncached_lv_path = cfg.om.get_object_path_by_lvm_id(lv_name)
+				uncached_lv_path = cfg.om.get_object_path_by_lvm_id(lv_name)
+			else:
+				raise dbus.exceptions.DBusException(
+					LV_INTERFACE,
+					'Exit code %s, stderr = %s' % (str(rc), err))
 		else:
 			raise dbus.exceptions.DBusException(
 				LV_INTERFACE,
-				'Exit code %s, stderr = %s' % (str(rc), err))
-
+				'LV with uuid %s and name %s not present!' %
+				(lv_uuid, lv_name))
 		return uncached_lv_path
 
 	@dbus.service.method(
```
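Every method in this file and the remaining lvmdbusd files follows the same shape that the reverted refactor had factored out: validate the D-Bus object, run the lvm command, reload the model on success, raise on failure. A compressed, illustrative-only sketch of that shared shape (helper and module names taken from the diffs; the wiring is assumed):

```python
# Illustrative only: the common pattern behind handle_execute() /
# validate_dbus_object() in the hunks above and below.
def dbus_op(interface, lookup, execute):
	dbo = lookup()                       # cfg.om.get_object_by_uuid_lvm_id(...)
	if not dbo:
		raise dbus.exceptions.DBusException(
			interface, 'object not present!')
	rc, out, err = execute(dbo)          # some cmdhandler.* call
	if rc != 0:
		raise dbus.exceptions.DBusException(
			interface, 'Exit code %s, stderr = %s' % (str(rc), err))
	cfg.load()                           # refresh the object model
	return dbo
```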
lvmdbusd DataStore:

```diff
@@ -68,20 +68,6 @@ class DataStore(object):
 		else:
 			table[key] = record
 
-	@staticmethod
-	def _pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup):
-		for p in c_pvs.values():
-			# Capture which PVs are associated with which VG
-			if p['vg_uuid'] not in c_pvs_in_vgs:
-				c_pvs_in_vgs[p['vg_uuid']] = []
-
-			if p['vg_name']:
-				c_pvs_in_vgs[p['vg_uuid']].append(
-					(p['pv_name'], p['pv_uuid']))
-
-			# Lookup for translating between /dev/<name> and pv uuid
-			c_lookup[p['pv_name']] = p['pv_uuid']
-
 	@staticmethod
 	def _parse_pvs(_pvs):
 		pvs = sorted(_pvs, key=lambda pk: pk['pv_name'])
@@ -95,7 +81,18 @@ class DataStore(object):
 				c_pvs, p['pv_uuid'], p,
 				['pvseg_start', 'pvseg_size', 'segtype'])
 
-		DataStore._pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup)
+		for p in c_pvs.values():
+			# Capture which PVs are associated with which VG
+			if p['vg_uuid'] not in c_pvs_in_vgs:
+				c_pvs_in_vgs[p['vg_uuid']] = []
+
+			if p['vg_name']:
+				c_pvs_in_vgs[p['vg_uuid']].append(
+					(p['pv_name'], p['pv_uuid']))
+
+			# Lookup for translating between /dev/<name> and pv uuid
+			c_lookup[p['pv_name']] = p['pv_uuid']
 
 		return c_pvs, c_lookup, c_pvs_in_vgs
 
 	@staticmethod
@@ -135,7 +132,17 @@ class DataStore(object):
 			i['pvseg_size'] = i['pv_pe_count']
 			i['segtype'] = 'free'
 
-		DataStore._pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup)
+		for p in c_pvs.values():
+			# Capture which PVs are associated with which VG
+			if p['vg_uuid'] not in c_pvs_in_vgs:
+				c_pvs_in_vgs[p['vg_uuid']] = []
+
+			if p['vg_name']:
+				c_pvs_in_vgs[p['vg_uuid']].append(
+					(p['pv_name'], p['pv_uuid']))
+
+			# Lookup for translating between /dev/<name> and pv uuid
+			c_lookup[p['pv_name']] = p['pv_uuid']
 
 		return c_pvs, c_lookup, c_pvs_in_vgs
```
lvmdbusd manager.py:

```diff
@@ -30,16 +30,6 @@ class Manager(AutomatedProperties):
 	def Version(self):
 		return dbus.String('1.0.0')
 
-	@staticmethod
-	def handle_execute(rc, out, err):
-		if rc == 0:
-			cfg.load()
-		else:
-			# Need to work on error handling, need consistent
-			raise dbus.exceptions.DBusException(
-				MANAGER_INTERFACE,
-				'Exit code %s, stderr = %s' % (str(rc), err))
-
 	@staticmethod
 	def _pv_create(device, create_options):
 
@@ -51,8 +41,15 @@ class Manager(AutomatedProperties):
 				MANAGER_INTERFACE, "PV Already exists!")
 
 		rc, out, err = cmdhandler.pv_create(create_options, [device])
-		Manager.handle_execute(rc, out, err)
-		return cfg.om.get_object_path_by_lvm_id(device)
+		if rc == 0:
+			cfg.load()
+			created_pv = cfg.om.get_object_path_by_lvm_id(device)
+		else:
+			raise dbus.exceptions.DBusException(
+				MANAGER_INTERFACE,
+				'Exit code %s, stderr = %s' % (str(rc), err))
+
+		return created_pv
 
 	@dbus.service.method(
 		dbus_interface=MANAGER_INTERFACE,
@@ -79,8 +76,14 @@ class Manager(AutomatedProperties):
 				MANAGER_INTERFACE, 'object path = %s not found' % p)
 
 		rc, out, err = cmdhandler.vg_create(create_options, pv_devices, name)
-		Manager.handle_execute(rc, out, err)
-		return cfg.om.get_object_path_by_lvm_id(name)
+
+		if rc == 0:
+			cfg.load()
+			return cfg.om.get_object_path_by_lvm_id(name)
+		else:
+			raise dbus.exceptions.DBusException(
+				MANAGER_INTERFACE,
+				'Exit code %s, stderr = %s' % (str(rc), err))
 
 	@dbus.service.method(
 		dbus_interface=MANAGER_INTERFACE,
@@ -197,8 +200,15 @@ class Manager(AutomatedProperties):
 							activate, cache, device_path,
 							major_minor, scan_options)
 
-		Manager.handle_execute(rc, out, err)
-		return '/'
+		if rc == 0:
+			# This could potentially change the state quite a bit, so lets
+			# update everything to be safe
+			cfg.load()
+			return '/'
+		else:
+			raise dbus.exceptions.DBusException(
+				MANAGER_INTERFACE,
+				'Exit code %s, stderr = %s' % (str(rc), err))
 
 	@dbus.service.method(
 		dbus_interface=MANAGER_INTERFACE,
```
lvmdbusd pv.py:

```diff
@@ -135,30 +135,23 @@ class Pv(AutomatedProperties):
 	def _remove(pv_uuid, pv_name, remove_options):
 		# Remove the PV, if successful then remove from the model
 		# Make sure we have a dbus object representing it
-		Pv.validate_dbus_object(pv_uuid, pv_name)
-		rc, out, err = cmdhandler.pv_remove(pv_name, remove_options)
-		Pv.handle_execute(rc, out, err)
-		return '/'
-
-	@staticmethod
-	def handle_execute(rc, out, err):
-		if rc == 0:
-			cfg.load()
-		else:
-			# Need to work on error handling, need consistent
-			raise dbus.exceptions.DBusException(
-				PV_INTERFACE,
-				'Exit code %s, stderr = %s' % (str(rc), err))
-
-	@staticmethod
-	def validate_dbus_object(pv_uuid, pv_name):
-		dbo = cfg.om.get_object_by_uuid_lvm_id(pv_uuid, pv_name)
-		if not dbo:
-			raise dbus.exceptions.DBusException(
-				PV_INTERFACE,
-				'PV with uuid %s and name %s not present!' %
-				(pv_uuid, pv_name))
-		return dbo
+		dbo = cfg.om.get_object_by_uuid_lvm_id(pv_uuid, pv_name)
+
+		if dbo:
+			rc, out, err = cmdhandler.pv_remove(pv_name, remove_options)
+			if rc == 0:
+				cfg.load()
+			else:
+				# Need to work on error handling, need consistent
+				raise dbus.exceptions.DBusException(
+					PV_INTERFACE,
+					'Exit code %s, stderr = %s' % (str(rc), err))
+		else:
+			raise dbus.exceptions.DBusException(
+				PV_INTERFACE,
+				'PV with uuid %s and name %s not present!' %
+				(pv_uuid, pv_name))
+		return '/'
 
 	@dbus.service.method(
 		dbus_interface=PV_INTERFACE,
@@ -175,11 +168,22 @@ class Pv(AutomatedProperties):
 	@staticmethod
 	def _resize(pv_uuid, pv_name, new_size_bytes, resize_options):
 		# Make sure we have a dbus object representing it
-		Pv.validate_dbus_object(pv_uuid, pv_name)
+		dbo = cfg.om.get_object_by_uuid_lvm_id(pv_uuid, pv_name)
 
-		rc, out, err = cmdhandler.pv_resize(pv_name, new_size_bytes,
-						    resize_options)
-		Pv.handle_execute(rc, out, err)
+		if dbo:
+			rc, out, err = cmdhandler.pv_resize(pv_name, new_size_bytes,
+							    resize_options)
+			if rc == 0:
+				cfg.load()
+			else:
+				raise dbus.exceptions.DBusException(
+					PV_INTERFACE,
+					'Exit code %s, stderr = %s' % (str(rc), err))
+		else:
+			raise dbus.exceptions.DBusException(
+				PV_INTERFACE,
+				'PV with uuid %s and name %s not present!' %
+				(pv_uuid, pv_name))
 		return '/'
 
 	@dbus.service.method(
@@ -197,10 +201,21 @@ class Pv(AutomatedProperties):
 	@staticmethod
 	def _allocation_enabled(pv_uuid, pv_name, yes_no, allocation_options):
 		# Make sure we have a dbus object representing it
-		Pv.validate_dbus_object(pv_uuid, pv_name)
-		rc, out, err = cmdhandler.pv_allocatable(
-			pv_name, yes_no, allocation_options)
-		Pv.handle_execute(rc, out, err)
+		dbo = cfg.om.get_object_by_uuid_lvm_id(pv_uuid, pv_name)
+
+		if dbo:
+			rc, out, err = cmdhandler.pv_allocatable(
+				pv_name, yes_no, allocation_options)
+			if rc == 0:
+				cfg.load()
+			else:
+				raise dbus.exceptions.DBusException(
+					PV_INTERFACE, 'Exit code %s, stderr = %s' % (str(rc), err))
+		else:
+			raise dbus.exceptions.DBusException(
+				PV_INTERFACE,
+				'PV with uuid %s and name %s not present!' %
+				(pv_uuid, pv_name))
 		return '/'
 
 	@dbus.service.method(
```
|
@@ -145,35 +145,29 @@ class Vg(AutomatedProperties):
|
||||
|
||||
@staticmethod
|
||||
def fetch_new_lv(vg_name, lv_name):
|
||||
cfg.load()
|
||||
return cfg.om.get_object_path_by_lvm_id("%s/%s" % (vg_name, lv_name))
|
||||
|
||||
@staticmethod
|
||||
def handle_execute(rc, out, err):
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
else:
|
||||
# Need to work on error handling, need consistent
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
|
||||
@staticmethod
|
||||
def validate_dbus_object(vg_uuid, vg_name):
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(vg_uuid, vg_name)
|
||||
if not dbo:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(vg_uuid, vg_name))
|
||||
return dbo
|
||||
|
||||
@staticmethod
|
||||
def _rename(uuid, vg_name, new_name, rename_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
rc, out, err = cmdhandler.vg_rename(
|
||||
vg_name, new_name, rename_options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
rc, out, err = cmdhandler.vg_rename(vg_name, new_name,
|
||||
rename_options)
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
else:
|
||||
# Need to work on error handling, need consistent
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -190,10 +184,24 @@ class Vg(AutomatedProperties):
|
||||
@staticmethod
|
||||
def _remove(uuid, vg_name, remove_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
# Remove the VG, if successful then remove from the model
|
||||
rc, out, err = cmdhandler.vg_remove(vg_name, remove_options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
# Remove the VG, if successful then remove from the model
|
||||
rc, out, err = cmdhandler.vg_remove(vg_name, remove_options)
|
||||
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
else:
|
||||
# Need to work on error handling, need consistent
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -208,9 +216,26 @@ class Vg(AutomatedProperties):
|
||||
|
||||
@staticmethod
|
||||
def _change(uuid, vg_name, change_options):
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
rc, out, err = cmdhandler.vg_change(change_options, vg_name)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
rc, out, err = cmdhandler.vg_change(change_options, vg_name)
|
||||
|
||||
# To use an example with d-feet (Method input)
|
||||
# {"activate": __import__('gi.repository.GLib', globals(),
|
||||
# locals(), ['Variant']).Variant("s", "n")}
|
||||
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
return '/'
|
||||
|
||||
# TODO: This should be broken into a number of different methods
|
||||
@@ -231,24 +256,34 @@ class Vg(AutomatedProperties):
|
||||
@staticmethod
|
||||
def _reduce(uuid, vg_name, missing, pv_object_paths, reduce_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
pv_devices = []
|
||||
if dbo:
|
||||
pv_devices = []
|
||||
|
||||
# If pv_object_paths is not empty, then get the device paths
|
||||
if pv_object_paths and len(pv_object_paths) > 0:
|
||||
for pv_op in pv_object_paths:
|
||||
pv = cfg.om.get_object_by_path(pv_op)
|
||||
if pv:
|
||||
pv_devices.append(pv.lvm_id)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'PV Object path not found = %s!' % pv_op)
|
||||
# If pv_object_paths is not empty, then get the device paths
|
||||
if pv_object_paths and len(pv_object_paths) > 0:
|
||||
for pv_op in pv_object_paths:
|
||||
pv = cfg.om.get_object_by_path(pv_op)
|
||||
if pv:
|
||||
pv_devices.append(pv.lvm_id)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'PV Object path not found = %s!' % pv_op)
|
||||
|
||||
rc, out, err = cmdhandler.vg_reduce(vg_name, missing, pv_devices,
|
||||
reduce_options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
rc, out, err = cmdhandler.vg_reduce(vg_name, missing, pv_devices,
|
||||
reduce_options)
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE, 'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -265,26 +300,36 @@ class Vg(AutomatedProperties):
|
||||
@staticmethod
|
||||
def _extend(uuid, vg_name, pv_object_paths, extend_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
extend_devices = []
|
||||
if dbo:
|
||||
extend_devices = []
|
||||
|
||||
for i in pv_object_paths:
|
||||
pv = cfg.om.get_object_by_path(i)
|
||||
if pv:
|
||||
extend_devices.append(pv.lvm_id)
|
||||
for i in pv_object_paths:
|
||||
pv = cfg.om.get_object_by_path(i)
|
||||
if pv:
|
||||
extend_devices.append(pv.lvm_id)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE, 'PV Object path not found = %s!' % i)
|
||||
|
||||
if len(extend_devices):
|
||||
rc, out, err = cmdhandler.vg_extend(vg_name, extend_devices,
|
||||
extend_options)
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE, 'PV Object path not found = %s!' % i)
|
||||
|
||||
if len(extend_devices):
|
||||
rc, out, err = cmdhandler.vg_extend(vg_name, extend_devices,
|
||||
extend_options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
VG_INTERFACE, 'No pv_object_paths provided!')
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE, 'No pv_object_paths provided!')
|
||||
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -321,24 +366,33 @@ class Vg(AutomatedProperties):
|
||||
create_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
pv_dests = []
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
if dbo:
|
||||
if len(pv_dests_and_ranges):
|
||||
for pr in pv_dests_and_ranges:
|
||||
pv_dbus_obj = cfg.om.get_object_by_path(pr[0])
|
||||
if not pv_dbus_obj:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'PV Destination (%s) not found' % pr[0])
|
||||
|
||||
if len(pv_dests_and_ranges):
|
||||
for pr in pv_dests_and_ranges:
|
||||
pv_dbus_obj = cfg.om.get_object_by_path(pr[0])
|
||||
if not pv_dbus_obj:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'PV Destination (%s) not found' % pr[0])
|
||||
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
|
||||
|
||||
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
|
||||
rc, out, err = cmdhandler.vg_lv_create(
|
||||
vg_name, create_options, name, size_bytes, pv_dests)
|
||||
|
||||
rc, out, err = cmdhandler.vg_lv_create(
|
||||
vg_name, create_options, name, size_bytes, pv_dests)
|
||||
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
if rc == 0:
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_INTERFACE,
|
||||
@@ -374,13 +428,25 @@ class Vg(AutomatedProperties):
|
||||
def _lv_create_linear(uuid, vg_name, name, size_bytes,
|
||||
thin_pool, create_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
rc, out, err = cmdhandler.vg_lv_create_linear(
|
||||
vg_name, create_options, name, size_bytes, thin_pool)
|
||||
if dbo:
|
||||
rc, out, err = cmdhandler.vg_lv_create_linear(
|
||||
vg_name, create_options, name, size_bytes, thin_pool)
|
||||
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
if rc == 0:
|
||||
created_lv = Vg.fetch_new_lv(vg_name, name)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
|
||||
return created_lv
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_INTERFACE,
|
||||
@@ -400,12 +466,24 @@ class Vg(AutomatedProperties):
|
||||
def _lv_create_striped(uuid, vg_name, name, size_bytes, num_stripes,
|
||||
stripe_size_kb, thin_pool, create_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
rc, out, err = cmdhandler.vg_lv_create_striped(
|
||||
vg_name, create_options, name, size_bytes,
|
||||
num_stripes, stripe_size_kb, thin_pool)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
rc, out, err = cmdhandler.vg_lv_create_striped(
|
||||
vg_name, create_options, name, size_bytes,
|
||||
num_stripes, stripe_size_kb, thin_pool)
|
||||
if rc == 0:
|
||||
created_lv = Vg.fetch_new_lv(vg_name, name)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE, 'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
|
||||
return created_lv
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_INTERFACE,
|
||||
@@ -428,11 +506,25 @@ class Vg(AutomatedProperties):
|
||||
def _lv_create_mirror(uuid, vg_name, name, size_bytes,
|
||||
num_copies, create_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
rc, out, err = cmdhandler.vg_lv_create_mirror(
|
||||
vg_name, create_options, name, size_bytes, num_copies)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
rc, out, err = cmdhandler.vg_lv_create_mirror(
|
||||
vg_name, create_options, name, size_bytes, num_copies)
|
||||
if rc == 0:
|
||||
created_lv = Vg.fetch_new_lv(vg_name, name)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
|
||||
return created_lv
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_INTERFACE,
|
||||
@@ -453,12 +545,26 @@ class Vg(AutomatedProperties):
|
||||
def _lv_create_raid(uuid, vg_name, name, raid_type, size_bytes,
|
||||
num_stripes, stripe_size_kb, create_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
rc, out, err = cmdhandler.vg_lv_create_raid(
|
||||
vg_name, create_options, name, raid_type, size_bytes,
|
||||
num_stripes, stripe_size_kb)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
rc, out, err = cmdhandler.vg_lv_create_raid(
|
||||
vg_name, create_options, name, raid_type, size_bytes,
|
||||
num_stripes, stripe_size_kb)
|
||||
if rc == 0:
|
||||
created_lv = Vg.fetch_new_lv(vg_name, name)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
|
||||
return created_lv
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_INTERFACE,
|
||||
@@ -479,27 +585,33 @@ class Vg(AutomatedProperties):
|
||||
def _create_pool(uuid, vg_name, meta_data_lv, data_lv,
|
||||
create_options, create_method):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
# Retrieve the full names for the metadata and data lv
|
||||
md = cfg.om.get_object_by_path(meta_data_lv)
|
||||
data = cfg.om.get_object_by_path(data_lv)
|
||||
|
||||
if md and data:
|
||||
if dbo and md and data:
|
||||
|
||||
new_name = data.Name
|
||||
|
||||
rc, out, err = create_method(
|
||||
md.lv_full_name(), data.lv_full_name(), create_options)
|
||||
|
||||
if rc == 0:
|
||||
mt_remove_dbus_objects((md, data))
|
||||
|
||||
Vg.handle_execute(rc, out, err)
|
||||
cache_pool_lv = Vg.fetch_new_lv(vg_name, new_name)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
|
||||
else:
|
||||
msg = ""
|
||||
|
||||
if not dbo:
|
||||
msg += 'VG with uuid %s and name %s not present!' % \
|
||||
(uuid, vg_name)
|
||||
|
||||
if not md:
|
||||
msg += 'Meta data LV with object path %s not present!' % \
|
||||
(meta_data_lv)
|
||||
@@ -510,7 +622,7 @@ class Vg(AutomatedProperties):
|
||||
|
||||
raise dbus.exceptions.DBusException(VG_INTERFACE, msg)
|
||||
|
||||
return Vg.fetch_new_lv(vg_name, new_name)
|
||||
return cache_pool_lv
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_INTERFACE,
|
||||
@@ -544,21 +656,33 @@ class Vg(AutomatedProperties):
|
||||
pv_devices = []
|
||||
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
# Check for existence of pv object paths
|
||||
for p in pv_object_paths:
|
||||
pv = cfg.om.get_object_by_path(p)
|
||||
if pv:
|
||||
pv_devices.append(pv.Name)
|
||||
if dbo:
|
||||
# Check for existence of pv object paths
|
||||
for p in pv_object_paths:
|
||||
pv = cfg.om.get_object_by_path(p)
|
||||
if pv:
|
||||
pv_devices.append(pv.Name)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE, 'PV object path = %s not found' % p)
|
||||
|
||||
rc, out, err = cmdhandler.pv_tag(
|
||||
pv_devices, tags_add, tags_del, tag_options)
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
return '/'
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE, 'PV object path = %s not found' % p)
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
|
||||
rc, out, err = cmdhandler.pv_tag(
|
||||
pv_devices, tags_add, tags_del, tag_options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_INTERFACE,
|
||||
@@ -596,12 +720,25 @@ class Vg(AutomatedProperties):
|
||||
@staticmethod
|
||||
def _vg_add_rm_tags(uuid, vg_name, tags_add, tags_del, tag_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
rc, out, err = cmdhandler.vg_tag(
|
||||
vg_name, tags_add, tags_del, tag_options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
if dbo:
|
||||
|
||||
rc, out, err = cmdhandler.vg_tag(
|
||||
vg_name, tags_add, tags_del, tag_options)
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
return '/'
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_INTERFACE,
|
||||
@@ -638,10 +775,23 @@ class Vg(AutomatedProperties):
|
||||
@staticmethod
|
||||
def _vg_change_set(uuid, vg_name, method, value, options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
rc, out, err = method(vg_name, value, options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
rc, out, err = method(vg_name, value, options)
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
return '/'
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_INTERFACE,
|
||||
@@ -699,11 +849,23 @@ class Vg(AutomatedProperties):
|
||||
def _vg_activate_deactivate(uuid, vg_name, activate, control_flags,
|
||||
options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
rc, out, err = cmdhandler.activate_deactivate(
|
||||
'vgchange', vg_name, activate, control_flags, options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
rc, out, err = cmdhandler.activate_deactivate(
|
||||
'vgchange', vg_name, activate, control_flags, options)
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
return '/'
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_INTERFACE,
|
||||
|
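
The hunks above repeatedly swap the inline rc/out/err handling for two helpers, Vg.validate_dbus_object() and Vg.handle_execute(), whose bodies are not part of these hunks. A minimal Python sketch of what they plausibly do, inferred purely from the inline code they replace (an assumption for illustration, not the actual lvmdbusd source):

    # Hypothetical sketch inferred from the inline pattern replaced above.
    class Vg(AutomatedProperties):
        @staticmethod
        def validate_dbus_object(uuid, vg_name):
            # Raise unless a dbus object represents the requested VG.
            dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
            if not dbo:
                raise dbus.exceptions.DBusException(
                    VG_INTERFACE,
                    'VG with uuid %s and name %s not present!' %
                    (uuid, vg_name))
            return dbo

        @staticmethod
        def handle_execute(rc, out, err):
            # On success refresh the object model; on failure raise the
            # same error the callers previously raised inline.
            if rc == 0:
                cfg.load()
            else:
                raise dbus.exceptions.DBusException(
                    VG_INTERFACE,
                    'Exit code %s, stderr = %s' % (str(rc), err))

Centralizing the check and the error path this way removes the duplicated if rc == 0 / raise branches from every VG method shown above.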
@@ -1,14 +0,0 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

@@ -61,7 +61,7 @@ struct dev_manager {
int flush_required;
int activation; /* building activation tree */
int suspend; /* building suspend tree */
unsigned track_external_lv_deps;
int skip_external_lv;
struct dm_list pending_delete; /* str_list of dlid(s) with pending delete */
unsigned track_pending_delete;
unsigned track_pvmove_deps;

@@ -2039,16 +2039,16 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
#endif
}

if (origin_only && dm->activation && dm->track_external_lv_deps &&
if (origin_only && dm->activation && !dm->skip_external_lv &&
lv_is_external_origin(lv)) {
/* Find possible users of external origin lv */
dm->track_external_lv_deps = 0; /* avoid recursion */
dm->skip_external_lv = 1; /* avoid recursion */
dm_list_iterate_items(sl, &lv->segs_using_this_lv)
/* Match only external_lv users */
if ((sl->seg->external_lv == lv) &&
!_add_lv_to_dtree(dm, dtree, sl->seg->lv, 1))
return_0;
dm->track_external_lv_deps = 1;
dm->skip_external_lv = 0;
}

if (lv_is_thin_pool(lv)) {

@@ -2148,7 +2148,7 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,

/* Add any LVs used by segments in this LV */
dm_list_iterate_items(seg, &lv->segments) {
if (seg->external_lv && dm->track_external_lv_deps &&
if (seg->external_lv && !dm->skip_external_lv &&
!_add_lv_to_dtree(dm, dtree, seg->external_lv, 1)) /* stack */
return_0;
if (seg->log_lv &&

@@ -2158,7 +2158,7 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
!_add_lv_to_dtree(dm, dtree, seg->metadata_lv, 0))
return_0;
if (seg->pool_lv &&
(lv_is_cache_pool(seg->pool_lv) || dm->track_external_lv_deps) &&
(lv_is_cache_pool(seg->pool_lv) || !dm->skip_external_lv) &&
/* When activating and not origin_only detect linear 'overlay' over pool */
!_add_lv_to_dtree(dm, dtree, seg->pool_lv, dm->activation ? origin_only : 1))
return_0;

@@ -2575,7 +2575,7 @@ static int _add_new_external_lv_to_dtree(struct dev_manager *dm,
struct seg_list *sl;

/* Do not want to recursively add externals again */
if (!dm->track_external_lv_deps)
if (dm->skip_external_lv)
return 1;

/*

@@ -2583,7 +2583,7 @@ static int _add_new_external_lv_to_dtree(struct dev_manager *dm,
* process all LVs related to this LV, and we want to
* skip repeated invocation of external lv processing
*/
dm->track_external_lv_deps = 0;
dm->skip_external_lv = 1;

log_debug_activation("Adding external origin LV %s and all active users.",
display_lvname(external_lv));

@@ -2609,7 +2609,7 @@ static int _add_new_external_lv_to_dtree(struct dev_manager *dm,
log_debug_activation("Finished adding external origin LV %s and all active users.",
display_lvname(external_lv));

dm->track_external_lv_deps = 1;
dm->skip_external_lv = 0;

return 1;
}

@@ -3075,7 +3075,7 @@ static int _tree_action(struct dev_manager *dm, const struct logical_volume *lv,
(laopts->origin_only) ? " origin-only" : "",
display_lvname(lv));

/* Some LV cannot be used for top level tree */
/* Some LV can be used for top level tree */
/* TODO: add more.... */
if (lv_is_cache_pool(lv) && !dm_list_empty(&lv->segs_using_this_lv)) {
log_error(INTERNAL_ERROR "Cannot create tree for %s.",

@@ -3085,7 +3085,6 @@ static int _tree_action(struct dev_manager *dm, const struct logical_volume *lv,
/* Some targets may build bigger tree for activation */
dm->activation = ((action == PRELOAD) || (action == ACTIVATE));
dm->suspend = (action == SUSPEND_WITH_LOCKFS) || (action == SUSPEND);
dm->track_external_lv_deps = 1;

if (!(dtree = _create_partial_dtree(dm, lv, laopts->origin_only)))
return_0;

@@ -192,9 +192,6 @@ static int _get_env_vars(struct cmd_context *cmd)
}
}

if (strcmp((getenv("LVM_RUN_BY_DMEVENTD") ? : "0"), "1") == 0)
init_run_by_dmeventd(cmd);

return 1;
}

@@ -1758,15 +1755,6 @@ bad:
return 0;
}

int init_run_by_dmeventd(struct cmd_context *cmd)
{
init_dmeventd_monitor(DMEVENTD_MONITOR_IGNORE);
init_ignore_suspended_devices(1);
init_disable_dmeventd_monitoring(1); /* Lock settings */

return 0;
}

void destroy_config_context(struct cmd_context *cmd)
{
_destroy_config(cmd);

@@ -242,7 +242,6 @@ int config_files_changed(struct cmd_context *cmd);
int init_lvmcache_orphans(struct cmd_context *cmd);
int init_filters(struct cmd_context *cmd, unsigned load_persistent_cache);
int init_connections(struct cmd_context *cmd);
int init_run_by_dmeventd(struct cmd_context *cmd);

/*
* A config context is a very light weight cmd struct that

@@ -1221,15 +1221,14 @@ cfg_array(activation_read_only_volume_list_CFG, "read_only_volume_list", activat
"read_only_volume_list = [ \"vg1\", \"vg2/lvol1\", \"@tag1\", \"@*\" ]\n"
"#\n")

cfg(activation_mirror_region_size_CFG, "mirror_region_size", activation_CFG_SECTION, 0, CFG_TYPE_INT, DEFAULT_RAID_REGION_SIZE, vsn(1, 0, 0), NULL, vsn(2, 2, 99),
"This has been replaced by the activation/raid_region_size setting.\n",
"Size in KiB of each raid or mirror synchronization region.\n")
"Size in KiB of each copy operation when mirroring.\n")

cfg(activation_raid_region_size_CFG, "raid_region_size", activation_CFG_SECTION, 0, CFG_TYPE_INT, DEFAULT_RAID_REGION_SIZE, vsn(2, 2, 99), NULL, 0, NULL,
"Size in KiB of each raid or mirror synchronization region.\n"
"The clean/dirty state of data is tracked for each region.\n"
"The value is rounded down to a power of two if necessary, and\n"
"is ignored if it is not a multiple of the machine memory page size.\n")
"For raid or mirror segment types, this is the amount of data that is\n"
"copied at once when initializing, or moved at once by pvmove.\n")

cfg(activation_error_when_full_CFG, "error_when_full", activation_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_ERROR_WHEN_FULL, vsn(2, 2, 115), NULL, 0, NULL,
"Return errors if a thin pool runs out of space.\n"

@@ -1860,12 +1859,6 @@ cfg(dmeventd_thin_library_CFG, "thin_library", dmeventd_CFG_SECTION, 0, CFG_TYPE
"and emits a warning through syslog when the usage exceeds 80%. The\n"
"warning is repeated when 85%, 90% and 95% of the pool is filled.\n")

cfg(dmeventd_thin_command_CFG, "thin_command", dmeventd_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_DMEVENTD_THIN_COMMAND, vsn(2, 2, 169), NULL, 0, NULL,
"The plugin runs command with each 5% increment when thin-pool data volume\n"
"or metadata volume gets above 50%.\n"
"Command which starts with 'lvm ' prefix is internal lvm command.\n"
"You can write your own handler to customise behaviour in more details.\n")

cfg(dmeventd_executable_CFG, "executable", dmeventd_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_DMEVENTD_PATH, vsn(2, 2, 73), "@DMEVENTD_PATH@", 0, NULL,
"The full path to the dmeventd binary.\n")

@@ -81,7 +81,6 @@
#define DEFAULT_DMEVENTD_MIRROR_LIB "libdevmapper-event-lvm2mirror.so"
#define DEFAULT_DMEVENTD_SNAPSHOT_LIB "libdevmapper-event-lvm2snapshot.so"
#define DEFAULT_DMEVENTD_THIN_LIB "libdevmapper-event-lvm2thin.so"
#define DEFAULT_DMEVENTD_THIN_COMMAND "lvm lvextend --use-policies"
#define DEFAULT_DMEVENTD_MONITOR 1
#define DEFAULT_BACKGROUND_POLLING 1

@@ -177,7 +176,7 @@
#define DEFAULT_INDENT 1
#define DEFAULT_ABORT_ON_INTERNAL_ERRORS 0
#define DEFAULT_DETECT_INTERNAL_VG_CACHE_CORRUPTION 0
#define DEFAULT_UNITS "r"
#define DEFAULT_UNITS "h"
#define DEFAULT_SUFFIX 1
#define DEFAULT_HOSTTAGS 0

@@ -125,10 +125,6 @@ struct dev_types *create_dev_types(const char *proc_dir,
if (!strncmp("emcpower", line + i, 8) && isspace(*(line + i + 8)))
dt->emcpower_major = line_maj;

/* Look for Veritas Dynamic Multipathing */
if (!strncmp("VxDMP", line + i, 5) && isspace(*(line + i + 5)))
dt->vxdmp_major = line_maj;

if (!strncmp("loop", line + i, 4) && isspace(*(line + i + 4)))
dt->loop_major = line_maj;

@@ -222,9 +218,6 @@ int dev_subsystem_part_major(struct dev_types *dt, struct device *dev)
if (MAJOR(dev->dev) == dt->power2_major)
return 1;

if (MAJOR(dev->dev) == dt->vxdmp_major)
return 1;

if ((MAJOR(dev->dev) == dt->blkext_major) &&
dev_get_primary_dev(dt, dev, &primary_dev) &&
(MAJOR(primary_dev) == dt->md_major))

@@ -253,9 +246,6 @@ const char *dev_subsystem_name(struct dev_types *dt, struct device *dev)
if (MAJOR(dev->dev) == dt->power2_major)
return "POWER2";

if (MAJOR(dev->dev) == dt->vxdmp_major)
return "VXDMP";

if (MAJOR(dev->dev) == dt->blkext_major)
return "BLKEXT";

@@ -41,7 +41,6 @@ struct dev_types {
int drbd_major;
int device_mapper_major;
int emcpower_major;
int vxdmp_major;
int power2_major;
int dasd_major;
int loop_major;

@@ -63,6 +63,5 @@ static const dev_known_type_t _dev_known_types[] = {
{"bcache", 1, "bcache block device cache"},
{"nvme", 64, "NVM Express"},
{"zvol", 16, "ZFS Zvols"},
{"VxDMP", 16, "Veritas Dynamic Multipathing"},
{"", 0, ""}
};

@@ -712,7 +712,6 @@ static int _round_down_pow2(int r)

int get_default_region_size(struct cmd_context *cmd)
{
int pagesize = lvm_getpagesize();
int region_size = _get_default_region_size(cmd);

if (!is_power_of_2(region_size)) {

@@ -721,12 +720,6 @@ int get_default_region_size(struct cmd_context *cmd)
region_size / 2);
}

if (region_size % (pagesize >> SECTOR_SHIFT)) {
region_size = DEFAULT_RAID_REGION_SIZE * 2;
log_verbose("Using default region size %u kiB (multiple of page size).",
region_size / 2);
}

return region_size;
}
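
The adjustment removed here is easy to check by hand; a small illustrative Python sketch of the same logic, under stated assumptions (512-byte sectors, SECTOR_SHIFT == 9, and a default region size of 512 sectors, chosen to match the "region_size / 2" KiB logging above; the exact DEFAULT_RAID_REGION_SIZE value is not shown in this hunk):

    # Illustrative sketch, not the lvm2 implementation.
    def round_down_pow2(r):
        while r & (r - 1):
            r &= r - 1          # clear low bits until a power of 2 remains
        return r

    def adjust_region_size(region_size_sectors, pagesize_bytes=4096):
        default = 512 * 2       # assumed fallback, in sectors
        if region_size_sectors & (region_size_sectors - 1):
            region_size_sectors = round_down_pow2(region_size_sectors)
        if region_size_sectors % (pagesize_bytes >> 9):
            region_size_sectors = default   # must be page-size aligned
        return region_size_sectors

    # e.g. adjust_region_size(1000) -> 512 sectors, i.e. 256 KiB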

@@ -4693,11 +4686,6 @@ static int _lvresize_check(struct logical_volume *lv,
return 0;
}

if (lv_is_cache_type(lv)) {
log_error("Unable to resize logical volumes of cache type.");
return 0;
}

if (!lv_is_visible(lv) &&
!lv_is_thin_pool_metadata(lv) &&
!lv_is_lockd_sanlock_lv(lv)) {

@@ -6539,43 +6527,12 @@ int remove_layer_from_lv(struct logical_volume *lv,
* Before removal, the layer should be cleaned up,
* i.e. additional segments and areas should have been removed.
*/
/* FIXME:
* These are all INTERNAL_ERROR, but ATM there is
* some internal API problem and this code is wrongly
* executed with certain mirror manipulations.
* So we need to fix mirror code first, then switch...
*/
if (dm_list_size(&parent_lv->segments) != 1) {
log_error("Invalid %d segments in %s, expected only 1.",
dm_list_size(&parent_lv->segments),
display_lvname(parent_lv));
return 0;
}

if (parent_seg->area_count != 1) {
log_error("Invalid %d area count(s) in %s, expected only 1.",
parent_seg->area_count, display_lvname(parent_lv));
return 0;
}

if (seg_type(parent_seg, 0) != AREA_LV) {
log_error("Invalid seg_type %d in %s, expected LV.",
seg_type(parent_seg, 0), display_lvname(parent_lv));
return 0;
}

if (layer_lv != seg_lv(parent_seg, 0)) {
log_error("Layer doesn't match segment in %s.",
display_lvname(parent_lv));
return 0;
}

if (parent_lv->le_count != layer_lv->le_count) {
log_error("Inconsistent extent count (%u != %u) of layer %s.",
parent_lv->le_count, layer_lv->le_count,
display_lvname(parent_lv));
return 0;
}
if (dm_list_size(&parent_lv->segments) != 1 ||
parent_seg->area_count != 1 ||
seg_type(parent_seg, 0) != AREA_LV ||
layer_lv != seg_lv(parent_seg, 0) ||
parent_lv->le_count != layer_lv->le_count)
return_0;

if (!lv_empty(parent_lv))
return_0;

@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*

@@ -148,15 +148,7 @@ static void _check_raid1_seg(struct lv_segment *seg, int *error_count)
static void _check_raid45610_seg(struct lv_segment *seg, int *error_count)
{
/* Checks applying to any raid4/5/6/10 */
/*
* Allow raid4 + raid5_n to get activated w/o metadata.
*
* This is mandatory during conversion between them,
* because switching the dedicated parity SubLVs
* beginning <-> end changes the roles of all SubLVs
* which the kernel would reject.
*/
if (!(seg_is_raid4(seg) || seg_is_raid5_n(seg)) && !seg->meta_areas)
if (!seg->meta_areas)
raid_seg_error("no meta areas");
if (!seg->stripe_size)
raid_seg_error("zero stripe size");

@@ -618,7 +610,7 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
continue;
if (lv == seg_lv(seg, s))
seg_found++;
if (seg->meta_areas && seg_is_raid_with_meta(seg) && (lv == seg_metalv(seg, s)))
if (seg_is_raid_with_meta(seg) && (lv == seg_metalv(seg, s)))
seg_found++;
}
if (seg_is_replicator_dev(seg)) {

@@ -1224,8 +1224,6 @@ int lv_raid_replace(struct logical_volume *lv, int force,
struct dm_list *remove_pvs, struct dm_list *allocate_pvs);
int lv_raid_remove_missing(struct logical_volume *lv);
int partial_raid_lv_supports_degraded_activation(const struct logical_volume *lv);
int lv_raid_change_region_size(struct logical_volume *lv,
int yes, int force, uint32_t new_region_size);
/* -- metadata/raid_manip.c */

/* ++ metadata/cache_manip.c */

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2011-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2011-2014 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*

@@ -745,27 +745,6 @@ static int _alloc_image_components(struct logical_volume *lv,
return 1;
}

/*
* HM Helper:
*
* Calculate absolute amount of metadata device extents based
* on @rimage_extents, @region_size and @extent_size.
*/
static uint32_t _raid_rmeta_extents(struct cmd_context *cmd, uint32_t rimage_extents,
uint32_t region_size, uint32_t extent_size)
{
uint64_t bytes, regions, sectors;

region_size = region_size ?: get_default_region_size(cmd);
regions = (uint64_t) rimage_extents * extent_size / region_size;

/* raid and bitmap superblocks + region bytes */
bytes = 2 * 4096 + dm_div_up(regions, 8);
sectors = dm_div_up(bytes, 512);

return dm_div_up(sectors, extent_size);
}
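
The arithmetic in _raid_rmeta_extents() above is simple to verify with a worked example; the following illustrative Python transcription mirrors it (one bitmap bit per region, plus a raid and a bitmap superblock of 4096 bytes each; the example figures are invented, all sizes in 512-byte sectors):

    # Illustrative transcription of the C arithmetic above.
    def raid_rmeta_extents(rimage_extents, region_size, extent_size):
        regions = rimage_extents * extent_size // region_size
        nbytes = 2 * 4096 + (regions + 7) // 8    # dm_div_up(regions, 8)
        sectors = (nbytes + 511) // 512
        return (sectors + extent_size - 1) // extent_size

    # A 100 GiB image, i.e. 25600 extents of 4 MiB (8192 sectors), with a
    # 512 KiB (1024-sector) region size gives 204800 regions -> 33792
    # bytes -> 66 sectors, so a single-extent rmeta LV:
    # raid_rmeta_extents(25600, 1024, 8192) == 1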

/*
* _alloc_rmeta_for_lv
* @lv

@@ -2196,7 +2175,6 @@ static int _convert_mirror_to_raid1(struct logical_volume *lv,
lv->status &= ~MIRROR;
lv->status &= ~MIRRORED;
lv->status |= RAID;
seg->status |= SEG_RAID;

if (!lv_update_and_reload(lv))
return_0;

@@ -2248,7 +2226,7 @@ static int _convert_raid1_to_mirror(struct logical_volume *lv,
log_debug_metadata("Changing image count to %u on %s.",
new_image_count, display_lvname(lv));
if (!_lv_raid_change_image_count(lv, new_image_count, allocate_pvs, removal_lvs, 0, 0))
return_0;
return 0;
}

/* Remove rmeta LVs */

@@ -2555,7 +2533,11 @@ static struct lv_segment *_convert_striped_to_raid0(struct logical_volume *lv,
* Change the user's requested segment type to
* the appropriate more-refined one for takeover.
*
* raid can takeover striped,raid0 if there is only one stripe zone
* raid0 can take over:
* raid4
*
* raid4 can take over:
* raid0 - if there is only one stripe zone
*/
#define ALLOW_NONE 0x0
#define ALLOW_STRIPES 0x2

@@ -2585,47 +2567,44 @@ static struct possible_takeover_reshape_type _possible_takeover_reshape_types[]
.possible_types = SEG_RAID0|SEG_RAID0_META,
.current_areas = 1,
.options = ALLOW_STRIPE_SIZE },

{ .current_types = SEG_STRIPED_TARGET, /* striped -> raid0*, i.e. seg->area_count > 1 */
.possible_types = SEG_RAID0|SEG_RAID0_META,
.current_areas = ~0U,
.options = ALLOW_NONE },
{ .current_types = SEG_STRIPED_TARGET, /* striped -> raid4 , i.e. seg->area_count > 1 */
.possible_types = SEG_RAID4,
.current_areas = ~0U,
.options = ALLOW_NONE }, /* FIXME: ALLOW_REGION_SIZE */
/* raid0* -> */
{ .current_types = SEG_RAID0|SEG_RAID0_META, /* seg->area_count = 1 */
.possible_types = SEG_RAID1,
.current_areas = 1,
.options = ALLOW_NONE }, /* FIXME: ALLOW_REGION_SIZE */

/* striped,raid0*,raid4,raid5_n,raid6_n_6 <-> striped,raid0*,raid4,raid5_n,raid6_n_6 */
{ .current_types = SEG_STRIPED_TARGET|SEG_RAID0|SEG_RAID0_META|SEG_RAID4|SEG_RAID5_N|SEG_RAID6_N_6,
.possible_types = SEG_STRIPED_TARGET|SEG_RAID0|SEG_RAID0_META|SEG_RAID4|SEG_RAID5_N|SEG_RAID6_N_6,
{ .current_types = SEG_RAID0|SEG_RAID0_META, /* raid0* -> striped, i.e. seg->area_count > 1 */
.possible_types = SEG_STRIPED_TARGET,
.current_areas = ~0U,
.options = ALLOW_NONE },
{ .current_types = SEG_RAID0|SEG_RAID0_META, /* raid0* -> raid0*, i.e. seg->area_count > 1 */
.possible_types = SEG_RAID0_META|SEG_RAID0,
.current_areas = ~0U,
.options = ALLOW_NONE },
{ .current_types = SEG_RAID0|SEG_RAID0_META, /* raid0* -> raid4, i.e. seg->area_count > 1 */
.possible_types = SEG_RAID4,
.current_areas = ~0U,
.options = ALLOW_NONE }, /* FIXME: ALLOW_REGION_SIZE */

/* raid5_ls <-> raid6_ls_6 */
{ .current_types = SEG_RAID5_LS|SEG_RAID6_LS_6,
.possible_types = SEG_RAID5_LS|SEG_RAID6_LS_6,
/* raid4 -> -> */
{ .current_types = SEG_RAID4, /* raid4 ->striped/raid0*, i.e. seg->area_count > 1 */
.possible_types = SEG_STRIPED_TARGET|SEG_RAID0|SEG_RAID0_META,
.current_areas = ~0U,
.options = ALLOW_NONE },
/* raid1 -> mirror */
{ .current_types = SEG_RAID1,
.possible_types = SEG_MIRROR,
.current_areas = ~0U,
.options = ALLOW_NONE }, /* FIXME: ALLOW_REGION_SIZE */

/* raid5_rs -> raid6_rs_6 */
{ .current_types = SEG_RAID5_RS|SEG_RAID6_RS_6,
.possible_types = SEG_RAID5_RS|SEG_RAID6_RS_6,
.current_areas = ~0U,
.options = ALLOW_NONE }, /* FIXME: ALLOW_REGION_SIZE */

/* raid5_ls -> raid6_la_6 */
{ .current_types = SEG_RAID5_LA|SEG_RAID6_LA_6,
.possible_types = SEG_RAID5_LA|SEG_RAID6_LA_6,
.current_areas = ~0U,
.options = ALLOW_NONE }, /* FIXME: ALLOW_REGION_SIZE */

/* raid5_ls -> raid6_ra_6 */
{ .current_types = SEG_RAID5_RA|SEG_RAID6_RA_6,
.possible_types = SEG_RAID5_RA|SEG_RAID6_RA_6,
.current_areas = ~0U,
.options = ALLOW_NONE }, /* FIXME: ALLOW_REGION_SIZE */

/* mirror <-> raid1 with arbitrary number of legs */
{ .current_types = SEG_MIRROR|SEG_RAID1,
.possible_types = SEG_MIRROR|SEG_RAID1,
/* mirror -> raid1 with arbitrary number of legs */
{ .current_types = SEG_MIRROR,
.possible_types = SEG_RAID1,
.current_areas = ~0U,
.options = ALLOW_NONE }, /* FIXME: ALLOW_REGION_SIZE */

@@ -2932,8 +2911,6 @@ static int _raid1_to_mirrored_wrapper(TAKEOVER_FN_ARGS)
* parity device to lv segment area 0 and thus changing MD
* array roles, detach the MetaLVs and reload as raid0 in
* order to wipe them then reattach and set back to raid0_meta.
*
* Same applies to raid4 <-> raid5.
*/
static int _clear_meta_lvs(struct logical_volume *lv)
{

@@ -2943,18 +2920,16 @@ static int _clear_meta_lvs(struct logical_volume *lv)
const struct segment_type *tmp_segtype;
struct dm_list meta_lvs;
struct lv_list *lvl_array, *lvl;
int is_raid4_or_5N = seg_is_raid4(seg) || seg_is_raid5_n(seg);

/* Reject non-raid0_meta/raid4/raid5_n segment types cautiously */
if (!seg->meta_areas ||
(!seg_is_raid0_meta(seg) && !is_raid4_or_5N))
/* Reject non-raid0_meta segment types cautiously */
if (!seg_is_raid0_meta(seg) ||
!seg->meta_areas)
return_0;

if (!(lvl_array = dm_pool_alloc(lv->vg->vgmem, seg->area_count * sizeof(*lvl_array))))
return_0;

dm_list_init(&meta_lvs);
tmp_segtype = seg->segtype;
tmp_areas = seg->meta_areas;

/* Extract all MetaLVs listing them on @meta_lvs */

@@ -2965,12 +2940,10 @@ static int _clear_meta_lvs(struct logical_volume *lv)

/* Memorize meta areas and segtype to set again after initializing. */
seg->meta_areas = NULL;
tmp_segtype = seg->segtype;

if (seg_is_raid0_meta(seg) &&
!(seg->segtype = get_segtype_from_flag(lv->vg->cmd, SEG_RAID0)))
return_0;

if (!lv_update_and_reload(lv))
if (!(seg->segtype = get_segtype_from_flag(lv->vg->cmd, SEG_RAID0)) ||
!lv_update_and_reload(lv))
return_0;

/* Note: detached rmeta are NOT renamed */

@@ -3093,7 +3066,7 @@ static void _shift_area_lvs(struct lv_segment *seg, uint32_t s1, uint32_t s2)
*/
static int _shift_parity_dev(struct lv_segment *seg)
{
if (seg_is_raid0_meta(seg) || seg_is_raid5_n(seg))
if (seg_is_raid0_meta(seg))
_shift_area_lvs(seg, seg->area_count - 1, 0);
else if (seg_is_raid4(seg))
_shift_area_lvs(seg, 0, seg->area_count - 1);

@@ -3103,29 +3076,29 @@ static int _shift_parity_dev(struct lv_segment *seg)
return 1;
}

/* raid456 -> raid0* / striped */
static int _raid45_to_raid54_wrapper(TAKEOVER_FN_ARGS);
/* raid45 -> raid0* / striped */
static int _raid456_to_raid0_or_striped_wrapper(TAKEOVER_FN_ARGS)
{
int rename_sublvs = 0;
struct lv_segment *seg = first_seg(lv);
struct dm_list removal_lvs;
uint32_t region_size = seg->region_size;

dm_list_init(&removal_lvs);

/* Necessary when converting to raid0/striped w/o redundancy. */
if (!_raid_in_sync(lv)) {
log_error("Unable to convert %s while it is not in-sync.",
if (!seg_is_raid4(seg) && !seg_is_raid5_n(seg) && !seg_is_raid6_n_6(seg)) {
log_error("LV %s has to be of type raid4/raid5_n/raid6_n_6 to allow for this conversion.",
display_lvname(lv));
return 0;
}

/* Necessary when converting to raid0/striped w/o redundancy? */
if (!_raid_in_sync(lv))
return 0;

if (!yes && yes_no_prompt("Are you sure you want to convert \"%s\" LV %s to \"%s\" "
"type losing %s resilience? [y/n]: ",
lvseg_name(seg), display_lvname(lv), new_segtype->name,
segtype_is_striped(new_segtype) ? "all" : "some") == 'n') {
log_error("Logical volume %s NOT converted to \"%s\"",
"type losing all resilience? [y/n]: ",
lvseg_name(seg), display_lvname(lv), new_segtype->name) == 'n') {
log_error("Logical volume %s NOT converted to \"%s\".",
display_lvname(lv), new_segtype->name);
return 0;
}

@@ -3149,23 +3122,22 @@ static int _raid456_to_raid0_or_striped_wrapper(TAKEOVER_FN_ARGS)

if (segtype_is_any_raid0(new_segtype) &&
!(rename_sublvs = _rename_area_lvs(lv, "_"))) {
log_error("Failed to rename %s LV %s MetaLVs.", lvseg_name(seg), display_lvname(lv));
log_error("Failed to rename %s LV %s MetaLVs.",
lvseg_name(seg), display_lvname(lv));
return 0;
}

}

/* Remove meta and data LVs requested */
if (!_lv_raid_change_image_count(lv, new_image_count, allocate_pvs, &removal_lvs, 0, 0))
return 0;

/* FIXME Hard-coded raid4/5/6 to striped/raid0 */
if (segtype_is_striped_target(new_segtype) || segtype_is_any_raid0(new_segtype)) {
seg->area_len = seg->extents_copied = seg->area_len / seg->area_count;
if (!(seg->segtype = get_segtype_from_flag(lv->vg->cmd, SEG_RAID0_META)))
return_0;
if (!(seg->segtype = get_segtype_from_flag(lv->vg->cmd, SEG_RAID0_META)))
return_0;

region_size = 0;
}
/* FIXME Hard-coded raid4 to raid0 */
seg->area_len = seg->extents_copied = seg->area_len / seg->area_count;

if (segtype_is_striped_target(new_segtype)) {
if (!_convert_raid0_to_striped(lv, 0, &removal_lvs))

@@ -3174,103 +3146,21 @@ static int _raid456_to_raid0_or_striped_wrapper(TAKEOVER_FN_ARGS)
!_raid0_add_or_remove_metadata_lvs(lv, 0 /* update_and_reload */, allocate_pvs, &removal_lvs))
return_0;

if (segtype_is_raid4(new_segtype)) {
if (!(seg->segtype = get_segtype_from_flag(lv->vg->cmd, SEG_RAID5_N)))
return_0;
} else
seg->segtype = new_segtype;

seg->region_size = new_region_size ?: region_size;
seg->region_size = 0;

if (!_lv_update_reload_fns_reset_eliminate_lvs(lv, &removal_lvs))
return_0;

if (rename_sublvs) {
if (!_rename_area_lvs(lv, NULL)) {
log_error("Failed to rename %s LV %s MetaLVs.", lvseg_name(seg), display_lvname(lv));
log_error("Failed to rename %s LV %s MetaLVs.",
lvseg_name(seg), display_lvname(lv));
return 0;
}
if (!lv_update_and_reload(lv))
return_0;
}

if (segtype_is_raid4(new_segtype))
return _raid45_to_raid54_wrapper(lv, new_segtype, yes, force, first_seg(lv)->area_count,
1 /* data_copies */, 0, 0, 0, allocate_pvs);

return 1;
}

/*
* raid4 <-> raid5_n helper
*
* On conversions between raid4 and raid5_n, the parity SubLVs need
* to be switched between beginning and end of the segment areas.
*
* The metadata devices reflect the previous positions within the RaidLV,
* thus need to be cleared in order to allow the kernel to start the new
* mapping and recreate metadata with the proper new position stored.
*/
static int _raid45_to_raid54_wrapper(TAKEOVER_FN_ARGS)
{
struct lv_segment *seg = first_seg(lv);
struct dm_list removal_lvs;
uint32_t region_size = seg->region_size;

dm_list_init(&removal_lvs);

if (!(seg_is_raid4(seg) && segtype_is_raid5_n(new_segtype)) &&
!(seg_is_raid5_n(seg) && segtype_is_raid4(new_segtype))) {
log_error("LV %s has to be of type raid4 or raid5_n to allow for this conversion.",
display_lvname(lv));
return 0;
}

/* Necessary when converting to raid0/striped w/o redundancy. */
if (!_raid_in_sync(lv)) {
log_error("Unable to convert %s while it is not in-sync.",
display_lvname(lv));
return 0;
}

log_debug_metadata("Converting LV %s from %s to %s.", display_lvname(lv),
(seg_is_raid4(seg) ? SEG_TYPE_NAME_RAID4 : SEG_TYPE_NAME_RAID5_N),
(seg_is_raid4(seg) ? SEG_TYPE_NAME_RAID5_N : SEG_TYPE_NAME_RAID4));

/* Archive metadata */
if (!archive(lv->vg))
return_0;

if (!_rename_area_lvs(lv, "_")) {
log_error("Failed to rename %s LV %s MetaLVs.", lvseg_name(seg), display_lvname(lv));
return 0;
}

if (!_clear_meta_lvs(lv))
return_0;

/* Shift parity SubLV pair "PDD..." <-> "DD...P" on raid4 <-> raid5_n conversion */
if (!_shift_parity_dev(seg))
return 0;

/* Don't resync */
init_mirror_in_sync(1);
seg->region_size = new_region_size ?: region_size;
seg->segtype = new_segtype;

if (!_lv_update_reload_fns_reset_eliminate_lvs(lv, &removal_lvs))
return_0;

init_mirror_in_sync(0);

if (!_rename_area_lvs(lv, NULL)) {
log_error("Failed to rename %s LV %s MetaLVs.", lvseg_name(seg), display_lvname(lv));
return 0;
}
if (!lv_update_and_reload(lv))
return_0;

return 1;
}
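
The "PDD..." <-> "DD...P" shift performed above just moves the dedicated parity SubLV between the first and the last area slot, which _shift_area_lvs() carries out; a toy Python sketch of the rotation (purely illustrative, not the lvm2 implementation):

    # raid4 keeps the parity SubLV in the first area slot ("PDD..."),
    # raid5_n keeps it in the last ("DD...P").
    def shift_parity(areas, parity_to_end):
        if parity_to_end:                   # raid4 -> raid5_n
            return areas[1:] + areas[:1]
        return areas[-1:] + areas[:-1]      # raid5_n -> raid4

    # shift_parity(['P', 'D1', 'D2'], True)  -> ['D1', 'D2', 'P']
    # shift_parity(['D1', 'D2', 'P'], False) -> ['P', 'D1', 'D2']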

@@ -3295,10 +3185,9 @@ static int _striped_to_raid0_wrapper(struct logical_volume *lv,
return 1;
}

/* Helper: striped/raid0* -> raid4/5/6/10, raid45 -> raid6 wrapper */
/* Helper: striped/raid0* -> raid4/5/6/10 */
static int _striped_or_raid0_to_raid45610_wrapper(TAKEOVER_FN_ARGS)
{
uint32_t extents_copied, region_size, seg_len, stripe_size;
struct lv_segment *seg = first_seg(lv);
struct dm_list removal_lvs;

@@ -3317,13 +3206,20 @@ static int _striped_or_raid0_to_raid45610_wrapper(TAKEOVER_FN_ARGS)
return 0;
}

/* FIXME: restricted to raid4 for the time being... */
if (!segtype_is_raid4(new_segtype)) {
/* Can't convert striped/raid0* to e.g. raid10_offset */
log_error("Can't convert %s to %s.", display_lvname(lv), new_segtype->name);
return 0;
}

/* Archive metadata */
if (!archive(lv->vg))
return_0;

/* This helper can be used to convert from striped/raid0* -> raid10 too */
if (seg_is_striped_target(seg)) {
log_debug_metadata("Converting LV %s from %s to %s.",
log_debug_metadata("Converting LV %s from %s to %s",
display_lvname(lv), SEG_TYPE_NAME_STRIPED, SEG_TYPE_NAME_RAID0);
if (!(seg = _convert_striped_to_raid0(lv, 1 /* alloc_metadata_devs */, 0 /* update_and_reload */, allocate_pvs)))
return_0;

@@ -3333,32 +3229,18 @@ static int _striped_or_raid0_to_raid45610_wrapper(TAKEOVER_FN_ARGS)
if (seg_is_raid0(seg)) {
log_debug_metadata("Adding metadata LVs to %s.", display_lvname(lv));
if (!_raid0_add_or_remove_metadata_lvs(lv, 1 /* update_and_reload */, allocate_pvs, NULL))
return 0;
/* raid0_meta -> raid4 needs clearing of MetaLVs in order to avoid raid disk role change issues in the kernel */
return_0;
/* raid0_meta -> raid4 needs clearing of MetaLVs in order to avoid raid disk role cahnge issues in the kernel */
} else if (segtype_is_raid4(new_segtype) &&
!_clear_meta_lvs(lv))
return_0;

/* Add the additional component LV pairs */
log_debug_metadata("Adding %" PRIu32 " component LV pair(s) to %s.",
new_image_count - lv_raid_image_count(lv),
display_lvname(lv));
extents_copied = seg->extents_copied;
region_size = seg->region_size;
seg_len = seg->len;
stripe_size = seg->stripe_size;

if (seg_is_raid4(seg) || seg_is_any_raid5(seg)) {
if (!(seg->segtype = get_segtype_from_flag(lv->vg->cmd, SEG_RAID0_META)))
return_0;
seg->area_len = seg_lv(seg, 0)->le_count;
lv->le_count = seg->len = seg->area_len * seg->area_count;
seg->extents_copied = seg->region_size = 0;
}

if (!_lv_raid_change_image_count(lv, new_image_count, allocate_pvs, NULL, 0, 1))
return 0;
return_0;

if (segtype_is_raid4(new_segtype) &&
(!_shift_parity_dev(seg) ||

@@ -3368,12 +3250,9 @@ static int _striped_or_raid0_to_raid45610_wrapper(TAKEOVER_FN_ARGS)
}

seg->segtype = new_segtype;
seg->region_size = new_region_size ?: region_size;

/* FIXME Hard-coded raid0 to raid4/5/6 */
seg->stripe_size = stripe_size;
lv->le_count = seg->len = seg->area_len = seg_len;
seg->extents_copied = extents_copied;
seg->region_size = new_region_size;
/* FIXME Hard-coded raid0 to raid4 */
seg->area_len = seg->len;

_check_and_adjust_region_size(lv);

@@ -3383,7 +3262,7 @@ static int _striped_or_raid0_to_raid45610_wrapper(TAKEOVER_FN_ARGS)
return_0;

if (segtype_is_raid4(new_segtype)) {
/* We had to rename SubLVs because of collision free shifting, rename back... */
/* We had to rename SubLVs because of collision free sgifting, rename back... */
if (!_rename_area_lvs(lv, NULL))
return_0;
if (!lv_update_and_reload(lv))

@@ -3430,8 +3309,6 @@ static int _takeover_from_mirrored_to_raid0_meta(TAKEOVER_FN_ARGS)

static int _takeover_from_mirrored_to_raid1(TAKEOVER_FN_ARGS)
{
first_seg(lv)->region_size = new_region_size;

return _convert_mirror_to_raid1(lv, new_segtype);
}

@@ -3475,16 +3352,12 @@ static int _takeover_from_raid0_to_raid10(TAKEOVER_FN_ARGS)

static int _takeover_from_raid0_to_raid45(TAKEOVER_FN_ARGS)
{
return _striped_or_raid0_to_raid45610_wrapper(lv, new_segtype, yes, force,
first_seg(lv)->area_count + 1 /* new_image_count */,
2 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
return _striped_or_raid0_to_raid45610_wrapper(lv, new_segtype, yes, force, first_seg(lv)->area_count + 1, 1 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
}

static int _takeover_from_raid0_to_raid6(TAKEOVER_FN_ARGS)
{
return _striped_or_raid0_to_raid45610_wrapper(lv, new_segtype, yes, force,
first_seg(lv)->area_count + 2 /* new_image_count */,
3 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
return _takeover_unsupported_yet(lv, new_stripes, new_segtype);
}

static int _takeover_from_raid0_to_striped(TAKEOVER_FN_ARGS)

@@ -3525,16 +3398,12 @@ static int _takeover_from_raid0_meta_to_raid10(TAKEOVER_FN_ARGS)

static int _takeover_from_raid0_meta_to_raid45(TAKEOVER_FN_ARGS)
{
return _striped_or_raid0_to_raid45610_wrapper(lv, new_segtype, yes, force,
first_seg(lv)->area_count + 1 /* new_image_count */,
2 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
return _striped_or_raid0_to_raid45610_wrapper(lv, new_segtype, yes, force, first_seg(lv)->area_count + 1, 1 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
}

static int _takeover_from_raid0_meta_to_raid6(TAKEOVER_FN_ARGS)
{
return _striped_or_raid0_to_raid45610_wrapper(lv, new_segtype, yes, force,
first_seg(lv)->area_count + 2 /* new_image_count */,
3 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
return _takeover_unsupported_yet(lv, new_stripes, new_segtype);
}

static int _takeover_from_raid0_meta_to_striped(TAKEOVER_FN_ARGS)

@@ -3612,23 +3481,12 @@ static int _takeover_from_raid45_to_raid1(TAKEOVER_FN_ARGS)

static int _takeover_from_raid45_to_raid54(TAKEOVER_FN_ARGS)
{
return _raid45_to_raid54_wrapper(lv, new_segtype, yes, force, first_seg(lv)->area_count,
2 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
return _takeover_unsupported_yet(lv, new_stripes, new_segtype);
}

static int _takeover_from_raid45_to_raid6(TAKEOVER_FN_ARGS)
{
if (seg_is_raid4(first_seg(lv))) {
struct segment_type *segtype = get_segtype_from_flag(lv->vg->cmd, SEG_RAID5_N);

if (!segtype ||
!_raid45_to_raid54_wrapper(lv, segtype, yes, force, first_seg(lv)->area_count,
1 /* data_copies */, 0, 0, 0, allocate_pvs))
return 0;
}
return _striped_or_raid0_to_raid45610_wrapper(lv, new_segtype, yes, force,
first_seg(lv)->area_count + 1 /* new_image_count */,
3 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
return _takeover_unsupported_yet(lv, new_stripes, new_segtype);
}

static int _takeover_from_raid45_to_striped(TAKEOVER_FN_ARGS)

@@ -3638,26 +3496,22 @@ static int _takeover_from_raid45_to_striped(TAKEOVER_FN_ARGS)

static int _takeover_from_raid6_to_raid0(TAKEOVER_FN_ARGS)
{
return _raid456_to_raid0_or_striped_wrapper(lv, new_segtype, yes, force, first_seg(lv)->area_count - 2,
1 /* data_copies */, 0, 0, 0, allocate_pvs);
return _takeover_unsupported_yet(lv, new_stripes, new_segtype);
}

static int _takeover_from_raid6_to_raid0_meta(TAKEOVER_FN_ARGS)
{
return _raid456_to_raid0_or_striped_wrapper(lv, new_segtype, yes, force, first_seg(lv)->area_count - 2,
1 /* data_copies */, 0, 0, 0, allocate_pvs);
return _takeover_unsupported_yet(lv, new_stripes, new_segtype);
}

static int _takeover_from_raid6_to_raid45(TAKEOVER_FN_ARGS)
{
return _raid456_to_raid0_or_striped_wrapper(lv, new_segtype, yes, force, first_seg(lv)->area_count - 1,
2 /* data_copies */, 0, 0, 0, allocate_pvs);
return _takeover_unsupported_yet(lv, new_stripes, new_segtype);
}

static int _takeover_from_raid6_to_striped(TAKEOVER_FN_ARGS)
{
return _raid456_to_raid0_or_striped_wrapper(lv, new_segtype, yes, force, first_seg(lv)->area_count - 2,
2 /* data_copies */, 0, 0, 0, allocate_pvs);
return _takeover_unsupported_yet(lv, new_stripes, new_segtype);
}

static int _takeover_from_striped_to_raid0(TAKEOVER_FN_ARGS)

@@ -3694,9 +3548,7 @@ static int _takeover_from_striped_to_raid45(TAKEOVER_FN_ARGS)

static int _takeover_from_striped_to_raid6(TAKEOVER_FN_ARGS)
{
return _striped_or_raid0_to_raid45610_wrapper(lv, new_segtype, yes, force,
first_seg(lv)->area_count + 2 /* new_image_count */,
3 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
return _takeover_unsupported_yet(lv, new_stripes, new_segtype);
}

/*

@@ -3813,144 +3665,33 @@ static int _log_prohibited_option(const struct lv_segment *seg_from,
static int _set_convenient_raid456_segtype_to(const struct lv_segment *seg_from,
const struct segment_type **segtype)
{
size_t len = min(strlen((*segtype)->name), strlen(lvseg_name(seg_from)));
const struct segment_type *segtype_sav = *segtype;

/* Bail out if same RAID level is requested. */
if (!strncmp((*segtype)->name, lvseg_name(seg_from), len))
return 1;

/* Striped/raid0 -> raid5/6 */
if (seg_is_striped(seg_from) || seg_is_any_raid0(seg_from)) {
if (seg_is_striped(seg_from) || seg_is_raid4(seg_from)) {
/* If this is any raid5 conversion request -> enforce raid5_n, because we convert from striped */
if (segtype_is_any_raid5(*segtype) &&
!segtype_is_raid5_n(*segtype)) {
if (!(*segtype = get_segtype_from_flag(seg_from->lv->vg->cmd, SEG_RAID5_N)))
return_0;
goto replaced;
log_error("Conversion to raid5_n not yet supported.");
return 0;

/* If this is any raid6 conversion request -> enforce raid6_n_6, because we convert from striped */
} else if (segtype_is_any_raid6(*segtype) &&
!segtype_is_raid6_n_6(*segtype)) {
if (!(*segtype = get_segtype_from_flag(seg_from->lv->vg->cmd, SEG_RAID6_N_6)))
return_0;
goto replaced;
log_error("Conversion to raid6_n_6 not yet supported.");
return 0;
}

/* raid4 -> raid5_n */
} else if (seg_is_raid4(seg_from) &&
/* Got to do check for raid5 -> raid6 ... */
} else if (seg_is_any_raid5(seg_from) &&
segtype_is_any_raid6(*segtype)) {
log_error("Conversion not supported.");
return 0;

/* ... and raid6 -> raid5 */
} else if (seg_is_any_raid6(seg_from) &&
segtype_is_any_raid5(*segtype)) {
if (!(*segtype = get_segtype_from_flag(seg_from->lv->vg->cmd, SEG_RAID5_N)))
return_0;
goto replaced;

/* raid4/raid5_n -> striped/raid0/raid6 */
} else if ((seg_is_raid4(seg_from) || seg_is_raid5_n(seg_from)) &&
!segtype_is_striped(*segtype) &&
!segtype_is_any_raid0(*segtype) &&
!segtype_is_raid4(*segtype) &&
!segtype_is_raid5_n(*segtype) &&
!segtype_is_raid6_n_6(*segtype)) {
if (!(*segtype = get_segtype_from_flag(seg_from->lv->vg->cmd, SEG_RAID6_N_6)))
return_0;
goto replaced;

/* ... and raid6 -> striped/raid0/raid4/raid5_n */
} else if (seg_is_raid6_n_6(seg_from) &&
!segtype_is_striped(*segtype) &&
!segtype_is_any_raid0(*segtype) &&
!segtype_is_raid4(*segtype) &&
!segtype_is_raid5_n(*segtype)) {
if (!(*segtype = get_segtype_from_flag(seg_from->lv->vg->cmd, SEG_RAID5_N)))
return_0;
goto replaced;
}

return 1;

replaced:
log_warn("Replaced LV type %s with possible type %s.",
segtype_sav->name, (*segtype)->name);
return 1;
}
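
In practice this substitution means that, reading the branch side of the hunk above, a takeover request such as lvconvert --type raid5 on a striped LV would be carried out as raid5_n (and --type raid6 as raid6_n_6), with the log_warn() above reporting, e.g., "Replaced LV type raid5 with possible type raid5_n." (an illustrative reading of the code shown here, not captured command output).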

/*
* HM Helper:
*
* Change region size on raid @lv to @region_size if
* different from current region_size and adjusted region size
*/
static int _region_size_change_requested(struct logical_volume *lv, int yes, uint32_t region_size)
{
uint32_t old_region_size;
const char *seg_region_size_str;
struct lv_segment *seg = first_seg(lv);

/* Caller should ensure this */
if (!region_size)
return_0;

/* CLI validation provides the check but be cautious... */
if (seg_is_any_raid0(seg))
return_0;

if (region_size == seg->region_size) {
log_warn("Region size wouldn't change on %s LV %s.",
lvseg_name(seg), display_lvname(lv));
log_error("Conversion not supported.");
return 0;
}

if (region_size * 8 > lv->size) {
log_error("Requested region size too large for LV %s size %s.",
display_lvname(lv), display_size(lv->vg->cmd, lv->size));
return 0;
}

if (region_size < seg->stripe_size) {
log_error("Region size for LV %s is smaller than stripe size.",
display_lvname(lv));
return 0;
}

if (!_raid_in_sync(lv)) {
log_error("Unable to change region size on %s LV %s while it is not in-sync.",
lvseg_name(seg), display_lvname(lv));
return 0;
}

old_region_size = seg->region_size;
seg->region_size = region_size;
seg_region_size_str = display_size(lv->vg->cmd, seg->region_size);
_check_and_adjust_region_size(lv);

if (seg->region_size == old_region_size) {
log_warn("Region size on %s did not change due to adjustment.",
display_lvname(lv));
return 1;
}

if (!yes && yes_no_prompt("Do you really want to change the region_size %s of LV %s to %s? [y/n]: ",
display_size(lv->vg->cmd, old_region_size),
display_lvname(lv), seg_region_size_str) == 'n') {
log_error("Logical volume %s NOT converted", display_lvname(lv));
return 0;
}
if (sigint_caught())
return_0;

/* Check for new region size causing bitmap to still fit metadata image LV */
if (seg->meta_areas && seg_metatype(seg, 0) == AREA_LV && seg_metalv(seg, 0)->le_count <
_raid_rmeta_extents(lv->vg->cmd, lv->le_count, seg->region_size, lv->vg->extent_size)) {
log_error("Region size %s on %s is too small for metadata LV size.",
seg_region_size_str, display_lvname(lv));
return 0;
}

if (!lv_update_and_reload_origin(lv))
return_0;

log_warn("Changed region size on RAID LV %s to %s.",
display_lvname(lv), seg_region_size_str);
return 1;
}
|
||||
|
||||
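For readers outside the code, the constraints enforced above reduce to a few integer checks. The following standalone sketch mirrors them; the helper name and plain-integer interface are illustrative only (not LVM2 API), and all sizes are in 512-byte sectors:

```c
#include <stdint.h>

/* Sketch only: mirrors the validation in _region_size_change_requested().
 * Hypothetical helper; all sizes in 512-byte sectors. */
static int region_size_ok(uint64_t lv_size, uint32_t stripe_size,
			  uint32_t cur_region_size, uint32_t new_region_size)
{
	if (!new_region_size)
		return 0;				/* caller must supply a size */
	if (new_region_size == cur_region_size)
		return 0;				/* nothing to change */
	if ((uint64_t) new_region_size * 8 > lv_size)
		return 0;				/* LV must span at least 8 regions */
	if (new_region_size < stripe_size)
		return 0;				/* region must hold a full stripe */
	return 1;
}
```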
@@ -4005,8 +3746,7 @@ int lv_raid_convert(struct logical_volume *lv,
		    const unsigned new_stripes,
		    const unsigned new_stripe_size_supplied,
		    const unsigned new_stripe_size,
		    /* FIXME: workaround with volatile new_region_size until the cli validation patches are merged */
		    uint32_t new_region_size,
		    const uint32_t new_region_size,
		    struct dm_list *allocate_pvs)
{
	struct lv_segment *seg = first_seg(lv);
@@ -4031,20 +3771,6 @@ int lv_raid_convert(struct logical_volume *lv,
	if (segtype_is_raid(new_segtype) && !_check_max_raid_devices(new_image_count))
		return_0;

	/* Change RAID region size */
	/*
	 * FIXME: workaround with volatile new_region_size until the
	 * cli validation patches are merged, when we'll change
	 * the API to have new_region_size_supplied to check for.
	 */
	if (new_region_size) {
		if (new_segtype == seg->segtype &&
		    new_region_size != seg->region_size &&
		    seg_is_raid(seg) && !seg_is_any_raid0(seg))
			return _region_size_change_requested(lv, yes, new_region_size);
	} else
		new_region_size = seg->region_size ? : get_default_region_size(lv->vg->cmd);

	/*
	 * Check acceptable options mirrors, region_size,
	 * stripes and/or stripe_size have been provided.
@@ -4092,12 +3818,6 @@ int lv_raid_convert(struct logical_volume *lv,
					new_region_size, allocate_pvs);
}

int lv_raid_change_region_size(struct logical_volume *lv,
			       int yes, int force, uint32_t new_region_size)
{
	return _region_size_change_requested(lv, yes, new_region_size);
}

static int _remove_partial_multi_segment_image(struct logical_volume *lv,
					       struct dm_list *remove_pvs)
{
@@ -4207,7 +3927,7 @@ static int _lv_raid_rebuild_or_replace(struct logical_volume *lv,
	const char *action_str = rebuild ? "rebuild" : "replace";

	if (seg_is_any_raid0(raid_seg)) {
		log_error("Can't replace any devices in %s LV %s.",
		log_error("Can't replace any devices in %s LV %s",
			  lvseg_name(raid_seg), display_lvname(lv));
		return 0;
	}
@@ -4232,6 +3952,9 @@ static int _lv_raid_rebuild_or_replace(struct logical_volume *lv,
		return 0;
	}

	if (!archive(lv->vg))
		return_0;

	/*
	 * How many sub-LVs are being removed?
	 */
@@ -4249,9 +3972,6 @@ static int _lv_raid_rebuild_or_replace(struct logical_volume *lv,
		    lv_is_on_pvs(seg_metalv(raid_seg, s), remove_pvs)) {
			match_count++;
			if (rebuild) {
				if ((match_count == 1) &&
				    !archive(lv->vg))
					return_0;
				seg_lv(raid_seg, s)->status |= LV_REBUILD;
				seg_metalv(raid_seg, s)->status |= LV_REBUILD;
			}
@@ -4297,9 +4017,6 @@ static int _lv_raid_rebuild_or_replace(struct logical_volume *lv,
	if (rebuild)
		goto skip_alloc;

	if (!archive(lv->vg))
		return_0;

	/* Prevent any PVs holding image components from being used for allocation */
	if (!_avoid_pvs_with_other_images_of_lv(lv, allocate_pvs)) {
		log_error("Failed to prevent PVs holding image components "
@@ -132,10 +132,6 @@ struct dev_manager;
#define segtype_is_raid6_nr(segtype)	((segtype)->flags & SEG_RAID6_NR ? 1 : 0)
#define segtype_is_raid6_n_6(segtype)	((segtype)->flags & SEG_RAID6_N_6 ? 1 : 0)
#define segtype_is_raid6_zr(segtype)	((segtype)->flags & SEG_RAID6_ZR ? 1 : 0)
#define segtype_is_raid6_ls_6(segtype)	((segtype)->flags & SEG_RAID6_LS_6 ? 1 : 0)
#define segtype_is_raid6_rs_6(segtype)	((segtype)->flags & SEG_RAID6_RS_6 ? 1 : 0)
#define segtype_is_raid6_la_6(segtype)	((segtype)->flags & SEG_RAID6_LA_6 ? 1 : 0)
#define segtype_is_raid6_ra_6(segtype)	((segtype)->flags & SEG_RAID6_RA_6 ? 1 : 0)
#define segtype_is_any_raid10(segtype)	((segtype)->flags & SEG_RAID10 ? 1 : 0)
#define segtype_is_raid10(segtype)	((segtype)->flags & SEG_RAID10 ? 1 : 0)
#define segtype_is_raid10_near(segtype)	segtype_is_raid10(segtype)
@@ -1,5 +1,5 @@
/*
 * Copyright (C) 2011-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2011-2016 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
@@ -87,13 +87,13 @@ static int _raid_text_import_areas(struct lv_segment *seg,
	}

	/* Metadata device comes first. */
	if (!(lv = find_lv(seg->lv->vg, cv->v.str))) {
		log_error("Couldn't find volume '%s' for segment '%s'.",
			  cv->v.str ? : "NULL", seg_name);
		return 0;
	}
	if (!seg_is_raid0(seg)) {
		if (!(lv = find_lv(seg->lv->vg, cv->v.str))) {
			log_error("Couldn't find volume '%s' for segment '%s'.",
				  cv->v.str ? : "NULL", seg_name);
			return 0;
		}

	if (strstr(lv->name, "_rmeta_")) {
		if (!set_lv_segment_area_lv(seg, s, lv, 0, RAID_META))
			return_0;
		cv = cv->next;
@@ -537,20 +537,14 @@ static const struct raid_type {
	{ SEG_TYPE_NAME_RAID10, 0, SEG_RAID10 | SEG_AREAS_MIRRORED },
	{ SEG_TYPE_NAME_RAID4, 1, SEG_RAID4 },
	{ SEG_TYPE_NAME_RAID5, 1, SEG_RAID5 },
	{ SEG_TYPE_NAME_RAID5_N, 1, SEG_RAID5_N },
	{ SEG_TYPE_NAME_RAID5_LA, 1, SEG_RAID5_LA },
	{ SEG_TYPE_NAME_RAID5_LS, 1, SEG_RAID5_LS },
	{ SEG_TYPE_NAME_RAID5_RA, 1, SEG_RAID5_RA },
	{ SEG_TYPE_NAME_RAID5_RS, 1, SEG_RAID5_RS },
	{ SEG_TYPE_NAME_RAID6, 2, SEG_RAID6 },
	{ SEG_TYPE_NAME_RAID6_N_6, 2, SEG_RAID6_N_6 },
	{ SEG_TYPE_NAME_RAID6_NC, 2, SEG_RAID6_NC },
	{ SEG_TYPE_NAME_RAID6_NR, 2, SEG_RAID6_NR },
	{ SEG_TYPE_NAME_RAID6_ZR, 2, SEG_RAID6_ZR },
	{ SEG_TYPE_NAME_RAID6_LS_6, 2, SEG_RAID6_LS_6 },
	{ SEG_TYPE_NAME_RAID6_RS_6, 2, SEG_RAID6_RS_6 },
	{ SEG_TYPE_NAME_RAID6_LA_6, 2, SEG_RAID6_LA_6 },
	{ SEG_TYPE_NAME_RAID6_RA_6, 2, SEG_RAID6_RA_6 }
	{ SEG_TYPE_NAME_RAID6_ZR, 2, SEG_RAID6_ZR }
};

static struct segment_type *_init_raid_segtype(struct cmd_context *cmd,
@@ -1,5 +1,4 @@
dm_bit_get_last
dm_bit_get_prev
dm_stats_update_regions_from_fd
dm_bitset_parse_list
dm_stats_bind_from_fd
@@ -1851,10 +1851,10 @@ static struct dm_ioctl *_do_dm_ioctl(struct dm_task *dmt, unsigned command,
			dmi->flags &= ~DM_EXISTS_FLAG;	/* FIXME */
		else {
			if (_log_suppress || dmt->ioctl_errno == EINTR)
				log_verbose("device-mapper: %s ioctl on %s %s%s%.0d%s%.0d%s%s "
				log_verbose("device-mapper: %s ioctl on %s%s%s%.0d%s%.0d%s%s "
					    "failed: %s",
					    _cmd_data_v4[dmt->type].name,
					    dmi->name, dmi->uuid,
					    _cmd_data_v4[dmt->type].name,
					    dmi->name, dmi->uuid,
					    dmt->major > 0 ? "(" : "",
					    dmt->major > 0 ? dmt->major : 0,
					    dmt->major > 0 ? ":" : "",
@@ -1863,10 +1863,10 @@ static struct dm_ioctl *_do_dm_ioctl(struct dm_task *dmt, unsigned command,
					    dmt->major > 0 ? ")" : "",
					    strerror(dmt->ioctl_errno));
			else
				log_error("device-mapper: %s ioctl on %s %s%s%.0d%s%.0d%s%s "
				log_error("device-mapper: %s ioctl on %s%s%s%.0d%s%.0d%s%s "
					  "failed: %s",
					  _cmd_data_v4[dmt->type].name,
					  dmi->name, dmi->uuid,
					  dmi->name, dmi->uuid,
					  dmt->major > 0 ? "(" : "",
					  dmt->major > 0 ? dmt->major : 0,
					  dmt->major > 0 ? ":" : "",
@@ -1320,9 +1320,8 @@ int dm_stats_get_group_descriptor(const struct dm_stats *dms,
 * On success the function returns a pointer to an array of uint64_t
 * containing the IDs of the newly created regions. The region_id
 * array is terminated by the value DM_STATS_REGION_NOT_PRESENT and
 * should be freed using dm_free() when no longer required.
 *
 * On error NULL is returned.
 * should be freed using dm_free() when no longer required. On error
 * NULL is returned.
 *
 * Following a call to dm_stats_create_regions_from_fd() the handle
 * is guaranteed to be in a listed state, and to contain any region
@@ -1330,43 +1329,12 @@ int dm_stats_get_group_descriptor(const struct dm_stats *dms,
 *
 * The group_id for the new group is equal to the region_id value in
 * the first array element.
 *
 */
uint64_t *dm_stats_create_regions_from_fd(struct dm_stats *dms, int fd,
					  int group, int precise,
					  struct dm_histogram *bounds,
					  const char *alias);
/*
 * Update a group of regions that correspond to the extents of a file
 * in the filesystem, adding and removing regions to account for
 * allocation changes in the underlying file.
 *
 * File descriptor fd must reference a regular file, open for reading,
 * in a local file system that supports the FIEMAP ioctl, and that
 * returns data describing the physical location of extents.
 *
 * The file descriptor can be closed by the caller following the call
 * to dm_stats_update_regions_from_fd().
 *
 * On success the function returns a pointer to an array of uint64_t
 * containing the IDs of the updated regions (including any existing
 * regions that were not modified by the call).
 *
 * The region_id array is terminated by the special value
 * DM_STATS_REGION_NOT_PRESENT and should be freed using dm_free()
 * when no longer required.
 *
 * On error NULL is returned.
 *
 * Following a call to dm_stats_update_regions_from_fd() the handle
 * is guaranteed to be in a listed state, and to contain any region
 * and group identifiers created by the operation.
 *
 * This function cannot be used with file mapped regions that are
 * not members of a group: either group the regions, or remove them
 * and re-map them with dm_stats_create_regions_from_fd().
 */
uint64_t *dm_stats_update_regions_from_fd(struct dm_stats *dms, int fd,
					  uint64_t group_id);

/*
 * Call this to actually run the ioctl.
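A minimal usage sketch of the two calls documented above. It relies only on what the comments state (the returned array is terminated by DM_STATS_REGION_NOT_PRESENT and must be released with dm_free()); error handling is abbreviated and the program_id string is illustrative:

```c
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>
#include "libdevmapper.h"

static int map_and_update(const char *path, uint64_t group_id)
{
	struct dm_stats *dms;
	uint64_t *regions, i;
	int fd;

	if (!(dms = dm_stats_create("example")))	/* illustrative program_id */
		return 0;
	if ((fd = open(path, O_RDONLY)) < 0)
		return 0;
	if (!dm_stats_bind_from_fd(dms, fd))		/* bind handle to the fd's device */
		return 0;

	/* group=1, precise=0, no histogram bounds, alias taken from the path */
	if ((regions = dm_stats_create_regions_from_fd(dms, fd, 1, 0, NULL, path))) {
		for (i = 0; regions[i] != DM_STATS_REGION_NOT_PRESENT; i++)
			printf("created region %" PRIu64 "\n", regions[i]);
		dm_free(regions);
	}

	/* later: re-walk the file's extents and adjust the group to match */
	if ((regions = dm_stats_update_regions_from_fd(dms, fd, group_id)))
		dm_free(regions);

	close(fd);
	dm_stats_destroy(dms);
	return 1;
}
```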
@@ -1,5 +1,5 @@
/*
 * Copyright (C) 2005-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2005-2016 Red Hat, Inc. All rights reserved.
 *
 * This file is part of the device-mapper userspace tools.
 *
@@ -47,19 +47,13 @@ enum {
	SEG_RAID1,
	SEG_RAID10,
	SEG_RAID4,
	SEG_RAID5_N,
	SEG_RAID5_LA,
	SEG_RAID5_RA,
	SEG_RAID5_LS,
	SEG_RAID5_RS,
	SEG_RAID6_N_6,
	SEG_RAID6_ZR,
	SEG_RAID6_NR,
	SEG_RAID6_NC,
	SEG_RAID6_LS_6,
	SEG_RAID6_RS_6,
	SEG_RAID6_LA_6,
	SEG_RAID6_RA_6,
};

/* FIXME Add crypt and multipath support */
@@ -87,20 +81,13 @@ static const struct {
	{ SEG_RAID1, "raid1"},
	{ SEG_RAID10, "raid10"},
	{ SEG_RAID4, "raid4"},
	{ SEG_RAID5_N, "raid5_n"},
	{ SEG_RAID5_LA, "raid5_la"},
	{ SEG_RAID5_RA, "raid5_ra"},
	{ SEG_RAID5_LS, "raid5_ls"},
	{ SEG_RAID5_RS, "raid5_rs"},
	{ SEG_RAID6_N_6,"raid6_n_6"},
	{ SEG_RAID6_ZR, "raid6_zr"},
	{ SEG_RAID6_NR, "raid6_nr"},
	{ SEG_RAID6_NC, "raid6_nc"},
	{ SEG_RAID6_LS_6, "raid6_ls_6"},
	{ SEG_RAID6_RS_6, "raid6_rs_6"},
	{ SEG_RAID6_LA_6, "raid6_la_6"},
	{ SEG_RAID6_RA_6, "raid6_ra_6"},

/*
 * WARNING: Since 'raid' target overloads this 1:1 mapping table
@@ -2153,19 +2140,13 @@ static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
	case SEG_RAID1:
	case SEG_RAID10:
	case SEG_RAID4:
	case SEG_RAID5_N:
	case SEG_RAID5_LA:
	case SEG_RAID5_RA:
	case SEG_RAID5_LS:
	case SEG_RAID5_RS:
	case SEG_RAID6_N_6:
	case SEG_RAID6_ZR:
	case SEG_RAID6_NR:
	case SEG_RAID6_NC:
	case SEG_RAID6_LS_6:
	case SEG_RAID6_RS_6:
	case SEG_RAID6_LA_6:
	case SEG_RAID6_RA_6:
		if (!area->dev_node) {
			EMIT_PARAMS(*pos, " -");
			break;
@@ -2607,19 +2588,13 @@ static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
	case SEG_RAID1:
	case SEG_RAID10:
	case SEG_RAID4:
	case SEG_RAID5_N:
	case SEG_RAID5_LA:
	case SEG_RAID5_RA:
	case SEG_RAID5_LS:
	case SEG_RAID5_RS:
	case SEG_RAID6_N_6:
	case SEG_RAID6_ZR:
	case SEG_RAID6_NR:
	case SEG_RAID6_NC:
	case SEG_RAID6_LS_6:
	case SEG_RAID6_RS_6:
	case SEG_RAID6_LA_6:
	case SEG_RAID6_RA_6:
		target_type_is_raid = 1;
		r = _raid_emit_segment_line(dmt, major, minor, seg, seg_start,
					    params, paramsize);
@@ -3894,19 +3869,13 @@ int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset)
	case SEG_RAID0_META:
	case SEG_RAID1:
	case SEG_RAID4:
	case SEG_RAID5_N:
	case SEG_RAID5_LA:
	case SEG_RAID5_RA:
	case SEG_RAID5_LS:
	case SEG_RAID5_RS:
	case SEG_RAID6_N_6:
	case SEG_RAID6_ZR:
	case SEG_RAID6_NR:
	case SEG_RAID6_NC:
	case SEG_RAID6_LS_6:
	case SEG_RAID6_RS_6:
	case SEG_RAID6_LA_6:
	case SEG_RAID6_RA_6:
		break;
	default:
		log_error("dm_tree_node_add_null_area() called on an unsupported segment type");
@@ -261,9 +261,6 @@ static int _stats_group_id_present(const struct dm_stats *dms, uint64_t id)
{
	struct dm_stats_group *group = NULL;

	if (id == DM_STATS_GROUP_NOT_PRESENT)
		return 0;

	if (!dms || !dms->regions)
		return_0;

@@ -3868,37 +3865,6 @@ static int _extent_start_compare(const void *p1, const void *p2)
		return 1;
}

/*
 * Resize the group bitmap corresponding to group_id so that it can
 * contain at least num_regions members.
 */
static int _stats_resize_group(struct dm_stats_group *group, int num_regions)
{
	int last_bit = dm_bit_get_last(group->regions);
	dm_bitset_t new, old;

	if (last_bit >= num_regions) {
		log_error("Cannot resize group bitmap to %d with bit %d set.",
			  num_regions, last_bit);
		return 0;
	}

	log_very_verbose("Resizing group bitmap from %d to %d (last_bit: %d).",
			 group->regions[0], num_regions, last_bit);

	new = dm_bitset_create(NULL, num_regions);
	if (!new) {
		log_error("Could not allocate memory for new group bitmap.");
		return 0;
	}

	old = group->regions;
	dm_bit_copy(new, old);
	group->regions = new;
	dm_bitset_destroy(old);
	return 1;
}

static int _stats_create_group(struct dm_stats *dms, dm_bitset_t regions,
			       const char *alias, uint64_t *group_id)
{
@@ -4252,8 +4218,8 @@ bad:
	return 0;
}

static int _stats_add_file_extent(int fd, struct dm_pool *mem, uint64_t id,
				  struct fiemap_extent *fm_ext)
static int _stats_add_extent(struct dm_pool *mem, struct fiemap_extent *fm_ext,
			     uint64_t id)
{
	struct _extent extent;

@@ -4263,10 +4229,8 @@ static int _stats_add_file_extent(int fd, struct dm_pool *mem, uint64_t id,
	/* convert bytes to dm (512b) sectors */
	extent.start = fm_ext->fe_physical >> SECTOR_SHIFT;
	extent.len = fm_ext->fe_length >> SECTOR_SHIFT;
	extent.id = id;

	log_very_verbose("Extent " FMTu64 " on fd %d at " FMTu64 "+"
			 FMTu64, extent.id, fd, extent.start, extent.len);
	extent.id = id;

	if (!dm_pool_grow_object(mem, &extent,
				 sizeof(extent))) {
@@ -4274,6 +4238,7 @@ static int _stats_add_file_extent(int fd, struct dm_pool *mem, uint64_t id,
		return 0;
	}
	return 1;

}

/* test for the boundary of an extent */
@@ -4290,7 +4255,7 @@ do { \
	*(to) = *(from); \
} while (0)

static uint64_t _stats_map_extents(int fd, struct dm_pool *mem,
static uint64_t _stats_map_extents(struct dm_pool *mem,
				   struct fiemap *fiemap,
				   struct fiemap_extent *fm_ext,
				   struct fiemap_extent *fm_last,
@@ -4321,34 +4286,16 @@ static uint64_t _stats_map_extents(int fd, struct dm_pool *mem,
		    & (FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_DELALLOC))
			continue;

		/*
		 * Begin a new extent if the current physical address differs
		 * from the expected address yielded by fm_last.fe_physical +
		 * fm_last.fe_length.
		 *
		 * A logical discontinuity is seen at the start of the file if
		 * unwritten space exists before the first extent: do not add
		 * any extent record until we have accumulated a non-zero length
		 * in fm_pending.
		 */
		if (fm_pending->fe_length &&
		    ext_boundary(fm_ext[i], expected)) {
			if (!_stats_add_file_extent(fd, mem, nr_extents,
						    fm_pending))
		if (ext_boundary(fm_ext[i], expected)) {
			if (!_stats_add_extent(mem, fm_pending, nr_extents))
				goto_bad;
			nr_extents++;
			/* Begin a new pending extent. */
			ext_copy(fm_pending, fm_ext + i);
		} else {
			expected = 0;
			/* Begin a new pending extent for extent 0. If there is
			 * a hole at the start of the file, the first allocated
			 * extent will have a non-zero fe_logical. Detect this
			 * case by testing fm_pending->fe_length: if no length
			 * has been accumulated we are handling the first
			 * physical extent of the file.
			 */
			if (!fm_pending->fe_length || fm_ext[i].fe_logical == 0)
			/* Begin a new pending extent for extent 0. */
			if (fm_ext[i].fe_logical == 0)
				ext_copy(fm_pending, fm_ext + i);
			else
				/* accumulate this logical extent's length */
@@ -4361,8 +4308,8 @@ static uint64_t _stats_map_extents(int fd, struct dm_pool *mem,
	 * If the file only has a single extent, no boundary is ever
	 * detected to trigger addition of the first extent.
	 */
	if (*eof || (fm_ext[i - 1].fe_logical == 0)) {
		_stats_add_file_extent(fd, mem, nr_extents, fm_pending);
	if (fm_ext[i - 1].fe_logical == 0) {
		_stats_add_extent(mem, fm_pending, nr_extents);
		nr_extents++;
	}
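The effect of the loop above is easier to see in isolation: physically contiguous FIEMAP extents are merged into one pending record, and a record is emitted whenever a physical discontinuity appears. A toy model of that pass (not libdm code; assumes at least one input extent):

```c
#include <stdint.h>
#include <stdio.h>

struct ext { uint64_t physical, length; };

/* Toy model of the coalescing pass; requires n >= 1. */
static uint64_t coalesce(const struct ext *in, uint64_t n, struct ext *out)
{
	struct ext pending = in[0];
	uint64_t i, nr = 0;

	for (i = 1; i < n; i++) {
		if (in[i].physical != pending.physical + pending.length) {
			out[nr++] = pending;	/* discontinuity: emit record */
			pending = in[i];	/* start a new pending extent */
		} else
			pending.length += in[i].length;	/* contiguous: merge */
	}
	out[nr++] = pending;			/* flush the final pending extent */
	return nr;
}

int main(void)
{
	struct ext in[] = { {8, 8}, {16, 8}, {64, 32} }, out[3];
	uint64_t i, nr = coalesce(in, 3, out);

	for (i = 0; i < nr; i++)	/* prints "8+16" and "64+32" */
		printf("%llu+%llu\n", (unsigned long long) out[i].physical,
		       (unsigned long long) out[i].length);
	return 0;
}
```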
@@ -4394,6 +4341,7 @@ static struct _extent *_stats_get_extents_for_file(struct dm_pool *mem, int fd,
	struct _extent *extents;
	unsigned long flags = 0;
	uint64_t *buf;
	int rc;

	buf = dm_zalloc(STATS_FIE_BUF_LEN);
	if (!buf) {
@@ -4413,7 +4361,7 @@ static struct _extent *_stats_get_extents_for_file(struct dm_pool *mem, int fd,
	if (!dm_pool_begin_object(mem, sizeof(*extents)))
		return NULL;

	flags = FIEMAP_FLAG_SYNC;
	flags |= FIEMAP_FLAG_SYNC;

	do {
		/* start of ioctl loop - zero size and set count to bufsize */
@@ -4422,8 +4370,10 @@ static struct _extent *_stats_get_extents_for_file(struct dm_pool *mem, int fd,
		fiemap->fm_extent_count = *count;

		/* get count-sized chunk of extents */
		if (ioctl(fd, FS_IOC_FIEMAP, (unsigned long) fiemap) < 0) {
			if (errno == EBADR)
		rc = ioctl(fd, FS_IOC_FIEMAP, (unsigned long) fiemap);
		if (rc < 0) {
			rc = -errno;
			if (rc == -EBADR)
				log_err_once("FIEMAP failed with unknown "
					     "flags %x.", fiemap->fm_flags);
			goto bad;
@@ -4433,9 +4383,8 @@ static struct _extent *_stats_get_extents_for_file(struct dm_pool *mem, int fd,
		if (fiemap->fm_mapped_extents == 0)
			break;

		nr_extents += _stats_map_extents(fd, mem, fiemap, fm_ext,
						 &fm_last, &fm_pending,
						 nr_extents, &eof);
		nr_extents += _stats_map_extents(mem, fiemap, fm_ext, &fm_last,
						 &fm_pending, nr_extents, &eof);

		/* check for extent mapping error */
		if (eof < 0)
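For reference, the kernel interface driven by this loop is a single ioctl. A minimal, hedged sketch of a one-shot FIEMAP query (Linux only; the buffer size and output format are arbitrary choices for the example):

```c
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

#define NR_EXTENTS 32

/* Print up to NR_EXTENTS physical extents of the file behind fd, in sectors. */
static int dump_extents(int fd)
{
	char buf[sizeof(struct fiemap) + NR_EXTENTS * sizeof(struct fiemap_extent)];
	struct fiemap *fm = (struct fiemap *) buf;
	unsigned i;

	memset(buf, 0, sizeof(buf));
	fm->fm_start = 0;
	fm->fm_length = ~0ULL;			/* map the whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;	/* flush dirty data first */
	fm->fm_extent_count = NR_EXTENTS;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
		return 0;

	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("%llu+%llu\n",
		       (unsigned long long) (fm->fm_extents[i].fe_physical >> 9),
		       (unsigned long long) (fm->fm_extents[i].fe_length >> 9));
	return 1;
}
```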
@@ -4463,140 +4412,23 @@ bad:
	return NULL;
}

#define MATCH_EXTENT(e, s, l) \
	(((e).start == (s)) && ((e).len == (l)))

static struct _extent *_find_extent(size_t nr_extents, struct _extent *extents,
				    uint64_t start, uint64_t len)
{
	size_t i;
	for (i = 0; i < nr_extents; i++)
		if (MATCH_EXTENT(extents[i], start, len))
			return extents + i;
	return NULL;
}

/*
 * Clean up a table of region_id values that were created during a
 * failed dm_stats_create_regions_from_fd, or dm_stats_update_regions_from_fd
 * operation.
 */
static void _stats_cleanup_region_ids(struct dm_stats *dms, uint64_t *regions,
				      uint64_t nr_regions)
{
	uint64_t i;

	for (i = 0; i < nr_regions; i++)
		if (!_stats_delete_region(dms, regions[i]))
			log_error("Could not delete region " FMTu64 ".", i);
}

/*
 * First update pass: prune no-longer-allocated extents from the group
 * and build a table of the remaining extents so that their creation
 * can be skipped in the second pass.
 */
static int _stats_unmap_regions(struct dm_stats *dms, uint64_t group_id,
				struct dm_pool *mem, struct _extent *extents,
				struct _extent **old_extents, uint64_t *count,
				int *regroup)
{
	struct dm_stats_region *region = NULL;
	struct dm_stats_group *group = NULL;
	int64_t nr_kept, nr_old, i;
	struct _extent ext;

	group = &dms->groups[group_id];

	log_very_verbose("Checking for changed file extents in group ID "
			 FMTu64, group_id);

	if (!dm_pool_begin_object(mem, sizeof(**old_extents))) {
		log_error("Could not allocate extent table.");
		return 0;
	}

	nr_kept = nr_old = 0; /* counts of old and retained extents */

	/*
	 * First pass: delete de-allocated extents and set regroup=1 if
	 * deleting the current group leader.
	 */
	i = dm_bit_get_last(group->regions);
	for (; i >= 0; i = dm_bit_get_prev(group->regions, i)) {
		region = &dms->regions[i];
		nr_old++;

		if (_find_extent(*count, extents,
				 region->start, region->len)) {
			ext.start = region->start;
			ext.len = region->len;
			ext.id = i;
			nr_kept++;

			dm_pool_grow_object(mem, &ext,
					    sizeof(ext));
			log_very_verbose("Kept region " FMTu64, i);
		} else {

			if (i == group_id)
				*regroup = 1;

			if (!_stats_delete_region(dms, i)) {
				log_error("Could not remove region ID " FMTu64,
					  i);
				goto out;
			}

			log_very_verbose("Deleted region " FMTu64, i);
		}
	}

	*old_extents = dm_pool_end_object(mem);
	if (!*old_extents) {
		log_error("Could not finalize region extent table.");
		goto out;
	}
	log_very_verbose("Kept %ld of %ld old extents",
			 nr_kept, nr_old);
	log_very_verbose("Found " FMTu64 " new extents",
			 *count - nr_kept);

	return nr_kept;
out:
	dm_pool_abandon_object(mem);
	return -1;
}

/*
 * Create or update a set of regions representing the extents of a file
 * and return a table of uint64_t region_id values. The number of regions
 * Create a set of regions representing the extents of a file and
 * return a table of uint64_t region_id values. The number of regions
 * created is returned in the memory pointed to by count (which must be
 * non-NULL).
 *
 * If group_id is not equal to DM_STATS_GROUP_NOT_PRESENT, it is assumed
 * that group_id corresponds to a group containing existing regions that
 * were mapped to this file at an earlier time: regions will be added or
 * removed to reflect the current status of the file.
 */
static uint64_t *_stats_map_file_regions(struct dm_stats *dms, int fd,
					 struct dm_histogram *bounds,
					 int precise, uint64_t group_id,
					 uint64_t *count, int *regroup)
static uint64_t *_stats_create_file_regions(struct dm_stats *dms, int fd,
					    struct dm_histogram *bounds,
					    int precise, uint64_t *count)
{
	struct _extent *extents = NULL, *old_extents = NULL;
	uint64_t *regions = NULL, fail_region;
	struct dm_stats_group *group = NULL;
	uint64_t *regions = NULL, i, fail_region;
	struct dm_pool *extent_mem = NULL;
	struct _extent *old_ext;
	struct _extent *extents = NULL;
	char *hist_arg = NULL;
	int update, num_bits;
	struct statfs fsbuf;
	int64_t nr_kept = 0, i;
	struct stat buf;

	update = _stats_group_id_present(dms, group_id);

#ifdef BTRFS_SUPER_MAGIC
	if (fstatfs(fd, &fsbuf)) {
		log_error("fstatfs failed for fd %d", fd);
@@ -4625,18 +4457,6 @@ static uint64_t *_stats_map_file_regions(struct dm_stats *dms, int fd,
		return 0;
	}

	/*
	 * If regroup is set here, we are creating a new filemap: otherwise
	 * we are updating a group with a valid group identifier in group_id.
	 */
	if (update)
		log_very_verbose("Updating extents from fd %d with group ID "
				 FMTu64 " on (%d:%d)", fd, group_id,
				 major(buf.st_dev), minor(buf.st_dev));
	else
		log_very_verbose("Mapping extents from fd %d on (%d:%d)",
				 fd, major(buf.st_dev), minor(buf.st_dev));

	/* Use a temporary, private pool for the extent table. This avoids
	 * hijacking the dms->mem (region table) pool which would lead to
	 * interleaving temporary allocations with dm_stats_list() data,
@@ -4650,41 +4470,19 @@ static uint64_t *_stats_map_file_regions(struct dm_stats *dms, int fd,
		return_0;
	}

	if (update) {
		group = &dms->groups[group_id];
		if ((nr_kept = _stats_unmap_regions(dms, group_id, extent_mem,
						    extents, &old_extents,
						    count, regroup)) < 0)
			goto_out;
	}

	if (bounds)
	if (bounds) {
		/* _build_histogram_arg enables precise if vals < 1ms. */
		if (!(hist_arg = _build_histogram_arg(bounds, &precise)))
			goto_out;
	}

	/* make space for end-of-table marker */
	if (!(regions = dm_malloc((1 + *count) * sizeof(*regions)))) {
		log_error("Could not allocate memory for region IDs.");
		goto_out;
		goto out;
	}

	/*
	 * Second pass (first for non-update case): create regions for
	 * all extents not retained from the prior mapping, and insert
	 * retained regions into the table of region_id values.
	 *
	 * If a regroup is not scheduled, set group bits for newly
	 * created regions in the group leader bitmap.
	 */
	for (i = 0; i < *count; i++) {
		if (update) {
			if ((old_ext = _find_extent(nr_kept, old_extents,
						    extents[i].start,
						    extents[i].len))) {
				regions[i] = old_ext->id;
				continue;
			}
		}
		if (!_stats_create_region(dms, regions + i, extents[i].start,
					  extents[i].len, -1, precise, hist_arg,
					  dms->program_id, "")) {
@@ -4693,39 +4491,13 @@ static uint64_t *_stats_map_file_regions(struct dm_stats *dms, int fd,
				  extents[i].start);
			goto out_remove;
		}

		log_very_verbose("Created new region mapping " FMTu64 "+" FMTu64
				 " with region ID " FMTu64, extents[i].start,
				 extents[i].len, regions[i]);

		if (!*regroup && update) {
			/* expand group bitmap */
			if (regions[i] > (group->regions[0] - 1)) {
				num_bits = regions[i] + *count;
				if (!_stats_resize_group(group, num_bits)) {
					log_error("Failed to resize group "
						  "bitmap.");
					goto out_remove;
				}
			}
			dm_bit_set(group->regions, regions[i]);
		}

	}
	regions[*count] = DM_STATS_REGION_NOT_PRESENT;

	/* Update group leader aux_data for new group members. */
	if (!*regroup && update)
		if (!_stats_set_aux(dms, group_id,
				    dms->regions[group_id].aux_data))
			log_error("Failed to update group aux_data.");

	if (bounds)
		dm_free(hist_arg);

	dm_pool_free(extent_mem, extents);
	dm_pool_destroy(extent_mem);
	dm_free(hist_arg);
	return regions;

out_remove:
@@ -4739,15 +4511,13 @@ out_remove:
	dm_stats_list(dms, NULL);

	fail_region = i;
	_stats_cleanup_region_ids(dms, regions, fail_region);
	for (i = 0; i < fail_region; i++)
		if (!_stats_delete_region(dms, regions[i]))
			log_error("Could not delete region " FMTu64 ".", i);

	*count = 0;

out:
	/*
	 * The table of file extents in 'extents' is always built, so free
	 * it explicitly: this will also free any 'old_extents' table that
	 * was later allocated from the 'extent_mem' pool by this function.
	 */
	dm_pool_free(extent_mem, extents);
	dm_pool_destroy(extent_mem);
	dm_free(hist_arg);
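Conceptually, the update path above is a two-way diff between the group's existing regions and the file's current extents: prune what vanished, create what is new, keep the rest. A self-contained illustration of that set logic (the data values are made up; nothing here is libdm API):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct extent { uint64_t start, len; };

/* Return 1 when (start, len) appears in table t of n extents. */
static int present(const struct extent *t, size_t n, uint64_t start, uint64_t len)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (t[i].start == start && t[i].len == len)
			return 1;
	return 0;
}

int main(void)
{
	/* old = regions currently in the group; new = extents FIEMAP reports now */
	struct extent old[] = { {0, 8}, {16, 8}, {64, 32} };
	struct extent new[] = { {0, 8}, {64, 32}, {128, 16} };
	size_t i;

	for (i = 0; i < 3; i++)		/* pass 1: prune vanished extents */
		if (!present(new, 3, old[i].start, old[i].len))
			printf("delete region %llu+%llu\n",
			       (unsigned long long) old[i].start,
			       (unsigned long long) old[i].len);
	for (i = 0; i < 3; i++)		/* pass 2: create regions for new extents */
		if (!present(old, 3, new[i].start, new[i].len))
			printf("create region %llu+%llu\n",
			       (unsigned long long) new[i].start,
			       (unsigned long long) new[i].len);
	return 0;
}
```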
@@ -4761,16 +4531,15 @@ uint64_t *dm_stats_create_regions_from_fd(struct dm_stats *dms, int fd,
					  const char *alias)
{
	uint64_t *regions, count = 0;
	int regroup = 1;

	if (alias && !group) {
		log_error("Cannot set alias without grouping regions.");
		return NULL;
	}

	if (!(regions = _stats_map_file_regions(dms, fd, bounds, precise,
						-1, &count, &regroup)))
		return NULL;
	regions = _stats_create_file_regions(dms, fd, bounds, precise, &count);
	if (!regions)
		return_0;

	if (!group)
		return regions;
@@ -4784,97 +4553,11 @@ uint64_t *dm_stats_create_regions_from_fd(struct dm_stats *dms, int fd,

	return regions;
out:
	_stats_cleanup_region_ids(dms, regions, count);
	dm_free(regions);
	return NULL;
}

uint64_t *dm_stats_update_regions_from_fd(struct dm_stats *dms, int fd,
					  uint64_t group_id)
{
	struct dm_histogram *bounds = NULL;
	int nr_bins, precise, regroup;
	uint64_t *regions, count = 0;
	const char *alias = NULL;

	if (!dms->regions || !dm_stats_group_present(dms, group_id)) {
		if (!dm_stats_list(dms, dms->program_id)) {
			log_error("Could not obtain region list while "
				  "updating group " FMTu64 ".", group_id);
			return NULL;
		}
	}

	if (!dm_stats_group_present(dms, group_id)) {
		log_error("Group ID " FMTu64 " does not exist.", group_id);
		return NULL;
	}

	/*
	 * If the extent corresponding to the group leader's region has been
	 * deallocated, _stats_map_file_regions() will remove the region and
	 * the group. In this case, regroup will be set by the call and the
	 * group will be re-created using saved values.
	 */
	regroup = 0;

	/*
	 * A copy of the alias is needed to re-create the group when regroup=1.
	 */
	if (dms->groups[group_id].alias) {
		alias = dm_strdup(dms->groups[group_id].alias);
		if (!alias) {
			log_error("Failed to allocate group alias string.");
			return NULL;
		}
	}

	if (dms->regions[group_id].bounds) {
		/*
		 * A copy of the histogram bounds must be passed to
		 * _stats_map_file_regions() to be used when creating new
		 * regions: it is not safe to use the copy in the current group
		 * leader since it may be destroyed during the first group
		 * update pass.
		 */
		nr_bins = dms->regions[group_id].bounds->nr_bins;
		bounds = _alloc_dm_histogram(nr_bins);
		if (!bounds) {
			log_error("Could not allocate memory for group "
				  "histogram bounds.");
			return NULL;
		}
		_stats_copy_histogram_bounds(bounds,
					     dms->regions[group_id].bounds);
	}

	precise = (dms->regions[group_id].timescale == 1);

	regions = _stats_map_file_regions(dms, fd, bounds, precise,
					  group_id, &count, &regroup);

	if (!regions)
		goto bad;

	if (!dm_stats_list(dms, NULL))
		goto bad;

	if (regroup)
		if (!_stats_group_file_regions(dms, regions, count, alias))
			goto bad;

	dm_free(bounds);
	dm_free((char *) alias);
	return regions;
bad:
	_stats_cleanup_region_ids(dms, regions, count);
	dm_free(bounds);
	dm_free((char *) alias);
	return NULL;
}

#else /* HAVE_LINUX_FIEMAP */

uint64_t *dm_stats_create_regions_from_fd(struct dm_stats *dms, int fd,
					  int group, int precise,
					  struct dm_histogram *bounds,
@@ -4883,13 +4566,6 @@ uint64_t *dm_stats_create_regions_from_fd(struct dm_stats *dms, int fd,
	log_error("File mapping requires FIEMAP ioctl support.");
	return 0;
}

uint64_t *dm_stats_update_regions_from_fd(struct dm_stats *dms, int fd,
					  uint64_t group_id)
{
	log_error("File mapping requires FIEMAP ioctl support.");
	return 0;
}
#endif /* HAVE_LINUX_FIEMAP */

/*
@@ -468,12 +468,10 @@ const char *dm_size_to_string(struct dm_pool *mem, uint64_t size,
	unsigned base = BASE_UNKNOWN;
	unsigned s;
	int precision;
	double d;
	uint64_t byte = UINT64_C(0);
	uint64_t units = UINT64_C(1024);
	char *size_buf = NULL;
	char new_unit_type = '\0', unit_type_buf[2];
	const char *prefix = "";
	const char * const size_str[][3] = {
		/* BASE_UNKNOWN */
		{" ", " ", " "},	/* [0] */
@@ -572,7 +570,7 @@ const char *dm_size_to_string(struct dm_pool *mem, uint64_t size,
			byte = unit_factor;
	} else {
		/* Human-readable style */
		if (unit_type == 'H' || unit_type == 'R') {
		if (unit_type == 'H') {
			units = UINT64_C(1000);
			base = BASE_1000;
		} else {
@@ -588,16 +586,6 @@ const char *dm_size_to_string(struct dm_pool *mem, uint64_t size,
		for (s = 0; s < NUM_UNIT_PREFIXES && size < byte; s++)
			byte /= units;

		if ((s < NUM_UNIT_PREFIXES) &&
		    ((unit_type == 'R') || (unit_type == 'r'))) {
			/* When the rounding would cause a difference, add the '<' prefix,
			 * i.e. 2043M is more than 1.9949G and prints <2.00G.
			 * This version is for 2 digits fixed precision. */
			d = 100. * (double) size / byte;
			if (!_close_enough(floorl(d), nearbyintl(d)))
				prefix = "<";
		}

		include_suffix = 1;
	}

@@ -611,7 +599,7 @@ const char *dm_size_to_string(struct dm_pool *mem, uint64_t size,
		precision = 2;
	}

	snprintf(size_buf, SIZE_BUF - 1, "%s%.*f%s", prefix, precision,
	snprintf(size_buf, SIZE_BUF - 1, "%.*f%s", precision,
		 (double) size / byte, include_suffix ? size_str[base + s][suffix_type] : "");

	return size_buf;
@@ -651,8 +639,6 @@ uint64_t dm_units_to_factor(const char *units, char *unit_type,
	switch (*units) {
	case 'h':
	case 'H':
	case 'r':
	case 'R':
		multiplier = v = UINT64_C(1);
		*unit_type = *units;
		break;
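The '<' prefix logic in the hunk above deserves a worked example. With two fixed decimal digits, a value just below a unit boundary rounds up and overstates the size; comparing the truncated and the rounded scaled value detects this. A simplified sketch (using plain floor()/nearbyint() rather than the long-double variants and _close_enough() helper in libdm):

```c
#include <math.h>
#include <stdio.h>

/* Return "<" when printing size/byte with %.2f would round the value up. */
static const char *lt_prefix(double size, double byte)
{
	double d = 100. * size / byte;	/* scale to two fixed decimal digits */

	return (floor(d) != nearbyint(d)) ? "<" : "";
}

int main(void)
{
	double size = 2043. * 1024 * 1024;	/* 2043 MiB */
	double gib = 1024. * 1024 * 1024;

	/* 2043 MiB is ~1.9951 GiB; %.2f prints "2.00", so emit "<2.00G" */
	printf("%s%.2fG\n", lt_prefix(size, gib), size / gib);
	return 0;
}
```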
@@ -46,6 +46,8 @@ MAN8GEN=lvm-config.8 lvm-dumpconfig.8 lvm-fullreport.8 lvm-lvpoll.8 \
	vgrename.8 vgs.8 vgscan.8 vgsplit.8 \
	lvmsar.8 lvmsadc.8 lvmdiskscan.8 lvmchange.8

MAN8+=$(MAN8GEN)

ifeq ($(MAKECMDGOALS),all_man)
MAN_ALL="yes"
endif
@@ -115,8 +117,8 @@ MAN8DIR=$(mandir)/man8

include $(top_builddir)/make.tmpl

CLEAN_TARGETS+=$(MAN5) $(MAN7) $(MAN8) $(MAN8GEN) $(MAN8CLUSTER) \
	$(MAN8SYSTEMD_GENERATORS) $(MAN8DM) *.gen man-generator
CLEAN_TARGETS+=$(MAN5) $(MAN7) $(MAN8) $(MAN8CLUSTER) \
	$(MAN8SYSTEMD_GENERATORS) $(MAN8DM)
DISTCLEAN_TARGETS+=$(FSADMMAN) $(BLKDEACTIVATEMAN) $(DMEVENTDMAN) \
	$(LVMETADMAN) $(LVMPOLLDMAN) $(LVMLOCKDMAN) $(CLVMDMAN) $(CMIRRORDMAN) \
	$(LVMCACHEMAN) $(LVMTHINMAN) $(LVMDBUSDMAN) $(LVMRAIDMAN)
@@ -127,11 +129,11 @@ all: man device-mapper

device-mapper: $(MAN8DM)

man: $(MAN5) $(MAN7) $(MAN8) $(MAN8GEN) $(MAN8CLUSTER) $(MAN8SYSTEMD_GENERATORS)
man: $(MAN5) $(MAN7) $(MAN8) $(MAN8CLUSTER) $(MAN8SYSTEMD_GENERATORS)

all_man: man

$(MAN5) $(MAN7) $(MAN8) $(MAN8GEN) $(MAN8DM) $(MAN8CLUSTER): Makefile
$(MAN5) $(MAN7) $(MAN8) $(MAN8DM) $(MAN8CLUSTER): Makefile

Makefile: Makefile.in
	@:
@@ -142,17 +144,12 @@ Makefile: Makefile.in
	*) echo "Creating $@" ; $(SED) -e "s+#VERSION#+$(LVM_VERSION)+;s+#DEFAULT_SYS_DIR#+$(DEFAULT_SYS_DIR)+;s+#DEFAULT_ARCHIVE_DIR#+$(DEFAULT_ARCHIVE_DIR)+;s+#DEFAULT_BACKUP_DIR#+$(DEFAULT_BACKUP_DIR)+;s+#DEFAULT_PROFILE_DIR#+$(DEFAULT_PROFILE_DIR)+;s+#DEFAULT_CACHE_DIR#+$(DEFAULT_CACHE_DIR)+;s+#DEFAULT_LOCK_DIR#+$(DEFAULT_LOCK_DIR)+;s+#CLVMD_PATH#+@CLVMD_PATH@+;s+#LVM_PATH#+@LVM_PATH@+;s+#DEFAULT_RUN_DIR#+@DEFAULT_RUN_DIR@+;s+#DEFAULT_PID_DIR#+@DEFAULT_PID_DIR@+;s+#SYSTEMD_GENERATOR_DIR#+$(SYSTEMD_GENERATOR_DIR)+;s+#DEFAULT_MANGLING#+$(DEFAULT_MANGLING)+;" $< > $@ ;; \
	esac

man-generator:
	$(CC) -DMAN_PAGE_GENERATOR -I$(top_builddir)/tools $(CFLAGS) $(top_srcdir)/tools/command.c -o $@
	- ./man-generator lvmconfig > test.gen
	if [ ! -s test.gen ] ; then cp genfiles/*.gen $(top_builddir)/man; fi;

$(MAN8GEN): man-generator
$(MAN8GEN):
	$(CC) -DMAN_PAGE_GENERATOR $(top_builddir)/tools/command.c -o man-generator
	echo "Generating $@" ;
	if [ ! -e $@.gen ]; then ./man-generator $(basename $@) $(top_srcdir)/man/$@.des > $@.gen; fi
	if [ -f $(top_srcdir)/man/$@.end ]; then cat $(top_srcdir)/man/$@.end >> $@.gen; fi;
	cat $(top_srcdir)/man/see_also.end >> $@.gen
	$(SED) -e "s+#VERSION#+$(LVM_VERSION)+;s+#DEFAULT_SYS_DIR#+$(DEFAULT_SYS_DIR)+;s+#DEFAULT_ARCHIVE_DIR#+$(DEFAULT_ARCHIVE_DIR)+;s+#DEFAULT_BACKUP_DIR#+$(DEFAULT_BACKUP_DIR)+;s+#DEFAULT_PROFILE_DIR#+$(DEFAULT_PROFILE_DIR)+;s+#DEFAULT_CACHE_DIR#+$(DEFAULT_CACHE_DIR)+;s+#DEFAULT_LOCK_DIR#+$(DEFAULT_LOCK_DIR)+;s+#CLVMD_PATH#+@CLVMD_PATH@+;s+#LVM_PATH#+@LVM_PATH@+;s+#DEFAULT_RUN_DIR#+@DEFAULT_RUN_DIR@+;s+#DEFAULT_PID_DIR#+@DEFAULT_PID_DIR@+;s+#SYSTEMD_GENERATOR_DIR#+$(SYSTEMD_GENERATOR_DIR)+;s+#DEFAULT_MANGLING#+$(DEFAULT_MANGLING)+;" $@.gen > $@
	./man-generator `basename -s .8 $@` > $@.in
	if [ -f $@.notes ]; then cat $@.notes >> $@.in; fi;
	$(SED) -e "s+#VERSION#+$(LVM_VERSION)+;s+#DEFAULT_SYS_DIR#+$(DEFAULT_SYS_DIR)+;s+#DEFAULT_ARCHIVE_DIR#+$(DEFAULT_ARCHIVE_DIR)+;s+#DEFAULT_BACKUP_DIR#+$(DEFAULT_BACKUP_DIR)+;s+#DEFAULT_PROFILE_DIR#+$(DEFAULT_PROFILE_DIR)+;s+#DEFAULT_CACHE_DIR#+$(DEFAULT_CACHE_DIR)+;s+#DEFAULT_LOCK_DIR#+$(DEFAULT_LOCK_DIR)+;s+#CLVMD_PATH#+@CLVMD_PATH@+;s+#LVM_PATH#+@LVM_PATH@+;s+#DEFAULT_RUN_DIR#+@DEFAULT_RUN_DIR@+;s+#DEFAULT_PID_DIR#+@DEFAULT_PID_DIR@+;s+#SYSTEMD_GENERATOR_DIR#+$(SYSTEMD_GENERATOR_DIR)+;s+#DEFAULT_MANGLING#+$(DEFAULT_MANGLING)+;" $@.in > $@

install_man5: $(MAN5)
	$(INSTALL) -d $(MAN5DIR)
@@ -165,7 +162,6 @@ install_man7: $(MAN7)
install_man8: $(MAN8) $(MAN8GEN)
	$(INSTALL) -d $(MAN8DIR)
	$(INSTALL_DATA) $(MAN8) $(MAN8DIR)/
	$(INSTALL_DATA) $(MAN8GEN) $(MAN8DIR)/

install_lvm2: install_man5 install_man7 install_man8
@@ -212,17 +212,6 @@ dmstats \(em device-mapper statistics management
. ad b
..
.CMD_UNGROUP
.HP
.B dmstats
.de CMD_UPDATE_FILEMAP
. ad l
. BR update_filemap
. RI file_path
. RB [ \-\-groupid
. IR id ]
. ad b
..
.CMD_UPDATE_FILEMAP
.
.PD
.ad b
@@ -679,21 +668,6 @@ Remove an existing group and return all the group's regions to their
original state.

The group to be removed is specified using \fB\-\-groupid\fP.
.HP
.CMD_UPDATE_FILEMAP
.br
Update a group of \fBdmstats\fP regions specified by \fBgroup_id\fP,
that were previously created with \fB\-\-filemap\fP. This will add
and remove regions to reflect changes in the allocated extents of
the file on-disk, since the time that it was created or last updated.

Use of this command is not normally needed, since the \fBdmfilemapd\fP
daemon will automatically monitor filemap groups and perform these
updates when required.

If a filemapped group was created with \fB\-\-nomonitor\fP, or the
daemon has been killed, \fBupdate_filemap\fP can be used to
manually force an update.
.
.SH REGIONS, AREAS, AND GROUPS
.
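An illustrative invocation of the manual update path described above, in the man page's own example style (the file path and group ID are examples only):
.br
.B dmstats update_filemap /var/lib/data/file.db \-\-groupid 2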
@@ -1,2 +0,0 @@
lvchange changes LV attributes in the VG, changes LV activation in the
kernel, and includes other utilities for LV maintenance.
@@ -1,6 +0,0 @@
.SH EXAMPLES

Change LV permission to read-only:
.sp
.B lvchange \-pr vg00/lvol1
@@ -1,38 +0,0 @@
lvconvert changes the LV type and includes utilities for LV data
maintenance. The LV type controls data layout and redundancy.
The LV type is also called the segment type or segtype.

To display the current LV type, run the command:

.B lvs \-o name,segtype
.I LV

The
.B linear
type is equivalent to the
.B striped
type when one stripe exists.
In that case, the types can sometimes be used interchangeably.

In most cases, the
.B mirror
type is deprecated and the
.B raid1
type should be used. They are both implementations of mirroring.

The
.B raid*
type refers to one of many raid levels, e.g.
.B raid1,
.B raid5.

In some cases, an LV is a single device mapper (dm) layer above physical
devices. In other cases, hidden LVs (dm devices) are layered between the
visible LV and physical devices. LVs in the middle layers are called sub LVs.
A command run on a visible LV sometimes operates on a sub LV rather than
the specified LV. In other cases, a sub LV must be specified directly on
the command line.

Sub LVs can be displayed with the command
.B lvs -a
@@ -1,95 +0,0 @@
.SH EXAMPLES

Convert a linear LV to a two-way mirror LV.
.br
.B lvconvert \-\-type mirror \-\-mirrors 1 vg/lvol1

Convert a linear LV to a two-way RAID1 LV.
.br
.B lvconvert \-\-type raid1 \-\-mirrors 1 vg/lvol1

Convert a mirror LV to use an in\-memory log.
.br
.B lvconvert \-\-mirrorlog core vg/lvol1

Convert a mirror LV to use a disk log.
.br
.B lvconvert \-\-mirrorlog disk vg/lvol1

Convert a mirror or raid1 LV to a linear LV.
.br
.B lvconvert --type linear vg/lvol1

Convert a mirror LV to a raid1 LV with the same number of images.
.br
.B lvconvert \-\-type raid1 vg/lvol1

Convert a linear LV to a two-way mirror LV, allocating new extents from specific
PV ranges.
.br
.B lvconvert \-\-mirrors 1 vg/lvol1 /dev/sda:0\-15 /dev/sdb:0\-15

Convert a mirror LV to a linear LV, freeing physical extents from a specific PV.
.br
.B lvconvert \-\-type linear vg/lvol1 /dev/sda

Split one image from a mirror or raid1 LV, making it a new LV.
.br
.B lvconvert \-\-splitmirrors 1 \-\-name lv_split vg/lvol1

Split one image from a raid1 LV, and track changes made to the raid1 LV
while the split image remains detached.
.br
.B lvconvert \-\-splitmirrors 1 \-\-trackchanges vg/lvol1

Merge an image (that was previously created with \-\-splitmirrors and
\-\-trackchanges) back into the original raid1 LV.
.br
.B lvconvert \-\-mergemirrors vg/lvol1_rimage_1

Replace PV /dev/sdb1 with PV /dev/sdf1 in a raid1/4/5/6/10 LV.
.br
.B lvconvert \-\-replace /dev/sdb1 vg/lvol1 /dev/sdf1

Replace 3 PVs /dev/sd[b-d]1 with PVs /dev/sd[f-h]1 in a raid1 LV.
.br
.B lvconvert \-\-replace /dev/sdb1 \-\-replace /dev/sdc1 \-\-replace /dev/sdd1
.RS
.B vg/lvol1 /dev/sd[fgh]1
.RE

Replace the maximum of 2 PVs /dev/sd[bc]1 with PVs /dev/sd[gh]1 in a raid6 LV.
.br
.B lvconvert \-\-replace /dev/sdb1 \-\-replace /dev/sdc1 vg/lvol1 /dev/sd[gh]1

Convert an LV into a thin LV in the specified thin pool. The existing LV
is used as an external read\-only origin for the new thin LV.
.br
.B lvconvert \-\-type thin \-\-thinpool vg/tpool1 vg/lvol1

Convert an LV into a thin LV in the specified thin pool. The existing LV
is used as an external read\-only origin for the new thin LV, and is
renamed "external".
.br
.B lvconvert \-\-type thin \-\-thinpool vg/tpool1
.RS
.B \-\-originname external vg/lvol1
.RE

Convert an LV to a cache pool LV using another specified LV for cache pool
metadata.
.br
.B lvconvert \-\-type cache-pool \-\-poolmetadata vg/poolmeta1 vg/lvol1

Convert an LV to a cache LV using the specified cache pool and chunk size.
.br
.B lvconvert \-\-type cache \-\-cachepool vg/cpool1 \-c 128 vg/lvol1

Detach and keep the cache pool from a cache LV.
.br
.B lvconvert \-\-splitcache vg/lvol1

Detach and remove the cache pool from a cache LV.
.br
.B lvconvert \-\-uncache vg/lvol1
@@ -1,28 +0,0 @@
lvcreate creates a new LV in a VG. For standard LVs, this requires
allocating logical extents from the VG's free physical extents. If there
is not enough free space, then the VG can be extended (see
\fBvgextend\fP(8)) with other PVs, or existing LVs can be reduced or
removed (see \fBlvremove\fP, \fBlvreduce\fP.)

To control which PVs a new LV will use, specify one or more PVs as
position args at the end of the command line. lvcreate will allocate
physical extents only from the specified PVs.

lvcreate can also create snapshots of existing LVs, e.g. for backup
purposes. The data in a new snapshot LV represents the content of the
original LV from the time the snapshot was created.

RAID LVs can be created by specifying an LV type when creating the LV (see
\fBlvmraid\fP(7)). Different RAID levels require different numbers of
unique PVs to be available in the VG for allocation.

Thin pools (for thin provisioning) and cache pools (for caching) are
represented by special LVs with types thin-pool and cache-pool (see
\fBlvmthin\fP(7) and \fBlvmcache\fP(7)). The pool LVs are not usable as
standard block devices, but the LV names act as references to the pools.

Thin LVs are thinly provisioned from a thin pool, and are created with a
virtual size rather than a physical size. A cache LV is the combination of
a standard LV with a cache pool, used to cache active portions of the LV
to improve performance.
@@ -1,98 +0,0 @@
.SH EXAMPLES

Create a striped LV with 3 stripes, a stripe size of 8KiB and a size of 100MiB.
The LV name is chosen by lvcreate.
.br
.B lvcreate \-i 3 \-I 8 \-L 100m vg00

Create a raid1 LV with two images, and a usable size of 500 MiB. This
operation requires two devices, one for each mirror image. RAID metadata
(superblock and bitmap) is also included on the two devices.
.br
.B lvcreate \-\-type raid1 \-m1 \-L 500m \-n mylv vg00

Create a mirror LV with two images, and a usable size of 500 MiB.
This operation requires three devices: two for mirror images and
one for a disk log.
.br
.B lvcreate \-\-type mirror \-m1 \-L 500m \-n mylv vg00

Create a mirror LV with 2 images, and a usable size of 500 MiB.
This operation requires 2 devices because the log is in memory.
.br
.B lvcreate \-\-type mirror \-m1 \-\-mirrorlog core \-L 500m \-n mylv vg00

Create a copy\-on\-write snapshot of an LV:
.br
.B lvcreate \-\-snapshot \-\-size 100m \-\-name mysnap vg00/mylv

Create a copy\-on\-write snapshot with a size sufficient
for overwriting 20% of the size of the original LV.
.br
.B lvcreate \-s \-l 20%ORIGIN \-n mysnap vg00/mylv

Create a sparse LV with 1TiB of virtual space, and actual space just under
100MiB.
.br
.B lvcreate \-\-snapshot \-\-virtualsize 1t \-\-size 100m \-\-name mylv vg00

Create a linear LV with a usable size of 64MiB on specific physical extents.
.br
.B lvcreate \-L 64m \-n mylv vg00 /dev/sda:0\-7 /dev/sdb:0\-7

Create a RAID5 LV with a usable size of 5GiB, 3 stripes, a stripe size of
64KiB, using a total of 4 devices (including one for parity).
.br
.B lvcreate \-\-type raid5 \-L 5G \-i 3 \-I 64 \-n mylv vg00

Create a RAID5 LV using all of the free space in the VG and spanning all the
PVs in the VG (note that the command will fail if there are more than 8 PVs in
the VG, in which case \fB\-i 7\fP must be used to get to the current maximum of
8 devices including parity for RaidLVs).
.br
.B lvcreate \-\-config allocation/raid_stripe_all_devices=1
.RS
.B \-\-type raid5 \-l 100%FREE \-n mylv vg00
.RE

Create a RAID10 LV with a usable size of 5GiB, using 2 stripes, each on
a two-image mirror. (Note that the \fB-i\fP and \fB-m\fP arguments behave
differently:
\fB-i\fP specifies the total number of stripes,
but \fB-m\fP specifies the number of images in addition
to the first image).
.br
.B lvcreate \-\-type raid10 \-L 5G \-i 2 \-m 1 \-n mylv vg00

Create a 1TiB thin LV, first creating a new thin pool for it, where
the thin pool has 100MiB of space, uses 2 stripes, has a 64KiB stripe
size, and 256KiB chunk size.
.br
.B lvcreate \-\-type thin \-\-name mylv \-\-thinpool mypool
.RS
.B \-V 1t \-L 100m \-i 2 \-I 64 \-c 256 vg00
.RE

Create a thin snapshot of a thin LV (the size option must not be
used, otherwise a copy-on-write snapshot would be created).
.br
.B lvcreate \-\-snapshot \-\-name mysnap vg00/thinvol

Create a thin snapshot of the read-only inactive LV named "origin"
which becomes an external origin for the thin snapshot LV.
.br
.B lvcreate \-\-snapshot \-\-name mysnap \-\-thinpool mypool vg00/origin

Create a cache pool from a fast physical device. The cache pool can
then be used to cache an LV.
.br
.B lvcreate \-\-type cache-pool \-L 1G \-n my_cpool vg00 /dev/fast1

Create a cache LV, first creating a new origin LV on a slow physical device,
then combining the new origin LV with an existing cache pool.
.br
.B lvcreate \-\-type cache \-\-cachepool my_cpool
.RS
.B \-L 100G \-n mylv vg00 /dev/slow1
.RE
@@ -1,5 +0,0 @@
lvdisplay shows the attributes of LVs, like size, read/write status,
snapshot information, etc.

\fBlvs\fP(8) is a preferred alternative that shows the same information
and more, using a more compact and configurable output format.
@@ -1,5 +0,0 @@
lvextend extends the size of an LV. This requires allocating logical
extents from the VG's free physical extents. A copy\-on\-write snapshot LV
can also be extended to provide more space to hold COW blocks. Use
\fBlvconvert\fP(8) to change the number of data images in a RAID or
mirrored LV.
@@ -1,16 +0,0 @@
.SH EXAMPLES

Extend the size of an LV by 54MiB, using a specific PV.
.br
.B lvextend \-L +54 vg01/lvol10 /dev/sdk3

Extend the size of an LV by the amount of free
space on PV /dev/sdk3. This is equivalent to specifying
"\-l +100%PVS" on the command line.
.br
.B lvextend vg01/lvol01 /dev/sdk3

Extend an LV by 16MiB using specific physical extents.
.br
.B lvextend \-L+16m vg01/lvol01 /dev/sda:8\-9 /dev/sdb:8\-9
@@ -1,6 +0,0 @@
lvm fullreport produces formatted output about PVs, PV segments, VGs, LVs
and LV segments. The information is all gathered together for each VG
(under a per-VG lock) so it is consistent. Information gathered from
separate calls to \fBvgs\fP, \fBpvs\fP, and \fBlvs\fP can be inconsistent
if information changes between commands.

@@ -1,4 +0,0 @@
lvm lvpoll is an internal command used by \fBlvmpolld\fP(8) to monitor and
complete \fBlvconvert\fP(8) and \fBpvmove\fP(8) operations. lvpoll itself
does not initiate these operations and should not normally need to be run
directly.
|
@@ -1,33 +0,0 @@
|
||||
.SH NOTES
|
||||
|
||||
To find the name of the pvmove LV that was created by an original
|
||||
\fBpvmove /dev/name\fP command, use the command:
|
||||
.br
|
||||
\fBlvs -a -S move_pv=/dev/name\fP.
|
||||
|
||||
.SH EXAMPLES
|
||||
|
||||
Continue polling a pvmove operation.
|
||||
.br
|
||||
.B lvm lvpoll --polloperation pvmove vg00/pvmove0
|
||||
|
||||
Abort a pvmove operation.
|
||||
.br
|
||||
.B lvm lvpoll --polloperation pvmove --abort vg00/pvmove0
|
||||
|
||||
Continue polling a mirror conversion.
|
||||
.br
|
||||
.B lvm lvpoll --polloperation convert vg00/lvmirror
|
||||
|
||||
Continue mirror repair.
|
||||
.br
|
||||
.B lvm lvpoll --polloperation convert vg/damaged_mirror --handlemissingpvs
|
||||
|
||||
Continue snapshot merge.
|
||||
.br
|
||||
.B lvm lvpoll --polloperation merge vg/snapshot_old
|
||||
|
||||
Continue thin snapshot merge.
|
||||
.br
|
||||
.B lvm lvpoll --polloperation merge_thin vg/thin_snapshot
|
||||
|
@@ -416,11 +416,6 @@ File descriptor to use for report output from LVM commands.
Name of the default command profile to use for LVM commands. This profile
is overridden by direct use of the \fB\-\-commandprofile\fP command line option.
.TP
.B LVM_RUN_BY_DMEVENTD
This variable is normally set by the dmeventd plugin to inform the lvm2
command that it is running from the dmeventd plugin, so that lvm2 takes
some extra action to avoid communication and deadlocks with dmeventd.
.TP
.B LVM_SYSTEM_DIR
Directory containing \fBlvm.conf\fP(5) and other LVM system files.
Defaults to "\fI#DEFAULT_SYS_DIR#\fP".
@@ -1,3 +0,0 @@
lvmconfig produces formatted output from the LVM configuration tree. The
sources of the configuration data include \fBlvm.conf\fP(5) and command
line settings from \-\-config.
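As an illustrative sketch, the current value of one setting can be printed
by naming its section and key:
.br
.B lvmconfig \-\-type current global/units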
@@ -1,7 +0,0 @@
lvmdiskscan scans all SCSI disks, (E)IDE disks, multiple devices (MD), and
various other block devices in the system, looking for LVM PVs. The size
reported is the real device size. Define a filter in \fBlvm.conf\fP(5) to
restrict the scan, for example to avoid scanning a CD-ROM.

This command is deprecated; use \fBpvs\fP instead.
@@ -1,14 +0,0 @@
lvreduce reduces the size of an LV. The freed logical extents are returned
to the VG to be used by other LVs. A copy\-on\-write snapshot LV can also
be reduced if less space is needed to hold COW blocks. Use
\fBlvconvert\fP(8) to change the number of data images in a RAID or
mirrored LV.

Be careful when reducing an LV's size, because data in the reduced area is
lost. Ensure that any file system on the LV is resized \fBbefore\fP
running lvreduce so that the removed extents are not in use by the file
system.

Sizes will be rounded if necessary. For example, the LV size must be an
exact number of extents, and the size of a striped segment must be a
multiple of the number of stripes.
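As an illustrative sketch (LV name and size are hypothetical), the
\fB\-\-resizefs\fP option calls \fBfsadm\fP(8) to shrink the file system
together with the LV, avoiding a separate resize step:
.br
.B lvreduce \-\-resizefs \-L \-4G vg00/lvol1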
@@ -1,5 +0,0 @@
.SH EXAMPLES

Reduce the size of an LV by 3 logical extents:
.br
.B lvreduce \-l \-3 vg00/lvol1
@@ -1,22 +0,0 @@
lvremove removes one or more LVs. For standard LVs, this returns the
logical extents that were used by the LV to the VG for use by other LVs.

Confirmation will be requested before deactivating any active LV prior to
removal. LVs cannot be deactivated or removed while they are open (e.g.
if they contain a mounted filesystem). Removing an origin LV will also
remove all dependent snapshots.

\fBHistorical LVs\fP

If the configuration setting \fBmetadata/record_lvs_history\fP is enabled
and the LV being removed forms part of the history of at least one LV that
is still present, then a simplified representation of the LV will be
retained. This includes the time of removal (\fBlv_time_removed\fP
reporting field), creation time (\fBlv_time\fP), name (\fBlv_name\fP), LV
uuid (\fBlv_uuid\fP) and VG name (\fBvg_name\fP). This allows later
reporting to see the ancestry chain of thin snapshot volumes, even after
some intermediate LVs have been removed. The names of such historical LVs
acquire a hyphen as a prefix (e.g. '-lvol1') and cannot be reactivated.
Use lvremove a second time, with the hyphen, to remove the record of the
former LV completely.
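As an illustrative sketch (names hypothetical): if vg00/lvol1 was removed
while \fBmetadata/record_lvs_history\fP was enabled, its historical record
can later be deleted using the hyphenated name:
.br
.B lvremove vg00/-lvol1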
@@ -1,11 +0,0 @@
.SH EXAMPLES

Remove an active LV without asking for confirmation.
.br
.B lvremove \-f vg00/lvol1

Remove all LVs in the specified VG.
.br
.B lvremove vg00
@@ -1,2 +0,0 @@
lvrename renames an existing LV or a historical LV (see \fBlvremove\fP(8)
for information on historical LVs).
@@ -1,10 +0,0 @@
.SH EXAMPLES

Rename "lvold" to "lvnew":
.br
.B lvrename /dev/vg02/lvold vg02/lvnew

An alternate syntax to rename "lvold" to "lvnew":
.br
.B lvrename vg02 lvold lvnew
@@ -1,2 +0,0 @@
lvresize resizes an LV in the same way as lvextend and lvreduce. See
\fBlvextend\fP(8) and \fBlvreduce\fP(8) for more information.
@@ -1,6 +0,0 @@
.SH EXAMPLES

Extend an LV by 16MiB using specific physical extents:
.br
.B lvresize \-L+16M vg1/lv1 /dev/sda:0\-1 /dev/sdb:0\-1
@@ -1 +0,0 @@
lvs produces formatted output about LVs.
@@ -1,5 +0,0 @@
lvscan scans all VGs or all supported LVM block devices in the system for
LVs. The output consists of one line for each LV, indicating whether or
not it is active, whether it is a snapshot or origin, the size of the
device, and its allocation policy. Use \fBlvs\fP(8) or \fBlvdisplay\fP(8)
to obtain more comprehensive information about LVs.
@@ -1 +0,0 @@
pvchange changes PV attributes in the VG.
@@ -1,6 +0,0 @@
.SH EXAMPLES

Disallow the allocation of physical extents on a PV (e.g. because of
disk errors, or because it will be removed after freeing it).
.br
.B pvchange \-x n /dev/sdk1
@@ -1 +0,0 @@
pvck checks the LVM metadata for consistency on PVs.
@@ -1,8 +0,0 @@
.SH EXAMPLES

If the partition table is corrupted or lost on /dev/sda, and you suspect
there was an LVM partition at approximately 100 MiB, then this
area of the disk can be scanned using the \fB\-\-labelsector\fP
parameter with a value of 204800 (100 * 1024 * 1024 / 512 = 204800).
.br
.B pvck \-\-labelsector 204800 /dev/sda
@@ -1,18 +0,0 @@
pvcreate initializes a PV so that it is recognized as belonging to LVM,
and allows the PV to be used in a VG. A PV can be a disk partition, whole
disk, meta device, or loopback file.

For DOS disk partitions, the partition id should be set to 0x8e using
.BR fdisk (8),
.BR cfdisk (8),
or an equivalent. For GUID Partition Table (GPT), the id is
E6D6D379-F507-44C2-A23C-238F2A3DF928. For whole-disk devices, only
the partition table must be erased, which will effectively destroy all
data on that disk. This can be done by zeroing the first sector with:

.BI "dd if=/dev/zero of=" PhysicalVolume " bs=512 count=1"

Use \fBvgcreate\fP(8) to create a new VG on the PV, or \fBvgextend\fP(8)
to add the PV to an existing VG.
@@ -1,13 +0,0 @@
.SH EXAMPLES

Initialize a partition and a full device.
.br
.B pvcreate /dev/sdc4 /dev/sde

If a device is a 4KiB-sector drive that compensates for Windows
partitioning (sector 7 is the lowest aligned logical block, the 4KiB
sectors start at LBA -1, and consequently sector 63 is aligned on a 4KiB
boundary), manually account for this when initializing the device for use
by LVM.
.br
.B pvcreate \-\-dataalignmentoffset 7s /dev/sdb
@@ -1,5 +0,0 @@
pvdisplay shows the attributes of PVs, like size, physical extent size,
space used for the VG descriptor area, etc.

\fBpvs\fP(8) is a preferred alternative that shows the same information
and more, using a more compact and configurable output format.
@@ -1,46 +0,0 @@
.SH EXAMPLES

Move all physical extents that are used by simple LVs on the specified PV to
free physical extents elsewhere in the VG.
.br
.B pvmove /dev/sdb1

Use a specific destination PV when moving physical extents.
.br
.B pvmove /dev/sdb1 /dev/sdc1

Move extents belonging to a single LV.
.br
.B pvmove \-n lvol1 /dev/sdb1 /dev/sdc1

Rather than moving the contents of an entire device, it is possible to
move a range of physical extents, for example numbers 1000 to 1999
inclusive on the specified PV.
.br
.B pvmove /dev/sdb1:1000\-1999

A range of physical extents to move can be specified as start+length. For
example, starting from PE 1000. (Counting starts from 0, so this refers to the
1001st to the 2000th PE inclusive.)
.br
.B pvmove /dev/sdb1:1000+1000

Move a range of physical extents to a specific PV (which must have
sufficient free extents).
.br
.B pvmove /dev/sdb1:1000\-1999 /dev/sdc1

Move a range of physical extents to specific new extents on a new PV.
.br
.B pvmove /dev/sdb1:1000\-1999 /dev/sdc1:0\-999

If the source and destination are on the same disk, the
\fBanywhere\fP allocation policy is needed.
.br
.B pvmove \-\-alloc anywhere /dev/sdb1:1000\-1999 /dev/sdb1:0\-999

The part of a specific LV present within a range of physical
extents can also be picked out and moved.
.br
.B pvmove \-n lvol1 /dev/sdb1:1000\-1999 /dev/sdc1
@@ -1,20 +1,6 @@
pvmove moves the allocated physical extents (PEs) on a source PV to one or
more destination PVs. You can optionally specify a source LV, in which
case only extents used by that LV will be moved to free (or specified)
extents on the destination PV. If no destination PV is specified, the
normal allocation rules for the VG are used.

If pvmove is interrupted for any reason (e.g. the machine crashes), then
run pvmove again without any PV arguments to restart any operations that
were in progress from the last checkpoint. Alternatively, use the abort
option at any time to abort the operation. The resulting location of LVs
after an abort depends on whether the atomic option was used.

More than one pvmove can run concurrently if they are moving data from
different source PVs, but additional pvmoves will ignore any LVs already
in the process of being changed, so some data might not get moved.

pvmove works as follows:
.SH NOTES
.
\fBpvmove\fP works as follows:

1. A temporary 'pvmove' LV is created to store details of all the data
movements required.
@@ -1,2 +0,0 @@
pvremove wipes the label on a device so that LVM will no longer recognize
it as a PV.
@@ -1,2 +0,0 @@
pvresize resizes a PV. The PV may already be in a VG and may have active
LVs allocated on it.
@@ -1,16 +0,0 @@
.SH NOTES

pvresize will refuse to shrink a PV if it has allocated extents beyond the
new end.

.SH EXAMPLES

Expand a PV after enlarging the partition.
.br
.B pvresize /dev/sda1

Shrink a PV prior to shrinking the partition (ensure that the PV size is
appropriate for the intended new partition size).
.br
.B pvresize \-\-setphysicalvolumesize 40G /dev/sda1
@@ -1 +0,0 @@
pvs produces formatted output about PVs.
@@ -1,6 +1,6 @@
pvscan scans all supported LVM block devices in the system for PVs.
.SH NOTES

\fBScanning with lvmetad\fP
.SS Scanning with lvmetad

pvscan operates differently when used with the
.BR lvmetad (8)
@@ -64,9 +64,7 @@ be temporarily disabled if they are seen.
To notify lvmetad about a device that is no longer present, the major and
minor numbers must be given, not the path.
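As an illustrative sketch (device numbers hypothetical), a device that was
removed while holding major:minor 253:2 could be dropped from the lvmetad
cache with:
.br
.B pvscan \-\-cache \-\-major 253 \-\-minor 2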
.P

\fBAutomatic activation\fP
.SS Automatic activation

When event-driven system services detect a new LVM device, the first step
is to automatically scan and cache the metadata from the device. This is
@@ -1,67 +0,0 @@
.SH SEE ALSO

.BR lvm (8)
.BR lvm.conf (5)
.BR lvmconfig (8)

.BR pvchange (8)
.BR pvck (8)
.BR pvcreate (8)
.BR pvdisplay (8)
.BR pvmove (8)
.BR pvremove (8)
.BR pvresize (8)
.BR pvs (8)
.BR pvscan (8)

.BR vgcfgbackup (8)
.BR vgcfgrestore (8)
.BR vgchange (8)
.BR vgck (8)
.BR vgcreate (8)
.BR vgconvert (8)
.BR vgdisplay (8)
.BR vgexport (8)
.BR vgextend (8)
.BR vgimport (8)
.BR vgimportclone (8)
.BR vgmerge (8)
.BR vgmknodes (8)
.BR vgreduce (8)
.BR vgremove (8)
.BR vgrename (8)
.BR vgs (8)
.BR vgscan (8)
.BR vgsplit (8)

.BR lvcreate (8)
.BR lvchange (8)
.BR lvconvert (8)
.BR lvdisplay (8)
.BR lvextend (8)
.BR lvreduce (8)
.BR lvremove (8)
.BR lvrename (8)
.BR lvresize (8)
.BR lvs (8)
.BR lvscan (8)

.BR lvm2-activation-generator (8)
.BR blkdeactivate (8)
.BR lvmdump (8)

.BR dmeventd (8)
.BR lvmetad (8)
.BR lvmpolld (8)
.BR lvmlockd (8)
.BR lvmlockctl (8)
.BR clvmd (8)
.BR cmirrord (8)
.BR lvmdbusd (8)

.BR lvmsystemid (7)
.BR lvmreport (7)
.BR lvmraid (7)
.BR lvmthin (7)
.BR lvmcache (7)
@@ -1,16 +0,0 @@
vgcfgbackup creates backup files containing metadata of VGs.
If no VGs are named, backup files are created for all VGs.
See \fBvgcfgrestore\fP(8) for information on using the backup
files.

In a default installation, each VG is backed up into a separate file
bearing the name of the VG in the directory \fI#DEFAULT_BACKUP_DIR#\fP.

To use an alternative backup file, use \fB\-f\fP. In this case, when
backing up multiple VGs, the file name is treated as a template, with %s
replaced by the VG name.

NB. This DOES NOT back up the data content of LVs.

It may also be useful to regularly back up the files in
\fI#DEFAULT_SYS_DIR#\fP.
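As an illustrative sketch (target directory hypothetical), all VGs can be
backed up into per-VG files using the %s template:
.br
.B vgcfgbackup \-f /tmp/backup/%s.vg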
@@ -1,8 +0,0 @@
vgcfgrestore restores the metadata of a VG from a text backup file
produced by \fBvgcfgbackup\fP. This writes VG metadata onto the devices
specified in the backup file.

A backup file can be specified with \fB\-\-file\fP. If no backup file is
specified, the most recent one is used. Use \fB\-\-list\fP for a list of
the available backup and archive files of a VG.
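As an illustrative sketch (VG name hypothetical), the available backup and
archive files can be listed before choosing one to restore:
.br
.B vgcfgrestore \-\-list vg00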
@@ -1,9 +0,0 @@
.SH NOTES

To replace PVs, \fBvgdisplay \-\-partial \-\-verbose\fP will show the
UUIDs and sizes of any PVs that are no longer present. If a PV in the VG
is lost and you wish to substitute another of the same size, use
\fBpvcreate \-\-restorefile filename \-\-uuid uuid\fP (plus additional
arguments as appropriate) to initialize it with the same UUID as the
missing PV. Repeat for all other missing PVs in the VG. Then use
\fBvgcfgrestore \-\-file filename\fP to restore the VG's metadata.
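As an illustrative sketch of this procedure (the UUID placeholder, file
name, and device name are hypothetical):
.br
.B pvcreate \-\-restorefile /etc/lvm/backup/vg00 \-\-uuid MISSING\-PV\-UUID /dev/sdb1
.br
.B vgcfgrestore \-\-file /etc/lvm/backup/vg00 vg00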
@@ -1,2 +0,0 @@
vgchange changes VG attributes, changes LV activation in the kernel, and
includes other utilities for VG maintenance.
@@ -1,16 +0,0 @@
.SH NOTES

If vgchange recognizes COW snapshot LVs that were dropped because they ran
out of space, it displays a message informing the administrator that the
snapshots should be removed.

.SH EXAMPLES

Activate all LVs in all VGs on all existing devices.
.br
.B vgchange \-a y

Change the maximum number of LVs for an inactive VG.
.br
.B vgchange \-l 128 vg00
@@ -1 +0,0 @@
vgck checks LVM metadata for consistency.
@@ -1,7 +0,0 @@
vgconvert converts VG metadata from one format to another. The new
metadata format must be able to fit into the space provided by the old
format.

Because the LVM1 format should no longer be used, this command is no
longer needed in general.
@@ -1,4 +0,0 @@
vgcreate creates a new VG on block devices. If the devices were not
previously initialized as PVs with \fBpvcreate\fP(8), vgcreate will
initialize them, making them PVs. The pvcreate options for initializing
devices are also available with vgcreate.
@@ -1,6 +0,0 @@
.SH EXAMPLES

Create a VG with two PVs, using the default physical extent size.
.br
.B vgcreate myvg /dev/sdk1 /dev/sdl1
@@ -1,4 +0,0 @@
vgdisplay shows the attributes of VGs, and the associated PVs and LVs.

\fBvgs\fP(8) is a preferred alternative that shows the same information
and more, using a more compact and configurable output format.
@@ -1,8 +1,14 @@
vgexport makes inactive VGs unknown to the system. In this state, all the
PVs in the VG can be moved to a different system, from which
.SH NOTES
.
.IP \[bu] 3
vgexport can make inactive VG(s) unknown to the system. In this state,
all the PVs in the VG can be moved to a different system, from which
\fBvgimport\fP can then be run.

.IP \[bu] 3
Most LVM tools ignore exported VGs.

.IP \[bu] 3
vgexport clears the VG system ID, and vgimport sets the VG system ID to
match the host running vgimport (if the host has a system ID).
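As an illustrative sketch (VG name hypothetical), a VG is typically
deactivated, exported, and later imported on the destination system:
.br
.B vgchange \-an vg00
.br
.B vgexport vg00
.br
.B vgimport vg00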
@@ -1,11 +0,0 @@
vgextend adds one or more PVs to a VG. This increases the space available
for LVs in the VG.

Also, PVs that have gone missing and then returned, e.g. due to a
transient device failure, can be added back to the VG without
re-initializing them (see \-\-restoremissing).

If the specified PVs have not yet been initialized with pvcreate, vgextend
will initialize them. In this case pvcreate options can be used, e.g.
\-\-labelsector, \-\-metadatasize, \-\-metadataignore,
\-\-pvmetadatacopies, \-\-dataalignment, \-\-dataalignmentoffset.
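As an illustrative sketch (names hypothetical), a PV that disappeared and
then reappeared can be restored to its VG without re-initialization:
.br
.B vgextend \-\-restoremissing vg00 /dev/sdb1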
@@ -1,6 +0,0 @@
.SH EXAMPLES

Add two PVs to a VG.
.br
.B vgextend vg00 /dev/sda4 /dev/sdn1
@@ -1,5 +0,0 @@
vgimport makes exported VGs known to the system again, perhaps after
moving the PVs from a different system.

vgexport clears the VG system ID, and vgimport sets the VG system ID to
match the host running vgimport (if the host has a system ID).
9
man/vgimport.8.notes
Normal file
@@ -0,0 +1,9 @@
.SH NOTES
.
.IP \[bu] 3
vgimport makes exported VG(s) known to the system again, perhaps
after moving the PVs from a different system.

.IP \[bu] 3
vgexport clears the VG system ID, and vgimport sets the VG system ID
to match the host running vgimport (if the host has a system ID).
@@ -1,6 +0,0 @@
vgimportclone imports a VG from duplicated PVs, e.g. created by a hardware
snapshot of existing PVs.

A duplicated VG cannot be used until it is made to coexist with the original
VG. vgimportclone renames the VG associated with the specified PVs and
changes the associated VG and PV UUIDs.
@@ -1,9 +0,0 @@
.SH EXAMPLES

An original VG "vg00" has PVs "/dev/sda" and "/dev/sdb".
The corresponding PVs from a hardware snapshot are "/dev/sdc" and "/dev/sdd".
Rename the VG associated with "/dev/sdc" and "/dev/sdd" from "vg00" to "vg00_snap"
(and change associated UUIDs).
.br
.B vgimportclone \-\-basevgname vg00_snap /dev/sdc /dev/sdd
9
man/vgimportclone.8.notes
Normal file
@@ -0,0 +1,9 @@
.SH NOTES
.
vgimportclone can be used to import a VG from duplicated PVs (e.g. created
by a hardware snapshot of the PV devices).

A duplicated VG cannot be used until it is made to coexist with the original
VG. vgimportclone renames the VG associated with the specified PVs and
changes the associated VG and PV UUIDs.
@@ -1,3 +0,0 @@
vgmerge merges two existing VGs. The inactive source VG is merged into the
destination VG if physical extent sizes are equal and PV and LV summaries
of both VGs fit into the destination VG's limits.
@@ -1,7 +0,0 @@
.SH EXAMPLES

Merge an inactive VG named "vg00" into the active or inactive VG named
"databases", giving verbose runtime information.
.br
.B vgmerge \-v databases vg00
@@ -1,5 +0,0 @@
vgmknodes checks the LVM device nodes in /dev that are needed for active
LVs, creates any that are missing, and removes unused ones.

This command should not usually be needed if all the system components are
interoperating correctly.
@@ -1 +0,0 @@
vgreduce removes one or more unused PVs from a VG.
@@ -1,6 +0,0 @@
vgremove removes one or more VGs. If LVs exist in the VG, a prompt is used
to confirm LV removal.

If one or more PVs in the VG are lost, consider
\fBvgreduce \-\-removemissing\fP to make the VG
metadata consistent again.
Some files were not shown because too many files have changed in this diff.