Mirror of git://sourceware.org/git/lvm2.git (synced 2025-10-06 11:33:14 +03:00)

Compare commits: dev-lvmguy...dev-dct-cm (51 commits)
SHA1
2634b35ff4
3364166a6c
20b3e8153c
485e2999db
fec9641436
5b733c953e
985d73b03d
24cf2a6127
0d929d5a3d
c8945374d5
1f67df932d
1bef87796f
cfbbb4bee9
058b53cdf4
2b8019fd43
c213aa76c4
15e69fae19
fe0ad1157d
88a2b1dc37
03970369c5
c2147bda74
a61bc922a9
d41a2e6f30
55d6e9ebb7
0a61ea395f
e61f84cd1f
ca8e344041
14838f584f
6033e894e2
9b4c2027bf
61e0925844
214f48aa96
29ee87987c
2391324196
e8230719ee
2c5f6b8a02
fe3b2f5ead
91a1273d56
5507307b5f
91b71a8b02
3057fe9fec
258027d1b1
2da0f6110d
09d6229350
6312e0872c
c752f4e1eb
cd36b1c698
310d9a09fd
81e84556b5
c903d0e4b3
2da7b10988
@@ -59,8 +59,6 @@ liblvm: lib
daemons: lib libdaemon tools
tools: lib libdaemon device-mapper
po: tools daemons
man: tools
all_man: tools
scripts: liblvm libdm

lib.device-mapper: include.device-mapper
WHATS_NEW (14 changed lines)
@@ -1,19 +1,5 @@
Version 2.02.169 -
=====================================
Add lvconvert --swapmetadata, new specific way to swap pool metadata LVs.
Add lvconvert --startpoll, new specific way to start polling conversions.
Add lvconvert --mergethin, new specific way to merge thin snapshots.
Add lvconvert --mergemirrors, new specific way to merge split mirrors.
Add lvconvert --mergesnapshot, new specific way to combine cow LVs.
Split up lvconvert code based on command definitions.
Split up lvchange code based on command definitions.
Generate help output and man pages from command definitions.
Verify all command line items against command definition.
Match every command run to one command definition.
Specify every allowed command definition/syntax in command-lines.in.
Add extra memory page when limiting pthread stack size in clvmd.
Support striped/raid0* <-> raid10_near conversions
Support shrinking of RaidLvs
Support region size changes on existing RaidLVs
Avoid parallel usage of cpg_mcast_joined() in clvmd with corosync.
Support raid6_{ls,rs,la,ra}_6 segment types and conversions from/to it.
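The lvconvert entries above name single-purpose sub-commands; a hypothetical usage sketch follows (VG/LV names and companion options are illustrative only, not taken from this diff):

  # swap in a prepared metadata LV for a pool (names are made up)
  lvconvert --swapmetadata --poolmetadata vg/new_meta vg/pool
  # resume polling of an interrupted conversion
  lvconvert --startpoll vg/lvol0
  # merge a thin snapshot back into its origin
  lvconvert --mergethin vg/thin_snap
  # merge a previously split mirror image back into the mirror
  lvconvert --mergemirrors vg/lv_split
  # merge a COW snapshot back into its origin
  lvconvert --mergesnapshot vg/snap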
@@ -1,8 +1,5 @@
Version 1.02.138 -
=====================================
Add extra memory page when limiting pthread stack size in dmeventd.
Avoids immediate resume when preloaded device is smaller.
Do not suppress kernel key description in dmsetup table output.
Support configurable command executed from dmeventd thin plugin.
Support new R|r human readable units output format.
Thin dmeventd plugin reacts faster on lvextend failure path with umount.
@@ -2054,7 +2054,6 @@ dmeventd {
# or metadata volume gets above 50%.
# Command which starts with 'lvm ' prefix is internal lvm command.
# You can write your own handler to customise behaviour in more details.
# User handler is specified with the full path starting with '/'.
# This configuration option has an automatic default value.
# thin_command = "lvm lvextend --use-policies"
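A minimal lvm.conf sketch of the two thin_command forms described above (the user-handler path is a made-up example):

  dmeventd {
      # internal lvm command, marked by the "lvm " prefix (the documented default)
      thin_command = "lvm lvextend --use-policies"
      # or a user handler, given as a full path starting with '/'
      # thin_command = "/usr/local/sbin/thin_handler.sh"
  }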
configure (vendored, 198 changed lines)
@@ -821,8 +821,6 @@ HAVE_PIE
POW_LIB
LIBOBJS
ALLOCA
SORT
WC
CHMOD
CSCOPE_CMD
CFLOW_CMD
@@ -5236,202 +5234,6 @@ else
CHMOD="$ac_cv_path_CHMOD"
fi

if test -n "$ac_tool_prefix"; then
# Extract the first word of "${ac_tool_prefix}wc", so it can be a program name with args.
set dummy ${ac_tool_prefix}wc; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
if ${ac_cv_path_WC+:} false; then :
$as_echo_n "(cached) " >&6
else
case $WC in
[\\/]* | ?:[\\/]*)
ac_cv_path_WC="$WC" # Let the user override the test with a path.
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_WC="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
done
done
IFS=$as_save_IFS

;;
esac
fi
WC=$ac_cv_path_WC
if test -n "$WC"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $WC" >&5
$as_echo "$WC" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
fi

fi
if test -z "$ac_cv_path_WC"; then
ac_pt_WC=$WC
# Extract the first word of "wc", so it can be a program name with args.
set dummy wc; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
if ${ac_cv_path_ac_pt_WC+:} false; then :
$as_echo_n "(cached) " >&6
else
case $ac_pt_WC in
[\\/]* | ?:[\\/]*)
ac_cv_path_ac_pt_WC="$ac_pt_WC" # Let the user override the test with a path.
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_ac_pt_WC="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
done
done
IFS=$as_save_IFS

;;
esac
fi
ac_pt_WC=$ac_cv_path_ac_pt_WC
if test -n "$ac_pt_WC"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_WC" >&5
$as_echo "$ac_pt_WC" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
fi

if test "x$ac_pt_WC" = x; then
WC=""
else
case $cross_compiling:$ac_tool_warned in
yes:)
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
ac_tool_warned=yes ;;
esac
WC=$ac_pt_WC
fi
else
WC="$ac_cv_path_WC"
fi

if test -n "$ac_tool_prefix"; then
# Extract the first word of "${ac_tool_prefix}sort", so it can be a program name with args.
set dummy ${ac_tool_prefix}sort; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
if ${ac_cv_path_SORT+:} false; then :
$as_echo_n "(cached) " >&6
else
case $SORT in
[\\/]* | ?:[\\/]*)
ac_cv_path_SORT="$SORT" # Let the user override the test with a path.
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_SORT="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
done
done
IFS=$as_save_IFS

;;
esac
fi
SORT=$ac_cv_path_SORT
if test -n "$SORT"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $SORT" >&5
$as_echo "$SORT" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
fi

fi
if test -z "$ac_cv_path_SORT"; then
ac_pt_SORT=$SORT
# Extract the first word of "sort", so it can be a program name with args.
set dummy sort; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
if ${ac_cv_path_ac_pt_SORT+:} false; then :
$as_echo_n "(cached) " >&6
else
case $ac_pt_SORT in
[\\/]* | ?:[\\/]*)
ac_cv_path_ac_pt_SORT="$ac_pt_SORT" # Let the user override the test with a path.
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_ac_pt_SORT="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
done
done
IFS=$as_save_IFS

;;
esac
fi
ac_pt_SORT=$ac_cv_path_ac_pt_SORT
if test -n "$ac_pt_SORT"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_SORT" >&5
$as_echo "$ac_pt_SORT" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
fi

if test "x$ac_pt_SORT" = x; then
SORT=""
else
case $cross_compiling:$ac_tool_warned in
yes:)
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
ac_tool_warned=yes ;;
esac
SORT=$ac_pt_SORT
fi
else
SORT="$ac_cv_path_SORT"
fi

################################################################################
ac_header_dirent=no
@@ -86,8 +86,6 @@ AC_PROG_RANLIB
AC_PATH_TOOL(CFLOW_CMD, cflow)
AC_PATH_TOOL(CSCOPE_CMD, cscope)
AC_PATH_TOOL(CHMOD, chmod)
AC_PATH_TOOL(WC, wc)
AC_PATH_TOOL(SORT, sort)

################################################################################
dnl -- Check for header files.
@@ -517,7 +517,7 @@ int main(int argc, char *argv[])
/* Initialise the LVM thread variables */
dm_list_init(&lvm_cmd_head);
if (pthread_attr_init(&stack_attr) ||
pthread_attr_setstacksize(&stack_attr, STACK_SIZE + getpagesize())) {
pthread_attr_setstacksize(&stack_attr, STACK_SIZE)) {
log_sys_error("pthread_attr_init", "");
exit(1);
}
@@ -468,7 +468,7 @@ static int _pthread_create_smallstack(pthread_t *t, void *(*fun)(void *), void *
/*
* We use a smaller stack since it gets preallocated in its entirety
*/
pthread_attr_setstacksize(&attr, THREAD_STACK_SIZE + getpagesize());
pthread_attr_setstacksize(&attr, THREAD_STACK_SIZE);

/*
* If no-one will be waiting, we need to detach.
@@ -184,12 +184,16 @@ int register_device(const char *device,
goto_bad;

if (!dmeventd_lvm2_command(state->mem, state->cmd_lvscan, sizeof(state->cmd_lvscan),
"lvscan --cache", device))
"lvscan --cache", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}

if (!dmeventd_lvm2_command(state->mem, state->cmd_lvconvert, sizeof(state->cmd_lvconvert),
"lvconvert --repair --use-policies", device))
"lvconvert --repair --use-policies", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}

*user = state;

@@ -199,9 +203,6 @@ int register_device(const char *device,
bad:
log_error("Failed to monitor mirror %s.", device);

if (state)
dmeventd_lvm2_exit_with_pool(state);

return 0;
}
@@ -140,8 +140,10 @@ int register_device(const char *device,
"lvscan --cache", device) ||
!dmeventd_lvm2_command(state->mem, state->cmd_lvconvert, sizeof(state->cmd_lvconvert),
"lvconvert --config devices{ignore_suspended_devices=1} "
"--repair --use-policies", device))
"--repair --use-policies", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}

*user = state;

@@ -151,9 +153,6 @@ int register_device(const char *device,
bad:
log_error("Failed to monitor RAID %s.", device);

if (state)
dmeventd_lvm2_exit_with_pool(state);

return 0;
}
@@ -254,8 +254,10 @@ int register_device(const char *device,

if (!dmeventd_lvm2_command(state->mem, state->cmd_lvextend,
sizeof(state->cmd_lvextend),
"lvextend --use-policies", device))
"lvextend --use-policies", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}

state->percent_check = CHECK_MINIMUM;
*user = state;
@@ -266,9 +268,6 @@ int register_device(const char *device,
bad:
log_error("Failed to monitor snapshot %s.", device);

if (state)
dmeventd_lvm2_exit_with_pool(state);

return 0;
}
@@ -18,6 +18,7 @@

#include <sys/wait.h>
#include <stdarg.h>
#include <pthread.h>

/* TODO - move this mountinfo code into library to be reusable */
#ifdef __linux__
@@ -58,8 +59,8 @@ struct dso_state {
int restore_sigset;
sigset_t old_sigset;
pid_t pid;
char *argv[3];
char *cmd_str;
char **argv;
char cmd_str[1024];
};

DM_EVENT_LOG_FN("thin")
@@ -85,7 +86,7 @@ static int _run_command(struct dso_state *state)
} else {
/* For an error event it's for a user to check status and decide */
env[1] = NULL;
log_debug("Error event processing.");
log_debug("Error event processing");
}

log_verbose("Executing command: %s", state->cmd_str);
@@ -115,7 +116,7 @@ static int _use_policy(struct dm_task *dmt, struct dso_state *state)
#if THIN_DEBUG
log_debug("dmeventd executes: %s.", state->cmd_str);
#endif
if (state->argv[0])
if (state->argv)
return _run_command(state);

if (!dmeventd_lvm2_run_with_lock(state->cmd_str)) {
@@ -352,41 +353,34 @@ int register_device(const char *device,
void **user)
{
struct dso_state *state;
int maxcmd;
char *str;
char cmd_str[PATH_MAX + 128 + 2]; /* cmd ' ' vg/lv \0 */

if (!dmeventd_lvm2_init_with_pool("thin_pool_state", state))
goto_bad;

if (!dmeventd_lvm2_command(state->mem, cmd_str, sizeof(cmd_str),
"_dmeventd_thin_command", device))
if (!dmeventd_lvm2_command(state->mem, state->cmd_str,
sizeof(state->cmd_str),
"_dmeventd_thin_command", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}

if (strncmp(cmd_str, "lvm ", 4) == 0) {
if (!(state->cmd_str = dm_pool_strdup(state->mem, cmd_str + 4))) {
log_error("Failed to copy lvm command.");
goto bad;
}
} else if (cmd_str[0] == '/') {
if (!(state->cmd_str = dm_pool_strdup(state->mem, cmd_str))) {
log_error("Failed to copy thin command.");
if (strncmp(state->cmd_str, "lvm ", 4)) {
maxcmd = 2; /* space for last NULL element */
for (str = state->cmd_str; *str; str++)
if (*str == ' ')
maxcmd++;
if (!(str = dm_pool_strdup(state->mem, state->cmd_str)) ||
!(state->argv = dm_pool_zalloc(state->mem, maxcmd * sizeof(char *)))) {
log_error("Failed to allocate memory for command.");
goto bad;
}

/* Find last space before 'vg/lv' */
if (!(str = strrchr(state->cmd_str, ' ')))
goto inval;

if (!(state->argv[0] = dm_pool_strndup(state->mem, state->cmd_str,
str - state->cmd_str))) {
log_error("Failed to copy command.");
goto bad;
}

state->argv[1] = str + 1; /* 1 argument - vg/lv */
dm_split_words(str, maxcmd - 1, 0, state->argv);
_init_thread_signals(state);
} else /* Unuspported command format */
goto inval;
} else
memmove(state->cmd_str, state->cmd_str + 4, strlen(state->cmd_str + 4) + 1);

state->pid = -1;
*user = state;
@@ -394,14 +388,9 @@ int register_device(const char *device,
log_info("Monitoring thin pool %s.", device);

return 1;
inval:
log_error("Invalid command for monitoring: %s.", cmd_str);
bad:
log_error("Failed to monitor thin pool %s.", device);

if (state)
dmeventd_lvm2_exit_with_pool(state);

return 0;
}
@@ -19,12 +19,10 @@

#define MIN_ARGV_SIZE 8

static const char *const polling_ops[] = {
[PVMOVE] = LVMPD_REQ_PVMOVE,
[CONVERT] = LVMPD_REQ_CONVERT,
[MERGE] = LVMPD_REQ_MERGE,
[MERGE_THIN] = LVMPD_REQ_MERGE_THIN
};
static const char *const const polling_ops[] = { [PVMOVE] = LVMPD_REQ_PVMOVE,
[CONVERT] = LVMPD_REQ_CONVERT,
[MERGE] = LVMPD_REQ_MERGE,
[MERGE_THIN] = LVMPD_REQ_MERGE_THIN };

const char *polling_op(enum poll_type type)
{
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -272,18 +272,10 @@ int lv_raid_percent(const struct logical_volume *lv, dm_percent_t *percent)
{
return 0;
}
int lv_raid_data_offset(const struct logical_volume *lv, uint64_t *data_offset)
{
return 0;
}
int lv_raid_dev_health(const struct logical_volume *lv, char **dev_health)
{
return 0;
}
int lv_raid_dev_count(const struct logical_volume *lv, uint32_t *dev_cnt)
{
return 0;
}
int lv_raid_mismatch_count(const struct logical_volume *lv, uint64_t *cnt)
{
return 0;
@@ -992,30 +984,6 @@ int lv_raid_percent(const struct logical_volume *lv, dm_percent_t *percent)
return lv_mirror_percent(lv->vg->cmd, lv, 0, percent, NULL);
}

int lv_raid_data_offset(const struct logical_volume *lv, uint64_t *data_offset)
{
int r;
struct dev_manager *dm;
struct dm_status_raid *status;

if (!lv_info(lv->vg->cmd, lv, 0, NULL, 0, 0))
return 0;

log_debug_activation("Checking raid data offset and dev sectors for LV %s/%s",
lv->vg->name, lv->name);
if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
return_0;

if (!(r = dev_manager_raid_status(dm, lv, &status)))
stack;

*data_offset = status->data_offset;

dev_manager_destroy(dm);

return r;
}

int lv_raid_dev_health(const struct logical_volume *lv, char **dev_health)
{
int r;
@@ -1045,32 +1013,6 @@ int lv_raid_dev_health(const struct logical_volume *lv, char **dev_health)
return r;
}

int lv_raid_dev_count(const struct logical_volume *lv, uint32_t *dev_cnt)
{
struct dev_manager *dm;
struct dm_status_raid *status;

*dev_cnt = 0;

if (!lv_info(lv->vg->cmd, lv, 0, NULL, 0, 0))
return 0;

log_debug_activation("Checking raid device count for LV %s/%s",
lv->vg->name, lv->name);
if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
return_0;

if (!dev_manager_raid_status(dm, lv, &status)) {
dev_manager_destroy(dm);
return_0;
}
*dev_cnt = status->dev_count;

dev_manager_destroy(dm);

return 1;
}

int lv_raid_mismatch_count(const struct logical_volume *lv, uint64_t *cnt)
{
struct dev_manager *dm;
@@ -2006,13 +1948,16 @@ int monitor_dev_for_events(struct cmd_context *cmd, const struct logical_volume

/* Check [un]monitor results */
/* Try a couple times if pending, but not forever... */
for (i = 0;; i++) {
for (i = 0; i < 40; i++) {
pending = 0;
monitored = seg->segtype->ops->target_monitored(seg, &pending);
if (!pending || i >= 40)
if (pending ||
(!monitored && monitor) ||
(monitored && !monitor))
log_very_verbose("%s %smonitoring still pending: waiting...",
display_lvname(lv), monitor ? "" : "un");
else
break;
log_very_verbose("%s %smonitoring still pending: waiting...",
display_lvname(lv), monitor ? "" : "un");
usleep(10000 * i);
}
@@ -168,8 +168,6 @@ int lv_snapshot_percent(const struct logical_volume *lv, dm_percent_t *percent);
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
int wait, dm_percent_t *percent, uint32_t *event_nr);
int lv_raid_percent(const struct logical_volume *lv, dm_percent_t *percent);
int lv_raid_dev_count(const struct logical_volume *lv, uint32_t *dev_cnt);
int lv_raid_data_offset(const struct logical_volume *lv, uint64_t *data_offset);
int lv_raid_dev_health(const struct logical_volume *lv, char **dev_health);
int lv_raid_mismatch_count(const struct logical_volume *lv, uint64_t *cnt);
int lv_raid_sync_action(const struct logical_volume *lv, char **sync_action);
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -214,14 +214,6 @@ typedef enum {
STATUS, /* DM_DEVICE_STATUS ioctl */
} info_type_t;

/* Return length of segment depending on type and reshape_len */
static uint32_t _seg_len(const struct lv_segment *seg)
{
uint32_t reshape_len = seg_is_raid(seg) ? ((seg->area_count - seg->segtype->parity_devs) * seg->reshape_len) : 0;

return seg->len - reshape_len;
}

static int _info_run(const char *dlid, struct dm_info *dminfo,
uint32_t *read_ahead,
struct lv_seg_status *seg_status,
@@ -258,7 +250,7 @@ static int _info_run(const char *dlid, struct dm_info *dminfo,
if (seg_status && dminfo->exists) {
start = length = seg_status->seg->lv->vg->extent_size;
start *= seg_status->seg->le;
length *= _seg_len(seg_status->seg);
length *= seg_status->seg->len;

do {
target = dm_get_next_target(dmt, target, &target_start,
@@ -2222,7 +2214,7 @@ static char *_add_error_or_zero_device(struct dev_manager *dm, struct dm_tree *d
struct lv_segment *seg_i;
struct dm_info info;
int segno = -1, i = 0;
uint64_t size = (uint64_t) _seg_len(seg) * seg->lv->vg->extent_size;
uint64_t size = (uint64_t) seg->len * seg->lv->vg->extent_size;

dm_list_iterate_items(seg_i, &seg->lv->segments) {
if (seg == seg_i) {
@@ -2508,7 +2500,7 @@ static int _add_target_to_dtree(struct dev_manager *dm,
return seg->segtype->ops->add_target_line(dm, dm->mem, dm->cmd,
&dm->target_state, seg,
laopts, dnode,
extent_size * _seg_len(seg),
extent_size * seg->len,
&dm->pvmove_mirror_count);
}

@@ -2701,7 +2693,7 @@ static int _add_segment_to_dtree(struct dev_manager *dm,
/* Replace target and all its used devs with error mapping */
log_debug_activation("Using error for pending delete %s.",
display_lvname(seg->lv));
if (!dm_tree_node_add_error_target(dnode, (uint64_t)seg->lv->vg->extent_size * _seg_len(seg)))
if (!dm_tree_node_add_error_target(dnode, (uint64_t)seg->lv->vg->extent_size * seg->len))
return_0;
} else if (!_add_target_to_dtree(dm, dnode, seg, laopts))
return_0;
@@ -3173,6 +3165,7 @@ static int _tree_action(struct dev_manager *dm, const struct logical_volume *lv,
log_error(INTERNAL_ERROR "_tree_action: Action %u not supported.", action);
goto out;
}

r = 1;

out:
@@ -1864,9 +1864,7 @@ cfg(dmeventd_thin_command_CFG, "thin_command", dmeventd_CFG_SECTION, CFG_DEFAULT
"The plugin runs command with each 5% increment when thin-pool data volume\n"
"or metadata volume gets above 50%.\n"
"Command which starts with 'lvm ' prefix is internal lvm command.\n"
"You can write your own handler to customise behaviour in more details.\n"
"User handler is specified with the full path starting with '/'.\n")
/* TODO: systemd service handler */
"You can write your own handler to customise behaviour in more details.\n")

cfg(dmeventd_executable_CFG, "executable", dmeventd_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_DMEVENTD_PATH, vsn(2, 2, 73), "@DMEVENTD_PATH@", 0, NULL,
"The full path to the dmeventd binary.\n")
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2014 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -71,7 +71,7 @@
* FIXME: Increase these to 64 and further to the MD maximum
* once the SubLVs split and name shift got enhanced
*/
#define DEFAULT_RAID1_MAX_IMAGES 64
#define DEFAULT_RAID1_MAX_IMAGES 10
#define DEFAULT_RAID_MAX_IMAGES 64
#define DEFAULT_ALLOCATION_STRIPE_ALL_DEVICES 0 /* Don't stripe across all devices if not -i/--stripes given */
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -225,8 +225,8 @@ static int _read_linear(struct cmd_context *cmd, struct lv_map *lvm)
while (le < lvm->lv->le_count) {
len = _area_length(lvm, le);

if (!(seg = alloc_lv_segment(segtype, lvm->lv, le, len, 0, 0, 0,
NULL, 1, len, 0, 0, 0, 0, NULL))) {
if (!(seg = alloc_lv_segment(segtype, lvm->lv, le, len, 0, 0,
NULL, 1, len, 0, 0, 0, NULL))) {
log_error("Failed to allocate linear segment.");
return 0;
}
@@ -297,10 +297,10 @@ static int _read_stripes(struct cmd_context *cmd, struct lv_map *lvm)

if (!(seg = alloc_lv_segment(segtype, lvm->lv,
lvm->stripes * first_area_le,
lvm->stripes * area_len, 0,
lvm->stripes * area_len,
0, lvm->stripe_size, NULL,
lvm->stripes,
area_len, 0, 0, 0, 0, NULL))) {
area_len, 0, 0, 0, NULL))) {
log_error("Failed to allocate striped segment.");
return 0;
}
@@ -1,6 +1,6 @@
/*
* Copyright (C) 1997-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -192,9 +192,9 @@ static int _add_stripe_seg(struct dm_pool *mem,
return_0;

if (!(seg = alloc_lv_segment(segtype, lv, *le_cur,
area_len * usp->num_devs, 0, 0,
area_len * usp->num_devs, 0,
usp->striping, NULL, usp->num_devs,
area_len, 0, 0, 0, 0, NULL))) {
area_len, 0, 0, 0, NULL))) {
log_error("Unable to allocate striped lv_segment structure");
return 0;
}
@@ -232,8 +232,8 @@ static int _add_linear_seg(struct dm_pool *mem,
area_len = (usp->devs[j].blocks) / POOL_PE_SIZE;

if (!(seg = alloc_lv_segment(segtype, lv, *le_cur,
area_len, 0, 0, usp->striping,
NULL, 1, area_len, 0,
area_len, 0, usp->striping,
NULL, 1, area_len,
POOL_PE_SIZE, 0, 0, NULL))) {
log_error("Unable to allocate linear lv_segment "
"structure");
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -583,10 +583,8 @@ static int _print_segment(struct formatter *f, struct volume_group *vg,
outf(f, "start_extent = %u", seg->le);
outsize(f, (uint64_t) seg->len * vg->extent_size,
"extent_count = %u", seg->len);

outnl(f);
if (seg->reshape_len)
outsize(f, (uint64_t) seg->reshape_len * vg->extent_size,
"reshape_count = %u", seg->reshape_len);
outf(f, "type = \"%s\"", seg->segtype->name);

if (!_out_list(f, &seg->tags, "tags"))
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2013 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -61,9 +61,6 @@ static const struct flag _lv_flags[] = {
{LOCKED, "LOCKED", STATUS_FLAG},
{LV_NOTSYNCED, "NOTSYNCED", STATUS_FLAG},
{LV_REBUILD, "REBUILD", STATUS_FLAG},
{LV_RESHAPE_DELTA_DISKS_PLUS, "RESHAPE_DELTA_DISKS_PLUS", STATUS_FLAG},
{LV_RESHAPE_DELTA_DISKS_MINUS, "RESHAPE_DELTA_DISKS_MINUS", STATUS_FLAG},
{LV_REMOVE_AFTER_RESHAPE, "REMOVE_AFTER_RESHAPE", STATUS_FLAG},
{LV_WRITEMOSTLY, "WRITEMOSTLY", STATUS_FLAG},
{LV_ACTIVATION_SKIP, "ACTIVATION_SKIP", COMPATIBLE_FLAG},
{LV_ERROR_WHEN_FULL, "ERROR_WHEN_FULL", COMPATIBLE_FLAG},
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -354,7 +354,7 @@ static int _read_segment(struct logical_volume *lv, const struct dm_config_node
struct lv_segment *seg;
const struct dm_config_node *sn_child = sn->child;
const struct dm_config_value *cv;
uint32_t area_extents, start_extent, extent_count, reshape_count, data_copies;
uint32_t start_extent, extent_count;
struct segment_type *segtype;
const char *segtype_str;

@@ -375,12 +375,6 @@ static int _read_segment(struct logical_volume *lv, const struct dm_config_node
return 0;
}

if (!_read_int32(sn_child, "reshape_count", &reshape_count))
reshape_count = 0;

if (!_read_int32(sn_child, "data_copies", &data_copies))
data_copies = 1;

segtype_str = SEG_TYPE_NAME_STRIPED;

if (!dm_config_get_str(sn_child, "type", &segtype_str)) {
@@ -395,11 +389,9 @@ static int _read_segment(struct logical_volume *lv, const struct dm_config_node
!segtype->ops->text_import_area_count(sn_child, &area_count))
return_0;

area_extents = segtype->parity_devs ?
raid_rimage_extents(segtype, extent_count, area_count - segtype->parity_devs, data_copies) : extent_count;
if (!(seg = alloc_lv_segment(segtype, lv, start_extent,
extent_count, reshape_count, 0, 0, NULL, area_count,
area_extents, data_copies, 0, 0, 0, NULL))) {
extent_count, 0, 0, NULL, area_count,
extent_count, 0, 0, 0, NULL))) {
log_error("Segment allocation failed");
return 0;
}
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -1278,8 +1278,6 @@ char *lv_attr_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_
repstr[8] = 'm'; /* RAID has 'm'ismatches */
} else if (lv->status & LV_WRITEMOSTLY)
repstr[8] = 'w'; /* sub-LV has 'w'ritemostly */
else if (lv->status & LV_REMOVE_AFTER_RESHAPE)
repstr[8] = 'R'; /* sub-LV got 'R'emoved from raid set by reshaping */
} else if (lvdm->seg_status.type == SEG_STATUS_CACHE) {
if (lvdm->seg_status.cache->fail)
repstr[8] = 'F';
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2003-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -21,13 +21,11 @@
struct lv_segment *alloc_lv_segment(const struct segment_type *segtype,
struct logical_volume *lv,
uint32_t le, uint32_t len,
uint32_t reshape_len,
uint64_t status,
uint32_t stripe_size,
struct logical_volume *log_lv,
uint32_t area_count,
uint32_t area_len,
uint32_t data_copies,
uint32_t chunk_size,
uint32_t region_size,
uint32_t extents_copied,
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2014 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -897,9 +897,8 @@ static uint32_t _round_to_stripe_boundary(struct volume_group *vg, uint32_t exte
/* Round up extents to stripe divisible amount */
if ((size_rest = extents % stripes)) {
new_extents += extend ? stripes - size_rest : -size_rest;
log_print_unless_silent("Rounding size %s (%u extents) %s to stripe boundary size %s(%u extents).",
log_print_unless_silent("Rounding size %s (%u extents) up to stripe boundary size %s (%u extents).",
display_size(vg->cmd, (uint64_t) extents * vg->extent_size), extents,
new_extents < extents ? "down" : "up",
display_size(vg->cmd, (uint64_t) new_extents * vg->extent_size), new_extents);
}

@@ -912,13 +911,11 @@ static uint32_t _round_to_stripe_boundary(struct volume_group *vg, uint32_t exte
struct lv_segment *alloc_lv_segment(const struct segment_type *segtype,
struct logical_volume *lv,
uint32_t le, uint32_t len,
uint32_t reshape_len,
uint64_t status,
uint32_t stripe_size,
struct logical_volume *log_lv,
uint32_t area_count,
uint32_t area_len,
uint32_t data_copies,
uint32_t chunk_size,
uint32_t region_size,
uint32_t extents_copied,
@@ -952,12 +949,10 @@ struct lv_segment *alloc_lv_segment(const struct segment_type *segtype,
seg->lv = lv;
seg->le = le;
seg->len = len;
seg->reshape_len = reshape_len;
seg->status = status;
seg->stripe_size = stripe_size;
seg->area_count = area_count;
seg->area_len = area_len;
seg->data_copies = data_copies ? : lv_raid_data_copies(segtype, area_count);
seg->chunk_size = chunk_size;
seg->region_size = region_size;
seg->extents_copied = extents_copied;
@@ -978,37 +973,6 @@ struct lv_segment *alloc_lv_segment(const struct segment_type *segtype,
return seg;
}

/*
* Temporary helper to return number of data copies for
* RAID segment @seg until seg->data_copies got added
*/
static uint32_t _raid_data_copies(struct lv_segment *seg)
{
/*
* FIXME: needs to change once more than 2 are supported.
* I.e. use seg->data_copies then
*/
if (seg_is_raid10(seg))
return 2;
else if (seg_is_raid1(seg))
return seg->area_count;

return seg->segtype->parity_devs + 1;
}

/* Data image count for RAID segment @seg */
static uint32_t _raid_stripes_count(struct lv_segment *seg)
{
/*
* FIXME: raid10 needs to change once more than
* 2 data_copies and odd # of legs supported.
*/
if (seg_is_raid10(seg))
return seg->area_count / _raid_data_copies(seg);

return seg->area_count - seg->segtype->parity_devs;
}

static int _release_and_discard_lv_segment_area(struct lv_segment *seg, uint32_t s,
uint32_t area_reduction, int with_discard)
{
@@ -1049,39 +1013,32 @@ static int _release_and_discard_lv_segment_area(struct lv_segment *seg, uint32_t
}

if (lv_is_raid_image(lv)) {
/* Calculate the amount of extents to reduce per rmate/rimage LV */
uint32_t rimage_extents;
struct lv_segment *seg1 = first_seg(lv);

/* FIXME: avoid extra seg_is_*() conditionals here */
rimage_extents = raid_rimage_extents(seg1->segtype, area_reduction, seg_is_any_raid0(seg) ? 0 : _raid_stripes_count(seg),
seg_is_raid10(seg) ? 1 :_raid_data_copies(seg));
if (!rimage_extents)
/*
* FIXME: Use lv_reduce not lv_remove
* We use lv_remove for now, because I haven't figured out
* why lv_reduce won't remove the LV.
lv_reduce(lv, area_reduction);
*/
if (area_reduction != seg->area_len) {
log_error("Unable to reduce RAID LV - operation not implemented.");
return 0;

if (seg->meta_areas) {
uint32_t meta_area_reduction;
struct logical_volume *mlv;
struct volume_group *vg = lv->vg;

if (seg_metatype(seg, s) != AREA_LV ||
!(mlv = seg_metalv(seg, s)))
} else {
if (!lv_remove(lv)) {
log_error("Failed to remove RAID image %s.",
display_lvname(lv));
return 0;

meta_area_reduction = raid_rmeta_extents_delta(vg->cmd, lv->le_count, lv->le_count - rimage_extents,
seg->region_size, vg->extent_size);
/* Limit for raid0_meta not having region size set */
if (meta_area_reduction > mlv->le_count ||
!(lv->le_count - rimage_extents))
meta_area_reduction = mlv->le_count;

if (meta_area_reduction &&
!lv_reduce(mlv, meta_area_reduction))
return_0; /* FIXME: any upper level reporting */
}
}

if (!lv_reduce(lv, rimage_extents))
return_0; /* FIXME: any upper level reporting */
/* Remove metadata area if image has been removed */
if (seg->meta_areas && seg_metalv(seg, s) && (area_reduction == seg->area_len)) {
if (!lv_reduce(seg_metalv(seg, s),
seg_metalv(seg, s)->le_count)) {
log_error("Failed to remove RAID meta-device %s.",
display_lvname(seg_metalv(seg, s)));
return 0;
}
}

return 1;
}
@@ -1261,7 +1218,7 @@ static uint32_t _calc_area_multiple(const struct segment_type *segtype,
* the 'stripes' argument will always need to
* be given.
*/
if (segtype_is_raid10(segtype)) {
if (!strcmp(segtype->name, _lv_type_names[LV_TYPE_RAID10])) {
if (!stripes)
return area_count / 2;
return stripes;
@@ -1281,35 +1238,25 @@ static uint32_t _calc_area_multiple(const struct segment_type *segtype,
static int _lv_segment_reduce(struct lv_segment *seg, uint32_t reduction)
{
uint32_t area_reduction, s;
uint32_t areas = (seg->area_count / (seg_is_raid10(seg) ? seg->data_copies : 1)) - seg->segtype->parity_devs;

/* Caller must ensure exact divisibility */
// if (!seg_is_raid10(seg) && (seg_is_striped(seg) || seg_is_striped_raid(seg))) {
if (seg_is_striped(seg) || seg_is_striped_raid(seg)) {
if (reduction % areas) {
if (seg_is_striped(seg)) {
if (reduction % seg->area_count) {
log_error("Segment extent reduction %" PRIu32
" not divisible by #stripes %" PRIu32,
reduction, seg->area_count);
return 0;
}
area_reduction = reduction / areas;
area_reduction = (reduction / seg->area_count);
} else
area_reduction = reduction;

//printf("%s[%u] seg->lv=%s seg->len=%u seg->area_len=%u area_reduction=%u\n", __func__, __LINE__, seg->lv ? seg->lv->name : "?", seg->len, seg->area_len, area_reduction);
for (s = 0; s < seg->area_count; s++)
if (!release_and_discard_lv_segment_area(seg, s, area_reduction))
return_0;

//printf("%s[%u] seg->lv=%s seg->len=%u seg->area_len=%u area_reduction=%u\n", __func__, __LINE__, seg->lv ? seg->lv->name : "?", seg->len, seg->area_len, area_reduction);
seg->len -= reduction;
//pprintf("%s[%u] seg->lv=%s seg->len=%u seg->area_len=%u area_reduction=%u\n", __func__, __LINE__, seg->lv ? seg->lv->name : "?", seg->len, seg->area_len, area_reduction);

if (seg_is_raid(seg))
seg->area_len = seg->len;
else
seg->area_len -= area_reduction;
//printf("%s[%u] seg->lv=%s seg->len=%u seg->area_len=%u area_reduction=%u\n", __func__, __LINE__, seg->lv ? seg->lv->name : "?", seg->len, seg->area_len, area_reduction);
seg->area_len -= area_reduction;

return 1;
}
@@ -1319,13 +1266,11 @@ static int _lv_segment_reduce(struct lv_segment *seg, uint32_t reduction)
*/
static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
{
struct lv_segment *seg = first_seg(lv);;
struct lv_segment *seg;
uint32_t count = extents;
uint32_t reduction;
struct logical_volume *pool_lv;
struct logical_volume *external_lv = NULL;
int is_raid10 = seg_is_any_raid10(seg) && seg->reshape_len;
uint32_t data_copies = seg->data_copies;

if (lv_is_merging_origin(lv)) {
log_debug_metadata("Dropping snapshot merge of %s to removed origin %s.",
@@ -1333,7 +1278,6 @@ static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
clear_snapshot_merge(lv);
}

//printf("%s[%u] lv=%s is_raid10=%d le_count=%u extents=%u lv->size=%s seg->len=%u seg->area_len=%u seg->reshape_len=%u\n", __func__, __LINE__, lv->name, is_raid10, lv->le_count, extents, display_size(lv->vg->cmd, lv->size), seg ? seg->len : 4711, seg ? seg->area_len : 4711, seg->reshape_len);
dm_list_iterate_back_items(seg, &lv->segments) {
if (!count)
break;
@@ -1389,21 +1333,11 @@ static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
count -= reduction;
}

seg = first_seg(lv);
//printf("%s[%u] lv=%s le_count=%u extents=%u lv->size=%s seg->len=%u seg->area_len=%u\n", __func__, __LINE__, lv->name, lv->le_count, extents, display_size(lv->vg->cmd, lv->size), seg ? seg->len : 4711, seg ? seg->area_len : 4711);
if (is_raid10) {
lv->le_count -= extents * data_copies;
if (seg)
seg->len = seg->area_len = lv->le_count;
} else
lv->le_count -= extents;

lv->le_count -= extents;
lv->size = (uint64_t) lv->le_count * lv->vg->extent_size;
//printf("%s[%u] lv=%s le_count=%u lv->size=%s seg->len=%u seg->area_len=%u\n", __func__, __LINE__, lv->name, lv->le_count, display_size(lv->vg->cmd, lv->size), seg ? seg->len : 4711, seg ? seg->area_len : 4711);

if (!delete)
return 1;
//printf("%s[%u] lv=%s le_count=%u lv->size=%s seg->len=%u seg->area_len=%u\n", __func__, __LINE__, lv->name, lv->le_count, display_size(lv->vg->cmd, lv->size), seg ? seg->len : 4711, seg ? seg->area_len : 4711);

if (lv == lv->vg->pool_metadata_spare_lv) {
lv->status &= ~POOL_METADATA_SPARE;
@@ -1511,13 +1445,6 @@ int lv_refresh_suspend_resume(const struct logical_volume *lv)
*/
int lv_reduce(struct logical_volume *lv, uint32_t extents)
{
struct lv_segment *seg = first_seg(lv);

/* Ensure stipe boundary extents on RAID LVs */
if (lv_is_raid(lv) && extents != lv->le_count)
extents =_round_to_stripe_boundary(lv->vg, extents,
seg_is_raid1(seg) ? 0 : _raid_stripes_count(seg), 0);

return _lv_reduce(lv, extents, 1);
}

@@ -1819,10 +1746,10 @@ static int _setup_alloced_segment(struct logical_volume *lv, uint64_t status,
area_multiple = _calc_area_multiple(segtype, area_count, 0);
extents = aa[0].len * area_multiple;

if (!(seg = alloc_lv_segment(segtype, lv, lv->le_count, extents, 0,
if (!(seg = alloc_lv_segment(segtype, lv, lv->le_count, extents,
status, stripe_size, NULL,
area_count,
aa[0].len, 0, 0u, region_size, 0u, NULL))) {
aa[0].len, 0u, region_size, 0u, NULL))) {
log_error("Couldn't allocate new LV segment.");
return 0;
}
@@ -1834,7 +1761,7 @@ static int _setup_alloced_segment(struct logical_volume *lv, uint64_t status,
dm_list_add(&lv->segments, &seg->list);

extents = aa[0].len * area_multiple;
//printf("%s[%u] le_count=%u extents=%u\n", __func__, __LINE__, lv->le_count, extents);

if (!_setup_lv_size(lv, lv->le_count + extents))
return_0;

@@ -3260,9 +3187,9 @@ int lv_add_virtual_segment(struct logical_volume *lv, uint64_t status,
seg->area_len += extents;
seg->len += extents;
} else {
if (!(seg = alloc_lv_segment(segtype, lv, lv->le_count, extents, 0,
if (!(seg = alloc_lv_segment(segtype, lv, lv->le_count, extents,
status, 0, NULL, 0,
extents, 0, 0, 0, 0, NULL))) {
extents, 0, 0, 0, NULL))) {
log_error("Couldn't allocate new %s segment.", segtype->name);
return 0;
}
@@ -3381,24 +3308,19 @@ static struct alloc_handle *_alloc_init(struct cmd_context *cmd,

if (segtype_is_raid(segtype)) {
if (metadata_area_count) {
uint32_t cur_rimage_extents, new_rimage_extents;

if (metadata_area_count != area_count)
log_error(INTERNAL_ERROR
"Bad metadata_area_count");
ah->metadata_area_count = area_count;
ah->alloc_and_split_meta = 1;

ah->log_len = RAID_METADATA_AREA_LEN;

/* Calculate log_len (i.e. length of each rmeta device) for RAID */
cur_rimage_extents = raid_rimage_extents(segtype, existing_extents, stripes, mirrors);
new_rimage_extents = raid_rimage_extents(segtype, existing_extents + new_extents, stripes, mirrors),
ah->log_len = raid_rmeta_extents_delta(cmd, cur_rimage_extents, new_rimage_extents,
region_size, extent_size);
ah->metadata_area_count = metadata_area_count;
ah->alloc_and_split_meta = !!ah->log_len;
/*
* We need 'log_len' extents for each
* RAID device's metadata_area
*/
total_extents += ah->log_len * (segtype_is_raid1(segtype) ? 1 : ah->area_multiple);
total_extents += (ah->log_len * ah->area_multiple);
} else {
ah->log_area_count = 0;
ah->log_len = 0;
@@ -3588,10 +3510,10 @@ static struct lv_segment *_convert_seg_to_mirror(struct lv_segment *seg,
}

if (!(newseg = alloc_lv_segment(get_segtype_from_string(seg->lv->vg->cmd, SEG_TYPE_NAME_MIRROR),
seg->lv, seg->le, seg->len, 0,
seg->lv, seg->le, seg->len,
seg->status, seg->stripe_size,
log_lv,
seg->area_count, seg->area_len, 0,
seg->area_count, seg->area_len,
seg->chunk_size, region_size,
seg->extents_copied, NULL))) {
log_error("Couldn't allocate converted LV segment.");
@@ -3693,8 +3615,8 @@ int lv_add_segmented_mirror_image(struct alloc_handle *ah,
}

if (!(new_seg = alloc_lv_segment(segtype, copy_lv,
seg->le, seg->len, 0, PVMOVE, 0,
NULL, 1, seg->len, 0,
seg->le, seg->len, PVMOVE, 0,
NULL, 1, seg->len,
0, 0, 0, NULL)))
return_0;

@@ -3889,9 +3811,9 @@ static int _lv_insert_empty_sublvs(struct logical_volume *lv,
/*
* First, create our top-level segment for our top-level LV
*/
if (!(mapseg = alloc_lv_segment(segtype, lv, 0, 0, 0, lv->status,
if (!(mapseg = alloc_lv_segment(segtype, lv, 0, 0, lv->status,
stripe_size, NULL,
devices, 0, 0, 0, region_size, 0, NULL))) {
devices, 0, 0, region_size, 0, NULL))) {
log_error("Failed to create mapping segment for %s.",
display_lvname(lv));
return 0;
@@ -4089,16 +4011,25 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
lv_set_hidden(seg_metalv(seg, s));
}

seg->area_len += extents / area_multiple;
seg->len += extents;
if (seg_is_raid(seg))
seg->area_len = seg->len;
else
seg->area_len += extents / area_multiple;

//pprintf("%s[%u] le_count=%u extents=%u seg->len=%u seg-area_len=%u\n", __func__, __LINE__, lv->le_count, extents, seg->len, seg->area_len);
if (!_setup_lv_size(lv, lv->le_count + extents))
return_0;

/*
* The MD bitmap is limited to being able to track 2^21 regions.
* The region_size must be adjusted to meet that criteria
* unless raid0/raid0_meta, which doesn't have a bitmap.
*/
if (seg_is_raid(seg) && !seg_is_any_raid0(seg))
while (seg->region_size < (lv->size / (1 << 21))) {
seg->region_size *= 2;
log_very_verbose("Adjusting RAID region_size from %uS to %uS"
" to support large LV size",
seg->region_size/2, seg->region_size);
}

return 1;
}

@@ -4125,7 +4056,6 @@ int lv_extend(struct logical_volume *lv,
uint32_t sub_lv_count;
uint32_t old_extents;
uint32_t new_extents; /* Total logical size after extension. */
uint64_t raid_size;

log_very_verbose("Adding segment of type %s to LV %s.", segtype->name, lv->name);

@@ -4147,22 +4077,6 @@ int lv_extend(struct logical_volume *lv,
}
/* FIXME log_count should be 1 for mirrors */

if (segtype_is_raid(segtype) && !segtype_is_any_raid0(segtype)) {
raid_size = ((uint64_t) lv->le_count + extents) * lv->vg->extent_size;

/*
* The MD bitmap is limited to being able to track 2^21 regions.
* The region_size must be adjusted to meet that criteria
* unless raid0/raid0_meta, which doesn't have a bitmap.
*/

region_size = raid_ensure_min_region_size(lv, raid_size, region_size);

if (first_seg(lv))
first_seg(lv)->region_size = region_size;

}

if (!(ah = allocate_extents(lv->vg, lv, segtype, stripes, mirrors,
log_count, region_size, extents,
allocatable_pvs, alloc, approx_alloc, NULL)))
@@ -4743,11 +4657,6 @@ static uint32_t lvseg_get_stripes(struct lv_segment *seg, uint32_t *stripesize)
return seg->area_count;
}

if (seg_is_raid(seg)) {
*stripesize = seg->stripe_size;
return _raid_stripes_count(seg);
}

*stripesize = 0;
return 0;
}
@@ -5413,7 +5322,6 @@ int lv_resize(struct logical_volume *lv,
struct logical_volume *lock_lv = (struct logical_volume*) lv_lock_holder(lv);
struct logical_volume *aux_lv = NULL; /* Note: aux_lv never resizes fs */
struct lvresize_params aux_lp;
struct lv_segment *seg = first_seg(lv);
int activated = 0;
int ret = 0;
int status;
@@ -5455,11 +5363,6 @@ int lv_resize(struct logical_volume *lv,
}
}

/* Ensure stripe boundary extents! */
if (!lp->percent && lv_is_raid(lv))
lp->extents =_round_to_stripe_boundary(lv->vg, lp->extents,
seg_is_raid1(seg) ? 0 : _raid_stripes_count(seg),
lp->resize == LV_REDUCE ? 0 : 1);
if (aux_lv && !_lvresize_prepare(&aux_lv, &aux_lp, pvh))
return_0;

@@ -6330,6 +6233,7 @@ static int _lv_update_and_reload(struct logical_volume *lv, int origin_only)

log_very_verbose("Updating logical volume %s on disk(s)%s.",
display_lvname(lock_lv), origin_only ? " (origin only)": "");

if (!vg_write(vg))
return_0;

@@ -6796,8 +6700,8 @@ struct logical_volume *insert_layer_for_lv(struct cmd_context *cmd,
return_NULL;

/* allocate a new linear segment */
if (!(mapseg = alloc_lv_segment(segtype, lv_where, 0, layer_lv->le_count, 0,
status, 0, NULL, 1, layer_lv->le_count, 0,
if (!(mapseg = alloc_lv_segment(segtype, lv_where, 0, layer_lv->le_count,
status, 0, NULL, 1, layer_lv->le_count,
0, 0, 0, NULL)))
return_NULL;

@@ -6853,8 +6757,8 @@ static int _extend_layer_lv_for_segment(struct logical_volume *layer_lv,

/* allocate a new segment */
if (!(mapseg = alloc_lv_segment(segtype, layer_lv, layer_lv->le_count,
seg->area_len, 0, status, 0,
NULL, 1, seg->area_len, 0, 0, 0, 0, seg)))
seg->area_len, status, 0,
NULL, 1, seg->area_len, 0, 0, 0, seg)))
return_0;

/* map the new segment to the original underlying are */
@@ -236,7 +236,7 @@ static void _check_raid_seg(struct lv_segment *seg, int *error_count)
if (!seg->areas)
raid_seg_error("zero areas");

if (seg->extents_copied > seg->len)
if (seg->extents_copied > seg->area_len)
raid_seg_error_val("extents_copied too large", seg->extents_copied);

/* Default < 10, change once raid1 split shift and rename SubLVs works! */
@@ -475,7 +475,7 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
struct lv_segment *seg, *seg2;
uint32_t le = 0;
unsigned seg_count = 0, seg_found, external_lv_found = 0;
uint32_t data_rimage_count, s;
uint32_t area_multiplier, s;
struct seg_list *sl;
struct glv_list *glvl;
int error_count = 0;
@@ -498,14 +498,13 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
inc_error_count;
}

area_multiplier = segtype_is_striped(seg->segtype) ?
seg->area_count : 1;

data_rimage_count = seg->area_count - seg->segtype->parity_devs;
/* FIXME: raid varies seg->area_len? */
if (seg->len != seg->area_len &&
seg->len != seg->area_len * data_rimage_count) {
log_error("LV %s: segment %u with len=%u "
" has inconsistent area_len %u",
lv->name, seg_count, seg->len, seg->area_len);
if (seg->area_len * area_multiplier != seg->len) {
log_error("LV %s: segment %u has inconsistent "
"area_len %u",
lv->name, seg_count, seg->area_len);
inc_error_count;
}

@@ -767,10 +766,10 @@ static int _lv_split_segment(struct logical_volume *lv, struct lv_segment *seg,

/* Clone the existing segment */
if (!(split_seg = alloc_lv_segment(seg->segtype,
seg->lv, seg->le, seg->len, seg->reshape_len,
seg->lv, seg->le, seg->len,
seg->status, seg->stripe_size,
seg->log_lv,
seg->area_count, seg->area_len, seg->data_copies,
seg->area_count, seg->area_len,
seg->chunk_size, seg->region_size,
seg->extents_copied, seg->pvmove_source_seg))) {
log_error("Couldn't allocate cloned LV segment.");
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -137,11 +137,7 @@
|
||||
e.g. to prohibit allocation of a RAID image
|
||||
on a PV already holding an image of the RAID set */
|
||||
#define LOCKD_SANLOCK_LV UINT64_C(0x0080000000000000) /* LV - Internal use only */
|
||||
#define LV_RESHAPE_DELTA_DISKS_PLUS UINT64_C(0x0100000000000000) /* LV reshape flag delta disks plus image(s) */
|
||||
#define LV_RESHAPE_DELTA_DISKS_MINUS UINT64_C(0x0200000000000000) /* LV reshape flag delta disks minus image(s) */
|
||||
|
||||
#define LV_REMOVE_AFTER_RESHAPE UINT64_C(0x0400000000000000) /* LV needs to be removed after a shrinking reshape */
|
||||
/* Next unused flag: UINT64_C(0x0800000000000000) */
|
||||
/* Next unused flag: UINT64_C(0x0100000000000000) */
|
||||
|
||||
/* Format features flags */
|
||||
#define FMT_SEGMENTS 0x00000001U /* Arbitrary segment params? */
|
||||
@@ -450,7 +446,6 @@ struct lv_segment {
|
||||
const struct segment_type *segtype;
|
||||
uint32_t le;
|
||||
uint32_t len;
|
||||
uint32_t reshape_len; /* For RAID: user hidden additional out of place reshaping length off area_len and len */
|
||||
|
||||
uint64_t status;
|
||||
|
||||
@@ -459,7 +454,6 @@ struct lv_segment {
|
||||
uint32_t writebehind; /* For RAID (RAID1 only) */
|
||||
uint32_t min_recovery_rate; /* For RAID */
|
||||
uint32_t max_recovery_rate; /* For RAID */
|
||||
uint32_t data_offset; /* For RAID: data offset in sectors on each data component image */
|
||||
uint32_t area_count;
|
||||
uint32_t area_len;
|
||||
uint32_t chunk_size; /* For snapshots/thin_pool. In sectors. */
|
||||
@@ -470,7 +464,6 @@ struct lv_segment {
|
||||
struct logical_volume *cow;
|
||||
struct dm_list origin_list;
|
||||
uint32_t region_size; /* For mirrors, replicators - in sectors */
|
||||
uint32_t data_copies; /* For RAID: number of data copies (e.g. 3 for RAID 6 */
|
||||
uint32_t extents_copied;/* Number of extents synced for raids/mirrors */
|
||||
struct logical_volume *log_lv;
|
||||
struct lv_segment *pvmove_source_seg;
|
||||
@@ -1212,8 +1205,7 @@ struct logical_volume *first_replicator_dev(const struct logical_volume *lv);
|
||||
int lv_is_raid_with_tracking(const struct logical_volume *lv);
|
||||
uint32_t lv_raid_image_count(const struct logical_volume *lv);
|
||||
int lv_raid_change_image_count(struct logical_volume *lv,
|
||||
uint32_t new_count, const uint32_t region_size,
|
||||
struct dm_list *allocate_pvs);
|
||||
uint32_t new_count, struct dm_list *allocate_pvs);
|
||||
int lv_raid_split(struct logical_volume *lv, const char *split_name,
|
||||
uint32_t new_count, struct dm_list *splittable_pvs);
|
||||
int lv_raid_split_and_track(struct logical_volume *lv,
|
||||
@@ -1232,16 +1224,8 @@ int lv_raid_replace(struct logical_volume *lv, int force,
|
||||
struct dm_list *remove_pvs, struct dm_list *allocate_pvs);
|
||||
int lv_raid_remove_missing(struct logical_volume *lv);
|
||||
int partial_raid_lv_supports_degraded_activation(const struct logical_volume *lv);
|
||||
uint32_t raid_rmeta_extents_delta(struct cmd_context *cmd,
|
||||
uint32_t rimage_extents_cur, uint32_t rimage_extents_new,
|
||||
uint32_t region_size, uint32_t extent_size);
|
||||
uint32_t raid_rimage_extents(const struct segment_type *segtype,
|
||||
uint32_t extents, uint32_t stripes, uint32_t data_copies);
|
||||
uint32_t raid_ensure_min_region_size(const struct logical_volume *lv, uint64_t raid_size, uint32_t region_size);
|
||||
int lv_raid_change_region_size(struct logical_volume *lv,
|
||||
int yes, int force, uint32_t new_region_size);
|
||||
uint32_t lv_raid_data_copies(const struct segment_type *segtype, uint32_t area_count);
|
||||
int lv_raid_in_sync(const struct logical_volume *lv);
|
||||
/* -- metadata/raid_manip.c */
|
||||
|
||||
/* ++ metadata/cache_manip.c */
|
||||
|
@@ -1256,7 +1256,7 @@ uint32_t extents_from_percent_size(struct volume_group *vg, const struct dm_list
|
||||
}
|
||||
break;
|
||||
}
|
||||
/* fall through to use all PVs in VG like %FREE */
|
||||
/* Fall back to use all PVs in VG like %FREE */
|
||||
case PERCENT_FREE:
|
||||
if (!(extents = vg->free_count)) {
|
||||
log_error("No free extents in Volume group %s.", vg->name);
|
||||
@@ -6388,7 +6388,7 @@ int vg_strip_outdated_historical_lvs(struct volume_group *vg) {
|
||||
* Removal time in the future? Not likely,
|
||||
* but skip this item in any case.
|
||||
*/
|
||||
if (current_time < (time_t) glvl->glv->historical->timestamp_removed)
|
||||
if ((current_time) < glvl->glv->historical->timestamp_removed)
|
||||
continue;
|
||||
|
||||
if ((current_time - glvl->glv->historical->timestamp_removed) > threshold) {
|
||||
|
File diff suppressed because it is too large
@@ -43,8 +43,7 @@ struct segment_type *get_segtype_from_flag(struct cmd_context *cmd, uint64_t fla
{
	struct segment_type *segtype;

	/* Iterate backwards to provide aliases; e.g. raid5 instead of raid5_ls */
	dm_list_iterate_back_items(segtype, &cmd->segtypes)
	dm_list_iterate_items(segtype, &cmd->segtypes)
		if (flag & segtype->flags)
			return segtype;
|
||||
|
||||
|
@@ -50,8 +50,7 @@ struct dev_manager;
|
||||
#define SEG_RAID0 0x0000000000040000ULL
|
||||
#define SEG_RAID0_META 0x0000000000080000ULL
|
||||
#define SEG_RAID1 0x0000000000100000ULL
|
||||
#define SEG_RAID10_NEAR 0x0000000000200000ULL
|
||||
#define SEG_RAID10 SEG_RAID10_NEAR
|
||||
#define SEG_RAID10 0x0000000000200000ULL
|
||||
#define SEG_RAID4 0x0000000000400000ULL
|
||||
#define SEG_RAID5_N 0x0000000000800000ULL
|
||||
#define SEG_RAID5_LA 0x0000000001000000ULL
|
||||
@@ -140,11 +139,7 @@ struct dev_manager;
|
||||
#define segtype_is_any_raid10(segtype) ((segtype)->flags & SEG_RAID10 ? 1 : 0)
|
||||
#define segtype_is_raid10(segtype) ((segtype)->flags & SEG_RAID10 ? 1 : 0)
|
||||
#define segtype_is_raid10_near(segtype) segtype_is_raid10(segtype)
|
||||
/* FIXME: once raid10_offset supported */
|
||||
#define segtype_is_raid10_offset(segtype) 0 // ((segtype)->flags & SEG_RAID10_OFFSET ? 1 : 0)
|
||||
#define segtype_is_raid_with_meta(segtype) (segtype_is_raid(segtype) && !segtype_is_raid0(segtype))
|
||||
#define segtype_is_striped_raid(segtype) (segtype_is_raid(segtype) && !segtype_is_raid1(segtype))
|
||||
#define segtype_is_reshapable_raid(segtype) ((segtype_is_striped_raid(segtype) && !segtype_is_any_raid0(segtype)) || segtype_is_raid10_near(segtype) || segtype_is_raid10_offset(segtype))
|
||||
#define segtype_is_snapshot(segtype) ((segtype)->flags & SEG_SNAPSHOT ? 1 : 0)
|
||||
#define segtype_is_striped(segtype) ((segtype)->flags & SEG_AREAS_STRIPED ? 1 : 0)
|
||||
#define segtype_is_thin(segtype) ((segtype)->flags & (SEG_THIN_POOL|SEG_THIN_VOLUME) ? 1 : 0)
|
||||
@@ -194,8 +189,6 @@ struct dev_manager;
|
||||
#define seg_is_raid10(seg) segtype_is_raid10((seg)->segtype)
|
||||
#define seg_is_raid10_near(seg) segtype_is_raid10_near((seg)->segtype)
|
||||
#define seg_is_raid_with_meta(seg) segtype_is_raid_with_meta((seg)->segtype)
|
||||
#define seg_is_striped_raid(seg) segtype_is_striped_raid((seg)->segtype)
|
||||
#define seg_is_reshapable_raid(seg) segtype_is_reshapable_raid((seg)->segtype)
|
||||
#define seg_is_replicator(seg) ((seg)->segtype->flags & SEG_REPLICATOR ? 1 : 0)
|
||||
#define seg_is_replicator_dev(seg) ((seg)->segtype->flags & SEG_REPLICATOR_DEV ? 1 : 0)
|
||||
#define seg_is_snapshot(seg) segtype_is_snapshot((seg)->segtype)
|
||||
@@ -286,7 +279,6 @@ struct segment_type *init_unknown_segtype(struct cmd_context *cmd,
|
||||
#define RAID_FEATURE_RAID0 (1U << 1) /* version 1.7 */
|
||||
#define RAID_FEATURE_RESHAPING (1U << 2) /* version 1.8 */
|
||||
#define RAID_FEATURE_RAID4 (1U << 3) /* ! version 1.8 or 1.9.0 */
|
||||
#define RAID_FEATURE_RESHAPE (1U << 4) /* version 1.10.2 */
|
||||
|
||||
#ifdef RAID_INTERNAL
|
||||
int init_raid_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);
|
||||
|
@@ -238,8 +238,8 @@ static struct lv_segment *_alloc_snapshot_seg(struct logical_volume *lv)
		return NULL;
	}

	if (!(seg = alloc_lv_segment(segtype, lv, 0, lv->le_count, 0, 0, 0,
				     NULL, 0, lv->le_count, 0, 0, 0, 0, NULL))) {
	if (!(seg = alloc_lv_segment(segtype, lv, 0, lv->le_count, 0, 0,
				     NULL, 0, lv->le_count, 0, 0, 0, NULL))) {
		log_error("Couldn't allocate new snapshot segment.");
		return NULL;
	}
|
||||
|
@@ -112,7 +112,7 @@ static takeover_fn_t _takeover_fns[][11] = {
|
||||
/* raid1 */ { r1__lin, r1__str, r1__mir, r1__r0, r1__r0m, r1__r1, r1__r45, X , r1__r10, X , X },
|
||||
/* raid4/5 */ { r45_lin, r45_str, r45_mir, r45_r0, r45_r0m, r45_r1, r45_r54, r45_r6, X , X , X },
|
||||
/* raid6 */ { X , r6__str, X , r6__r0, r6__r0m, X , r6__r45, X , X , X , X },
|
||||
/* raid10 */ { r10_lin, r10_str, r10_mir, r10_r0, r10_r0m, r10_r1, X , X , X , X , X },
|
||||
/* raid10 */ // { r10_lin, r10_str, r10_mir, r10_r0, r10_r0m, r10_r1, X , X , r10_r10, r10_r01, X },
|
||||
/* raid01 */ // { X , r01_str, X , X , X , X , X , X , r01_r10, r01_r01, X },
|
||||
/* other */ { X , X , X , X , X , X , X , X , X , X , X },
|
||||
};
|
||||
|
@@ -15,6 +15,7 @@

#include "lib.h"
#include "config.h"
#include "lvm-file.h"
#include "lvm-flock.h"
#include "lvm-signal.h"
#include "locking.h"
|
||||
|
126
lib/raid/raid.c
@@ -137,7 +137,6 @@ static int _raid_text_import(struct lv_segment *seg,
|
||||
} raid_attr_import[] = {
|
||||
{ "region_size", &seg->region_size },
|
||||
{ "stripe_size", &seg->stripe_size },
|
||||
{ "data_copies", &seg->data_copies },
|
||||
{ "writebehind", &seg->writebehind },
|
||||
{ "min_recovery_rate", &seg->min_recovery_rate },
|
||||
{ "max_recovery_rate", &seg->max_recovery_rate },
|
||||
@@ -147,10 +146,6 @@ static int _raid_text_import(struct lv_segment *seg,
|
||||
for (i = 0; i < DM_ARRAY_SIZE(raid_attr_import); i++, aip++) {
|
||||
if (dm_config_has_node(sn, aip->name)) {
|
||||
if (!dm_config_get_uint32(sn, aip->name, aip->var)) {
|
||||
if (!strcmp(aip->name, "data_copies")) {
|
||||
*aip->var = 0;
|
||||
continue;
|
||||
}
|
||||
log_error("Couldn't read '%s' for segment %s of logical volume %s.",
|
||||
aip->name, dm_config_parent_name(sn), seg->lv->name);
|
||||
return 0;
|
||||
@@ -170,9 +165,6 @@ static int _raid_text_import(struct lv_segment *seg,
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (seg->data_copies < 2)
|
||||
seg->data_copies = lv_raid_data_copies(seg->segtype, seg->area_count);
|
||||
|
||||
if (seg_is_any_raid0(seg))
|
||||
seg->area_len /= seg->area_count;
|
||||
|
||||
@@ -191,31 +183,18 @@ static int _raid_text_export_raid0(const struct lv_segment *seg, struct formatte
|
||||
|
||||
static int _raid_text_export_raid(const struct lv_segment *seg, struct formatter *f)
|
||||
{
|
||||
int raid0 = seg_is_any_raid0(seg);
|
||||
|
||||
if (raid0)
|
||||
outfc(f, (seg->area_count == 1) ? "# linear" : NULL,
|
||||
"stripe_count = %u", seg->area_count);
|
||||
|
||||
else {
|
||||
outf(f, "device_count = %u", seg->area_count);
|
||||
if (seg_is_any_raid10(seg) && seg->data_copies > 0)
|
||||
outf(f, "data_copies = %" PRIu32, seg->data_copies);
|
||||
if (seg->region_size)
|
||||
outf(f, "region_size = %" PRIu32, seg->region_size);
|
||||
}
|
||||
outf(f, "device_count = %u", seg->area_count);
|
||||
|
||||
if (seg->stripe_size)
|
||||
outf(f, "stripe_size = %" PRIu32, seg->stripe_size);
|
||||
|
||||
if (!raid0) {
|
||||
if (seg_is_raid1(seg) && seg->writebehind)
|
||||
outf(f, "writebehind = %" PRIu32, seg->writebehind);
|
||||
if (seg->min_recovery_rate)
|
||||
outf(f, "min_recovery_rate = %" PRIu32, seg->min_recovery_rate);
|
||||
if (seg->max_recovery_rate)
|
||||
outf(f, "max_recovery_rate = %" PRIu32, seg->max_recovery_rate);
|
||||
}
|
||||
if (seg->region_size)
|
||||
outf(f, "region_size = %" PRIu32, seg->region_size);
|
||||
if (seg->writebehind)
|
||||
outf(f, "writebehind = %" PRIu32, seg->writebehind);
|
||||
if (seg->min_recovery_rate)
|
||||
outf(f, "min_recovery_rate = %" PRIu32, seg->min_recovery_rate);
|
||||
if (seg->max_recovery_rate)
|
||||
outf(f, "max_recovery_rate = %" PRIu32, seg->max_recovery_rate);
|
||||
|
||||
return out_areas(f, seg, "raid");
|
||||
}
|
||||
@@ -237,16 +216,14 @@ static int _raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
|
||||
struct dm_tree_node *node, uint64_t len,
|
||||
uint32_t *pvmove_mirror_count __attribute__((unused)))
|
||||
{
|
||||
int delta_disks = 0, delta_disks_minus = 0, delta_disks_plus = 0, data_offset = 0;
|
||||
uint32_t s;
|
||||
uint64_t flags = 0;
|
||||
uint64_t rebuilds[4];
|
||||
uint64_t writemostly[4];
|
||||
uint64_t rebuilds = 0;
|
||||
uint64_t writemostly = 0;
|
||||
struct dm_tree_node_raid_params params;
|
||||
int raid0 = seg_is_any_raid0(seg);
|
||||
|
||||
memset(¶ms, 0, sizeof(params));
|
||||
memset(&rebuilds, 0, sizeof(rebuilds));
|
||||
memset(&writemostly, 0, sizeof(writemostly));
|
||||
|
||||
if (!seg->area_count) {
|
||||
log_error(INTERNAL_ERROR "_raid_add_target_line called "
|
||||
@@ -255,84 +232,63 @@ static int _raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
|
||||
}
|
||||
|
||||
/*
|
||||
* 253 device restriction imposed by kernel due to MD and dm-raid bitfield limitation in superblock.
|
||||
* It is not strictly a userspace limitation.
|
||||
* 64 device restriction imposed by kernel as well. It is
|
||||
* not strictly a userspace limitation.
|
||||
*/
|
||||
if (seg->area_count > DEFAULT_RAID_MAX_IMAGES) {
|
||||
log_error("Unable to handle more than %u devices in a "
|
||||
"single RAID array", DEFAULT_RAID_MAX_IMAGES);
|
||||
if (seg->area_count > 64) {
|
||||
log_error("Unable to handle more than 64 devices in a "
|
||||
"single RAID array");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!seg_is_any_raid0(seg)) {
|
||||
if (!raid0) {
|
||||
if (!seg->region_size) {
|
||||
log_error("Missing region size for raid segment in %s.",
|
||||
seg_lv(seg, 0)->name);
|
||||
log_error("Missing region size for mirror segment.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (s = 0; s < seg->area_count; s++) {
|
||||
uint64_t status = seg_lv(seg, s)->status;
|
||||
for (s = 0; s < seg->area_count; s++)
|
||||
if (seg_lv(seg, s)->status & LV_REBUILD)
|
||||
rebuilds |= 1ULL << s;
|
||||
|
||||
if (status & LV_REBUILD)
|
||||
rebuilds[s/64] |= 1ULL << (s%64);
|
||||
|
||||
if (status & LV_RESHAPE_DELTA_DISKS_PLUS) {
|
||||
delta_disks++;
|
||||
delta_disks_plus++;
|
||||
} else if (status & LV_RESHAPE_DELTA_DISKS_MINUS) {
|
||||
delta_disks--;
|
||||
delta_disks_minus++;
|
||||
}
|
||||
|
||||
if (delta_disks_plus && delta_disks_minus) {
|
||||
log_error(INTERNAL_ERROR "Invalid request for delta disks minus and delta disks plus!");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (status & LV_WRITEMOSTLY)
|
||||
writemostly[s/64] |= 1ULL << (s%64);
|
||||
}
|
||||
|
||||
data_offset = seg->data_offset;
|
||||
for (s = 0; s < seg->area_count; s++)
|
||||
if (seg_lv(seg, s)->status & LV_WRITEMOSTLY)
|
||||
writemostly |= 1ULL << s;
|
||||
|
||||
if (mirror_in_sync())
|
||||
flags = DM_NOSYNC;
|
||||
}
|
||||
|
||||
params.raid_type = lvseg_name(seg);
|
||||
params.stripe_size = seg->stripe_size;
|
||||
params.flags = flags;
|
||||
|
||||
if (seg->segtype->parity_devs) {
|
||||
if (raid0) {
|
||||
params.mirrors = 1;
|
||||
params.stripes = seg->area_count;
|
||||
} else if (seg->segtype->parity_devs) {
|
||||
/* RAID 4/5/6 */
|
||||
params.mirrors = 1;
|
||||
params.stripes = seg->area_count - seg->segtype->parity_devs;
|
||||
} else if (seg_is_any_raid0(seg)) {
|
||||
params.mirrors = 1;
|
||||
params.stripes = seg->area_count;
|
||||
} else if (seg_is_any_raid10(seg)) {
|
||||
params.data_copies = seg->data_copies;
|
||||
params.stripes = seg->area_count;
|
||||
} else if (seg_is_raid10(seg)) {
|
||||
/* RAID 10 only supports 2 mirrors now */
|
||||
params.mirrors = 2;
|
||||
params.stripes = seg->area_count / 2;
|
||||
} else {
|
||||
/* RAID 1 */
|
||||
params.mirrors = seg->data_copies;
|
||||
params.mirrors = seg->area_count;
|
||||
params.stripes = 1;
|
||||
params.writebehind = seg->writebehind;
|
||||
memcpy(params.writemostly, writemostly, sizeof(params.writemostly));
|
||||
}
|
||||
|
||||
/* RAID 0 doesn't have a bitmap, thus no region_size, rebuilds etc. */
|
||||
if (!seg_is_any_raid0(seg)) {
|
||||
if (!raid0) {
|
||||
params.region_size = seg->region_size;
|
||||
memcpy(params.rebuilds, rebuilds, sizeof(params.rebuilds));
|
||||
params.rebuilds = rebuilds;
|
||||
params.writemostly = writemostly;
|
||||
params.min_recovery_rate = seg->min_recovery_rate;
|
||||
params.max_recovery_rate = seg->max_recovery_rate;
|
||||
params.delta_disks = delta_disks;
|
||||
params.data_offset = data_offset;
|
||||
}
|
||||
|
||||
params.stripe_size = seg->stripe_size;
|
||||
params.flags = flags;
|
||||
|
||||
if (!dm_tree_node_add_raid_target_with_params(node, len, ¶ms))
|
||||
return_0;
|
||||
|
||||
@@ -494,10 +450,6 @@ static int _raid_target_present(struct cmd_context *cmd,
|
||||
else
|
||||
log_very_verbose("Target raid does not support %s.",
|
||||
SEG_TYPE_NAME_RAID4);
|
||||
|
||||
if (maj > 1 ||
|
||||
(maj == 1 && (min > 10 || (min == 10 && patchlevel >= 2))))
|
||||
_raid_attrs |= RAID_FEATURE_RESHAPE;
|
||||
}
|
||||
|
||||
if (attributes)
|
||||
|
@@ -42,7 +42,7 @@ static int _pthread_create(pthread_t *t, void *(*fun)(void *), void *arg, int st
	/*
	 * We use a smaller stack since it gets preallocated in its entirety
	 */
	pthread_attr_setstacksize(&attr, stacksize + getpagesize());
	pthread_attr_setstacksize(&attr, stacksize);
	return pthread_create(t, &attr, fun, arg);
}
#endif
|
||||
|
@@ -331,7 +331,6 @@ struct dm_status_raid {
|
||||
char *dev_health;
|
||||
/* idle, frozen, resync, recover, check, repair */
|
||||
char *sync_action;
|
||||
uint64_t data_offset; /* RAID out-of-place reshaping */
|
||||
};
|
||||
|
||||
int dm_get_status_raid(struct dm_pool *mem, const char *params,
|
||||
@@ -1720,7 +1719,7 @@ int dm_tree_node_add_raid_target(struct dm_tree_node *node,
|
||||
const char *raid_type,
|
||||
uint32_t region_size,
|
||||
uint32_t stripe_size,
|
||||
uint64_t *rebuilds,
|
||||
uint64_t rebuilds,
|
||||
uint64_t flags);
|
||||
|
||||
/*
|
||||
@@ -1747,22 +1746,18 @@ struct dm_tree_node_raid_params {
|
||||
uint32_t region_size;
|
||||
uint32_t stripe_size;
|
||||
|
||||
int delta_disks; /* +/- number of disks to add/remove (reshaping) */
|
||||
int data_offset; /* data offset to set (out-of-place reshaping) */
|
||||
|
||||
/*
|
||||
* 'rebuilds' and 'writemostly' are bitfields that signify
|
||||
* which devices in the array are to be rebuilt or marked
|
||||
* writemostly. By choosing a 'uint64_t', we limit ourself
|
||||
* to RAID arrays with 64 devices.
|
||||
*/
|
||||
uint64_t rebuilds[4];
|
||||
uint64_t writemostly[4];
|
||||
uint32_t writebehind; /* I/Os (kernel default COUNTER_MAX / 2) */
|
||||
uint64_t rebuilds;
|
||||
uint64_t writemostly;
|
||||
uint32_t writebehind; /* I/Os (kernel default COUNTER_MAX / 2) */
|
||||
uint32_t sync_daemon_sleep; /* ms (kernel default = 5sec) */
|
||||
uint32_t max_recovery_rate; /* kB/sec/disk */
|
||||
uint32_t min_recovery_rate; /* kB/sec/disk */
|
||||
uint32_t data_copies; /* RAID # of data copies */
|
||||
uint32_t stripe_cache; /* sectors */
|
||||
|
||||
uint64_t flags; /* [no]sync */
|
||||
|
@@ -23,8 +23,6 @@
|
||||
#define DEV_NAME(dmt) (dmt->mangled_dev_name ? : dmt->dev_name)
|
||||
#define DEV_UUID(DMT) (dmt->mangled_uuid ? : dmt->uuid)
|
||||
|
||||
#define RAID_BITMAP_SIZE 4
|
||||
|
||||
int mangle_string(const char *str, const char *str_name, size_t len,
|
||||
char *buf, size_t buf_len, dm_string_mangling_t mode);
|
||||
|
||||
|
@@ -205,14 +205,11 @@ struct load_segment {
|
||||
struct dm_tree_node *replicator;/* Replicator-dev */
|
||||
uint64_t rdevice_index; /* Replicator-dev */
|
||||
|
||||
int delta_disks; /* raid reshape number of disks */
|
||||
int data_offset; /* raid reshape data offset on disk to set */
|
||||
uint64_t rebuilds[RAID_BITMAP_SIZE]; /* raid */
|
||||
uint64_t writemostly[RAID_BITMAP_SIZE]; /* raid */
|
||||
uint64_t rebuilds; /* raid */
|
||||
uint64_t writemostly; /* raid */
|
||||
uint32_t writebehind; /* raid */
|
||||
uint32_t max_recovery_rate; /* raid kB/sec/disk */
|
||||
uint32_t min_recovery_rate; /* raid kB/sec/disk */
|
||||
uint32_t data_copies; /* raid10 data_copies */
|
||||
|
||||
struct dm_tree_node *metadata; /* Thin_pool + Cache */
|
||||
struct dm_tree_node *pool; /* Thin_pool, Thin */
|
||||
@@ -2356,21 +2353,16 @@ static int _mirror_emit_segment_line(struct dm_task *dmt, struct load_segment *s
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int _2_if_value(unsigned p)
|
||||
{
|
||||
return p ? 2 : 0;
|
||||
}
|
||||
/* Is parameter non-zero? */
|
||||
#define PARAM_IS_SET(p) ((p) ? 1 : 0)
|
||||
|
||||
/* Return number of bits passed in @bits assuming 2 * 64 bit size */
|
||||
static int _get_params_count(uint64_t *bits)
|
||||
/* Return number of bits assuming 4 * 64 bit size */
|
||||
static int _get_params_count(uint64_t bits)
|
||||
{
|
||||
int r = 0;
|
||||
int i = RAID_BITMAP_SIZE;
|
||||
|
||||
while (i--) {
|
||||
r += 2 * hweight32(bits[i] & 0xFFFFFFFF);
|
||||
r += 2 * hweight32(bits[i] >> 32);
|
||||
}
|
||||
r += 2 * hweight32(bits & 0xFFFFFFFF);
|
||||
r += 2 * hweight32(bits >> 32);
|
||||
|
||||
return r;
|
||||
}
|
||||
@@ -2381,60 +2373,32 @@ static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
|
||||
size_t paramsize)
|
||||
{
|
||||
uint32_t i;
|
||||
uint32_t area_count = seg->area_count / 2;
|
||||
int param_count = 1; /* mandatory 'chunk size'/'stripe size' arg */
|
||||
int pos = 0;
|
||||
unsigned type;
|
||||
|
||||
if (seg->area_count % 2)
|
||||
return 0;
|
||||
unsigned type = seg->type;
|
||||
|
||||
if ((seg->flags & DM_NOSYNC) || (seg->flags & DM_FORCESYNC))
|
||||
param_count++;
|
||||
|
||||
param_count += _2_if_value(seg->data_offset) +
|
||||
_2_if_value(seg->delta_disks) +
|
||||
_2_if_value(seg->region_size) +
|
||||
_2_if_value(seg->writebehind) +
|
||||
_2_if_value(seg->min_recovery_rate) +
|
||||
_2_if_value(seg->max_recovery_rate) +
|
||||
_2_if_value(seg->data_copies > 1);
|
||||
param_count += 2 * (PARAM_IS_SET(seg->region_size) +
|
||||
PARAM_IS_SET(seg->writebehind) +
|
||||
PARAM_IS_SET(seg->min_recovery_rate) +
|
||||
PARAM_IS_SET(seg->max_recovery_rate));
|
||||
|
||||
/* rebuilds and writemostly are BITMAP_SIZE * 64 bits */
|
||||
/* rebuilds and writemostly are 64 bits */
|
||||
param_count += _get_params_count(seg->rebuilds);
|
||||
param_count += _get_params_count(seg->writemostly);
|
||||
|
||||
if ((seg->type == SEG_RAID1) && seg->stripe_size)
|
||||
log_info("WARNING: Ignoring RAID1 stripe size");
|
||||
if ((type == SEG_RAID1) && seg->stripe_size)
|
||||
log_error("WARNING: Ignoring RAID1 stripe size");
|
||||
|
||||
/* Kernel only expects "raid0", not "raid0_meta" */
|
||||
type = seg->type;
|
||||
if (type == SEG_RAID0_META)
|
||||
type = SEG_RAID0;
|
||||
#if 0
|
||||
/* Kernel only expects "raid10", not "raid10_{far,offset}" */
|
||||
else if (type == SEG_RAID10_FAR ||
|
||||
type == SEG_RAID10_OFFSET) {
|
||||
param_count += 2;
|
||||
type = SEG_RAID10_NEAR;
|
||||
}
|
||||
#endif
|
||||
|
||||
EMIT_PARAMS(pos, "%s %d %u",
|
||||
// type == SEG_RAID10_NEAR ? "raid10" : _dm_segtypes[type].target,
|
||||
type == SEG_RAID10 ? "raid10" : _dm_segtypes[type].target,
|
||||
EMIT_PARAMS(pos, "%s %d %u", _dm_segtypes[type].target,
|
||||
param_count, seg->stripe_size);
|
||||
|
||||
#if 0
|
||||
if (seg->type == SEG_RAID10_FAR)
|
||||
EMIT_PARAMS(pos, " raid10_format far");
|
||||
else if (seg->type == SEG_RAID10_OFFSET)
|
||||
EMIT_PARAMS(pos, " raid10_format offset");
|
||||
#endif
|
||||
|
||||
if (seg->data_copies > 1 && type == SEG_RAID10)
|
||||
EMIT_PARAMS(pos, " raid10_copies %u", seg->data_copies);
|
||||
|
||||
if (seg->flags & DM_NOSYNC)
|
||||
EMIT_PARAMS(pos, " nosync");
|
||||
else if (seg->flags & DM_FORCESYNC)
|
||||
@@ -2443,38 +2407,27 @@ static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
|
||||
if (seg->region_size)
|
||||
EMIT_PARAMS(pos, " region_size %u", seg->region_size);
|
||||
|
||||
/* If seg-data_offset == 1, kernel needs a zero offset to adjust to it */
|
||||
if (seg->data_offset)
|
||||
EMIT_PARAMS(pos, " data_offset %d", seg->data_offset == 1 ? 0 : seg->data_offset);
|
||||
|
||||
if (seg->delta_disks)
|
||||
EMIT_PARAMS(pos, " delta_disks %d", seg->delta_disks);
|
||||
|
||||
for (i = 0; i < area_count; i++)
|
||||
if (seg->rebuilds[i/64] & (1ULL << (i%64)))
|
||||
for (i = 0; i < (seg->area_count / 2); i++)
|
||||
if (seg->rebuilds & (1ULL << i))
|
||||
EMIT_PARAMS(pos, " rebuild %u", i);
|
||||
|
||||
for (i = 0; i < area_count; i++)
|
||||
if (seg->writemostly[i/64] & (1ULL << (i%64)))
|
||||
EMIT_PARAMS(pos, " write_mostly %u", i);
|
||||
|
||||
if (seg->writebehind)
|
||||
EMIT_PARAMS(pos, " max_write_behind %u", seg->writebehind);
|
||||
|
||||
/*
|
||||
* Has to be before "min_recovery_rate" or the kernels
|
||||
* check will fail when both set and min > previous max
|
||||
*/
|
||||
if (seg->max_recovery_rate)
|
||||
EMIT_PARAMS(pos, " max_recovery_rate %u",
|
||||
seg->max_recovery_rate);
|
||||
|
||||
if (seg->min_recovery_rate)
|
||||
EMIT_PARAMS(pos, " min_recovery_rate %u",
|
||||
seg->min_recovery_rate);
|
||||
|
||||
if (seg->max_recovery_rate)
|
||||
EMIT_PARAMS(pos, " max_recovery_rate %u",
|
||||
seg->max_recovery_rate);
|
||||
|
||||
for (i = 0; i < (seg->area_count / 2); i++)
|
||||
if (seg->writemostly & (1ULL << i))
|
||||
EMIT_PARAMS(pos, " write_mostly %u", i);
|
||||
|
||||
if (seg->writebehind)
|
||||
EMIT_PARAMS(pos, " max_write_behind %u", seg->writebehind);
|
||||
|
||||
/* Print number of metadata/data device pairs */
|
||||
EMIT_PARAMS(pos, " %u", area_count);
|
||||
EMIT_PARAMS(pos, " %u", seg->area_count/2);
|
||||
|
||||
if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
|
||||
return_0;
|
||||
@@ -2921,8 +2874,8 @@ int dm_tree_preload_children(struct dm_tree_node *dnode,
|
||||
else if (child->props.size_changed < 0)
|
||||
dnode->props.size_changed = -1;
|
||||
|
||||
/* No resume for a device without parents or with unchanged or smaller size */
|
||||
if (!dm_tree_node_num_children(child, 1) || (child->props.size_changed <= 0))
|
||||
/* Resume device immediately if it has parents and its size changed */
|
||||
if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
|
||||
continue;
|
||||
|
||||
if (!node_created && (dm_list_size(&child->props.segs) == 1)) {
|
||||
@@ -3314,14 +3267,11 @@ int dm_tree_node_add_raid_target_with_params(struct dm_tree_node *node,
|
||||
seg->region_size = p->region_size;
|
||||
seg->stripe_size = p->stripe_size;
|
||||
seg->area_count = 0;
|
||||
seg->delta_disks = p->delta_disks;
|
||||
seg->data_offset = p->data_offset;
|
||||
memcpy(seg->rebuilds, p->rebuilds, sizeof(seg->rebuilds));
|
||||
memcpy(seg->writemostly, p->writemostly, sizeof(seg->writemostly));
|
||||
seg->rebuilds = p->rebuilds;
|
||||
seg->writemostly = p->writemostly;
|
||||
seg->writebehind = p->writebehind;
|
||||
seg->min_recovery_rate = p->min_recovery_rate;
|
||||
seg->max_recovery_rate = p->max_recovery_rate;
|
||||
seg->data_copies = p->data_copies;
|
||||
seg->flags = p->flags;
|
||||
|
||||
return 1;
|
||||
@@ -3332,18 +3282,17 @@ int dm_tree_node_add_raid_target(struct dm_tree_node *node,
|
||||
const char *raid_type,
|
||||
uint32_t region_size,
|
||||
uint32_t stripe_size,
|
||||
uint64_t *rebuilds,
|
||||
uint64_t rebuilds,
|
||||
uint64_t flags)
|
||||
{
|
||||
struct dm_tree_node_raid_params params = {
|
||||
.raid_type = raid_type,
|
||||
.region_size = region_size,
|
||||
.stripe_size = stripe_size,
|
||||
.rebuilds = rebuilds,
|
||||
.flags = flags
|
||||
};
|
||||
|
||||
memcpy(params.rebuilds, rebuilds, sizeof(params.rebuilds));
|
||||
|
||||
return dm_tree_node_add_raid_target_with_params(node, size, ¶ms);
|
||||
}
|
||||
|
||||
|
@@ -3062,31 +3062,26 @@ static void _get_final_time(time_range_t range, struct tm *tm,
|
||||
tm_up.tm_sec += 1;
|
||||
break;
|
||||
}
|
||||
/* fall through */
|
||||
case RANGE_MINUTE:
|
||||
if (tm_up.tm_min < 59) {
|
||||
tm_up.tm_min += 1;
|
||||
break;
|
||||
}
|
||||
/* fall through */
|
||||
case RANGE_HOUR:
|
||||
if (tm_up.tm_hour < 23) {
|
||||
tm_up.tm_hour += 1;
|
||||
break;
|
||||
}
|
||||
/* fall through */
|
||||
case RANGE_DAY:
|
||||
if (tm_up.tm_mday < _get_days_in_month(tm_up.tm_mon, tm_up.tm_year)) {
|
||||
tm_up.tm_mday += 1;
|
||||
break;
|
||||
}
|
||||
/* fall through */
|
||||
case RANGE_MONTH:
|
||||
if (tm_up.tm_mon < 11) {
|
||||
tm_up.tm_mon += 1;
|
||||
break;
|
||||
}
|
||||
/* fall through */
|
||||
case RANGE_YEAR:
|
||||
tm_up.tm_year += 1;
|
||||
break;
|
||||
@@ -4209,7 +4204,7 @@ static void _recalculate_fields(struct dm_report *rh)
|
||||
{
|
||||
struct row *row;
|
||||
struct dm_report_field *field;
|
||||
int len;
|
||||
size_t len;
|
||||
|
||||
dm_list_iterate_items(row, &rh->rows) {
|
||||
dm_list_iterate_items(field, &row->fields) {
|
||||
|
@@ -402,7 +402,7 @@ static int _stats_bound(const struct dm_stats *dms)
|
||||
if (dms->bind_major > 0 || dms->bind_name || dms->bind_uuid)
|
||||
return 1;
|
||||
/* %p format specifier expects a void pointer. */
|
||||
log_debug("Stats handle at %p is not bound.", dms);
|
||||
log_debug("Stats handle at %p is not bound.", (void *) dms);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -3294,7 +3294,7 @@ static void _sum_histogram_bins(const struct dm_stats *dms,
|
||||
struct dm_stats_region *region;
|
||||
struct dm_histogram_bin *bins;
|
||||
struct dm_histogram *dmh_cur;
|
||||
int bin;
|
||||
uint64_t bin;
|
||||
|
||||
region = &dms->regions[region_id];
|
||||
dmh_cur = region->counters[area_id].histogram;
|
||||
@@ -3857,9 +3857,9 @@ struct _extent {
|
||||
*/
|
||||
static int _extent_start_compare(const void *p1, const void *p2)
|
||||
{
|
||||
const struct _extent *r1, *r2;
|
||||
r1 = (const struct _extent *) p1;
|
||||
r2 = (const struct _extent *) p2;
|
||||
struct _extent *r1, *r2;
|
||||
r1 = (struct _extent *) p1;
|
||||
r2 = (struct _extent *) p2;
|
||||
|
||||
if (r1->start < r2->start)
|
||||
return -1;
|
||||
@@ -3868,6 +3868,37 @@ static int _extent_start_compare(const void *p1, const void *p2)
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Resize the group bitmap corresponding to group_id so that it can
|
||||
* contain at least num_regions members.
|
||||
*/
|
||||
static int _stats_resize_group(struct dm_stats_group *group, int num_regions)
|
||||
{
|
||||
int last_bit = dm_bit_get_last(group->regions);
|
||||
dm_bitset_t new, old;
|
||||
|
||||
if (last_bit >= num_regions) {
|
||||
log_error("Cannot resize group bitmap to %d with bit %d set.",
|
||||
num_regions, last_bit);
|
||||
return 0;
|
||||
}
|
||||
|
||||
log_very_verbose("Resizing group bitmap from %d to %d (last_bit: %d).",
|
||||
group->regions[0], num_regions, last_bit);
|
||||
|
||||
new = dm_bitset_create(NULL, num_regions);
|
||||
if (!new) {
|
||||
log_error("Could not allocate memory for new group bitmap.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
old = group->regions;
|
||||
dm_bit_copy(new, old);
|
||||
group->regions = new;
|
||||
dm_bitset_destroy(old);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int _stats_create_group(struct dm_stats *dms, dm_bitset_t regions,
|
||||
const char *alias, uint64_t *group_id)
|
||||
{
|
||||
@@ -3972,7 +4003,7 @@ merge:
|
||||
static void _stats_copy_histogram_bounds(struct dm_histogram *to,
|
||||
struct dm_histogram *from)
|
||||
{
|
||||
int i;
|
||||
uint64_t i;
|
||||
|
||||
to->nr_bins = from->nr_bins;
|
||||
|
||||
@@ -3988,7 +4019,7 @@ static void _stats_copy_histogram_bounds(struct dm_histogram *to,
|
||||
static int _stats_check_histogram_bounds(struct dm_histogram *h1,
|
||||
struct dm_histogram *h2)
|
||||
{
|
||||
int i;
|
||||
uint64_t i;
|
||||
|
||||
if (!h1 || !h2)
|
||||
return 0;
|
||||
@@ -4171,37 +4202,6 @@ int dm_stats_get_group_descriptor(const struct dm_stats *dms,
|
||||
}
|
||||
|
||||
#ifdef HAVE_LINUX_FIEMAP_H
|
||||
/*
|
||||
* Resize the group bitmap corresponding to group_id so that it can
|
||||
* contain at least num_regions members.
|
||||
*/
|
||||
static int _stats_resize_group(struct dm_stats_group *group, int num_regions)
|
||||
{
|
||||
int last_bit = dm_bit_get_last(group->regions);
|
||||
dm_bitset_t new, old;
|
||||
|
||||
if (last_bit >= num_regions) {
|
||||
log_error("Cannot resize group bitmap to %d with bit %d set.",
|
||||
num_regions, last_bit);
|
||||
return 0;
|
||||
}
|
||||
|
||||
log_very_verbose("Resizing group bitmap from %d to %d (last_bit: %d).",
|
||||
group->regions[0], num_regions, last_bit);
|
||||
|
||||
new = dm_bitset_create(NULL, num_regions);
|
||||
if (!new) {
|
||||
log_error("Could not allocate memory for new group bitmap.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
old = group->regions;
|
||||
dm_bit_copy(new, old);
|
||||
group->regions = new;
|
||||
dm_bitset_destroy(old);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Group a table of region_ids corresponding to the extents of a file.
|
||||
*/
|
||||
@@ -4557,7 +4557,7 @@ static int _stats_unmap_regions(struct dm_stats *dms, uint64_t group_id,
|
||||
log_error("Could not finalize region extent table.");
|
||||
goto out;
|
||||
}
|
||||
log_very_verbose("Kept " FMTi64 " of " FMTi64 " old extents",
|
||||
log_very_verbose("Kept %ld of %ld old extents",
|
||||
nr_kept, nr_old);
|
||||
log_very_verbose("Found " FMTu64 " new extents",
|
||||
*count - nr_kept);
|
||||
@@ -4725,7 +4725,7 @@ static uint64_t *_stats_map_file_regions(struct dm_stats *dms, int fd,
|
||||
|
||||
dm_pool_free(extent_mem, extents);
|
||||
dm_pool_destroy(extent_mem);
|
||||
|
||||
dm_free(hist_arg);
|
||||
return regions;
|
||||
|
||||
out_remove:
|
||||
@@ -4842,7 +4842,7 @@ uint64_t *dm_stats_update_regions_from_fd(struct dm_stats *dms, int fd,
|
||||
if (!bounds) {
|
||||
log_error("Could not allocate memory for group "
|
||||
"histogram bounds.");
|
||||
goto out;
|
||||
return NULL;
|
||||
}
|
||||
_stats_copy_histogram_bounds(bounds,
|
||||
dms->regions[group_id].bounds);
|
||||
@@ -4869,8 +4869,6 @@ uint64_t *dm_stats_update_regions_from_fd(struct dm_stats *dms, int fd,
|
||||
bad:
|
||||
_stats_cleanup_region_ids(dms, regions, count);
|
||||
dm_free(bounds);
|
||||
dm_free(regions);
|
||||
out:
|
||||
dm_free((char *) alias);
|
||||
return NULL;
|
||||
}
|
||||
|
@@ -626,7 +626,7 @@ uint64_t dm_units_to_factor(const char *units, char *unit_type,
|
||||
uint64_t multiplier;
|
||||
|
||||
if (endptr)
|
||||
*endptr = units;
|
||||
*endptr = (char *) units;
|
||||
|
||||
if (isdigit(*units)) {
|
||||
custom_value = strtod(units, &ptr);
|
||||
@@ -709,7 +709,7 @@ uint64_t dm_units_to_factor(const char *units, char *unit_type,
|
||||
}
|
||||
|
||||
if (endptr)
|
||||
*endptr = units + 1;
|
||||
*endptr = (char *) units + 1;
|
||||
|
||||
if (_close_enough(custom_value, 0.))
|
||||
return v * multiplier; /* Use integer arithmetic */
|
||||
|
@@ -89,8 +89,6 @@ static unsigned _count_fields(const char *p)
|
||||
* <raid_type> <#devs> <health_str> <sync_ratio>
|
||||
* Versions 1.5.0+ (6 fields):
|
||||
* <raid_type> <#devs> <health_str> <sync_ratio> <sync_action> <mismatch_cnt>
|
||||
* Versions 1.9.0+ (7 fields):
|
||||
* <raid_type> <#devs> <health_str> <sync_ratio> <sync_action> <mismatch_cnt> <data_offset>
|
||||
*/
|
||||
int dm_get_status_raid(struct dm_pool *mem, const char *params,
|
||||
struct dm_status_raid **status)
|
||||
@@ -149,22 +147,6 @@ int dm_get_status_raid(struct dm_pool *mem, const char *params,
|
||||
if (sscanf(p, "%s %" PRIu64, s->sync_action, &s->mismatch_count) != 2)
|
||||
goto_bad;
|
||||
|
||||
if (num_fields < 7)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* All pre-1.9.0 version parameters are read. Now we check
|
||||
* for additional 1.9.0+ parameters (i.e. nr_fields at least 7).
|
||||
*
|
||||
* Note that data_offset will be 0 if the
|
||||
* kernel returns a pre-1.9.0 status.
|
||||
*/
|
||||
msg_fields = "<data_offset>";
|
||||
if (!(p = _skip_fields(params, 6))) /* skip pre-1.9.0 params */
|
||||
goto bad;
|
||||
if (sscanf(p, "%" PRIu64, &s->data_offset) != 1)
|
||||
goto bad;
|
||||
|
||||
out:
|
||||
*status = s;
|
||||
|
||||
|
@@ -40,11 +40,6 @@ SED = @SED@
|
||||
CFLOW_CMD = @CFLOW_CMD@
|
||||
AWK = @AWK@
|
||||
CHMOD = @CHMOD@
|
||||
EGREP = @EGREP@
|
||||
GREP = @GREP@
|
||||
SORT = @SORT@
|
||||
WC = @WC@
|
||||
|
||||
PYTHON2 = @PYTHON2@
|
||||
PYTHON3 = @PYTHON3@
|
||||
PYCOMPILE = $(top_srcdir)/autoconf/py-compile
|
||||
@@ -517,9 +512,9 @@ ifeq (,$(firstword $(EXPORTED_SYMBOLS)))
|
||||
) > $@
|
||||
else
|
||||
set -e;\
|
||||
R=$$($(SORT) $^ | uniq -u);\
|
||||
R=$$(sort $^ | uniq -u);\
|
||||
test -z "$$R" || { echo "Mismatch between symbols in shared library and lists in .exported_symbols.* files: $$R"; false; } ;\
|
||||
( for i in $$(echo $(EXPORTED_SYMBOLS) | tr ' ' '\n' | $(SORT) -rnt_ -k5 ); do\
|
||||
( for i in $$(echo $(EXPORTED_SYMBOLS) | tr ' ' '\n' | sort -rnt_ -k5 ); do\
|
||||
echo "$${i##*.} {"; echo " global:";\
|
||||
$(SED) "s/^/ /;s/$$/;/" $$i;\
|
||||
echo "};";\
|
||||
|
@@ -144,12 +144,10 @@ Makefile: Makefile.in
|
||||
|
||||
man-generator:
|
||||
$(CC) -DMAN_PAGE_GENERATOR -I$(top_builddir)/tools $(CFLAGS) $(top_srcdir)/tools/command.c -o $@
|
||||
- ./man-generator lvmconfig > test.gen
|
||||
if [ ! -s test.gen ] ; then cp genfiles/*.gen $(top_builddir)/man; fi;
|
||||
|
||||
$(MAN8GEN): man-generator
|
||||
echo "Generating $@" ;
|
||||
if [ ! -e $@.gen ]; then ./man-generator $(basename $@) $(top_srcdir)/man/$@.des > $@.gen; fi
|
||||
./man-generator $(basename $@) > $@.gen
|
||||
if [ -f $(top_srcdir)/man/$@.end ]; then cat $(top_srcdir)/man/$@.end >> $@.gen; fi;
|
||||
cat $(top_srcdir)/man/see_also.end >> $@.gen
|
||||
$(SED) -e "s+#VERSION#+$(LVM_VERSION)+;s+#DEFAULT_SYS_DIR#+$(DEFAULT_SYS_DIR)+;s+#DEFAULT_ARCHIVE_DIR#+$(DEFAULT_ARCHIVE_DIR)+;s+#DEFAULT_BACKUP_DIR#+$(DEFAULT_BACKUP_DIR)+;s+#DEFAULT_PROFILE_DIR#+$(DEFAULT_PROFILE_DIR)+;s+#DEFAULT_CACHE_DIR#+$(DEFAULT_CACHE_DIR)+;s+#DEFAULT_LOCK_DIR#+$(DEFAULT_LOCK_DIR)+;s+#CLVMD_PATH#+@CLVMD_PATH@+;s+#LVM_PATH#+@LVM_PATH@+;s+#DEFAULT_RUN_DIR#+@DEFAULT_RUN_DIR@+;s+#DEFAULT_PID_DIR#+@DEFAULT_PID_DIR@+;s+#SYSTEMD_GENERATOR_DIR#+$(SYSTEMD_GENERATOR_DIR)+;s+#DEFAULT_MANGLING#+$(DEFAULT_MANGLING)+;" $@.gen > $@
|
||||
|
@@ -23,6 +23,40 @@ dmeventd is the event monitoring daemon for device-mapper devices.
|
||||
Library plugins can register and carry out actions triggered when
|
||||
particular events occur.
|
||||
.
|
||||
.SH LVM PLUGINS
|
||||
.
|
||||
.HP
|
||||
.IR Mirror
|
||||
.br
|
||||
Attempts to handle device failure automatically. See
|
||||
.BR lvm.conf (5).
|
||||
.
|
||||
.HP
|
||||
.IR Raid
|
||||
.br
|
||||
Attempts to handle device failure automatically. See
|
||||
.BR lvm.conf (5).
|
||||
.
|
||||
.HP
|
||||
.IR Snapshot
|
||||
.br
|
||||
Monitors how full a snapshot is becoming and emits a warning to
|
||||
syslog when it exceeds 80% full.
|
||||
The warning is repeated when 85%, 90% and 95% of the snapshot is filled.
|
||||
See
|
||||
.BR lvm.conf (5).
|
||||
A snapshot which runs out of space becomes invalid and, when it is mounted,
it is unmounted if possible.
|
||||
.
|
||||
.HP
|
||||
.IR Thin
|
||||
.br
|
||||
Monitors how full a thin pool data and metadata is becoming and emits
|
||||
a warning to syslog when it exceeds 80% full.
|
||||
The warning is repeated when 85%, 90% and 95% of the thin pool is filled.
|
||||
See
|
||||
.BR lvm.conf (5).
|
||||
If the thin-pool runs out of space, thin volumes are umounted if possible.
|
||||
.
|
||||
.SH OPTIONS
|
||||
.
|
||||
@@ -70,80 +104,6 @@ events to monitor from the currently running daemon.
|
||||
.br
|
||||
Show version of dmeventd.
|
||||
.
|
||||
.SH LVM PLUGINS
|
||||
.
|
||||
.HP
|
||||
.BR Mirror
|
||||
.br
|
||||
Attempts to handle device failure automatically. See
|
||||
.BR lvm.conf (5).
|
||||
.
|
||||
.HP
|
||||
.BR Raid
|
||||
.br
|
||||
Attempts to handle device failure automatically. See
|
||||
.BR lvm.conf (5).
|
||||
.
|
||||
.HP
|
||||
.BR Snapshot
|
||||
.br
|
||||
Monitors how full a snapshot is becoming and emits a warning to
|
||||
syslog when it exceeds 80% full.
|
||||
The warning is repeated when 85%, 90% and 95% of the snapshot is filled.
|
||||
See
|
||||
.BR lvm.conf (5).
|
||||
A snapshot which runs out of space becomes invalid and, when it is mounted,
it is unmounted if possible.
|
||||
.
|
||||
.HP
.BR Thin
.br
Monitors how full a thin pool data and metadata is becoming and emits
a warning to syslog when it exceeds 80% full.
The warning is repeated when more than 85%, 90% and 95%
of the thin pool is filled. See
.BR lvm.conf (5).
When a thin pool fills over 50% (data or metadata), the thin plugin calls
the configured \fIdmeventd/thin_command\fP with every 5% increase.
With the default setting it calls the internal
\fBlvm lvextend --use-policies\fP to resize the thin pool
when it has been filled above the configured threshold
\fIactivation/thin_pool_autoextend_threshold\fP.
If the command fails, the dmeventd thin plugin will keep
retrying execution with an increasing time delay between
retries, up to 42 minutes.
The user may also configure an external command to support more advanced
maintenance operations of a thin pool.
Such an external command can e.g. remove some unneeded snapshots,
use \fBfstrim\fP(8) to recover free space in a thin pool,
but can also use \fBlvextend --use-policies\fP if other actions
have not released enough space.
The command is executed with the environment variable
\fBLVM_RUN_BY_DMEVENTD=1\fP so any lvm2 command executed
in this environment will not try to interact with dmeventd.
To see the fullness of a thin pool, the command may check these
two environment variables:
\fBDMEVENTD_THIN_POOL_DATA\fP and \fBDMEVENTD_THIN_POOL_METADATA\fP.
The command can also read status with tools like \fBlvs\fP(8).
.
.SH ENVIRONMENT VARIABLES
.
.TP
.B DMEVENTD_THIN_POOL_DATA
The variable is set by the thin plugin and is available to the executed program.
The value presents the actual usage of the thin pool data volume. The variable
is not set when an error event is processed.
.TP
.B DMEVENTD_THIN_POOL_METADATA
The variable is set by the thin plugin and is available to the executed program.
The value presents the actual usage of the thin pool metadata volume. The variable
is not set when an error event is processed.
.TP
.B LVM_RUN_BY_DMEVENTD
The variable is set by the thin plugin to prohibit recursive interaction
with dmeventd by any lvm2 command executed from
a thin_command environment.
.
.SH SEE ALSO
.
.BR lvm (8),
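
The thin_command mechanism and environment variables described above can be
illustrated with a short sketch. The script below is hypothetical, not shipped
code: it assumes the thin pool name is passed as the first argument and relies
only on the variables documented above.

    #!/bin/sh
    # Hypothetical dmeventd/thin_command handler (illustration only).
    # Assumption: the thin pool name arrives as the first argument.
    POOL="$1"
    DATA="${DMEVENTD_THIN_POOL_DATA:-0}"       # percent full; unset on error events
    META="${DMEVENTD_THIN_POOL_METADATA:-0}"   # percent full; unset on error events

    # LVM_RUN_BY_DMEVENTD=1 is already set here, so the lvextend call below
    # will not try to interact with dmeventd again.
    if [ "$DATA" -ge 90 ] || [ "$META" -ge 90 ]; then
        exec lvextend --use-policies "$POOL"
    fi

A real handler could equally remove old snapshots or run fstrim(8) first and
only then fall back to lvextend, as the description above suggests.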
|
||||
|
@@ -27,9 +27,9 @@ dmsetup \(em low level logical volume management
|
||||
. IR uuid ]
|
||||
. RB \%[ \-\-addnodeoncreate | \-\-addnodeonresume ]
|
||||
. RB \%[ \-n | \-\-notable | \-\-table
|
||||
. IR \%table | table_file ]
|
||||
. RI \%{ table | table_file }]
|
||||
. RB [ \-\-readahead
|
||||
. RB \%[ + ] \fIsectors | auto | none ]
|
||||
. RB \%{[ + ] \fIsectors | auto | none }]
|
||||
. ad b
|
||||
..
|
||||
.CMD_CREATE
|
||||
@@ -41,7 +41,7 @@ dmsetup \(em low level logical volume management
|
||||
. BR deps
|
||||
. RB [ \-o
|
||||
. IR options ]
|
||||
. RI [ device_name ...]
|
||||
. RI [ device_name ]
|
||||
. ad b
|
||||
..
|
||||
.CMD_DEPS
|
||||
@@ -58,7 +58,7 @@ dmsetup \(em low level logical volume management
|
||||
.B dmsetup
|
||||
.de CMD_INFO
|
||||
. BR info
|
||||
. RI [ device_name ...]
|
||||
. RI [ device_name ]
|
||||
..
|
||||
.CMD_INFO
|
||||
.
|
||||
@@ -92,7 +92,7 @@ dmsetup \(em low level logical volume management
|
||||
. BR load
|
||||
. IR device_name
|
||||
. RB [ \-\-table
|
||||
. IR table | table_file ]
|
||||
. RI { table | table_file }]
|
||||
. ad b
|
||||
..
|
||||
.CMD_LOAD
|
||||
@@ -117,7 +117,7 @@ dmsetup \(em low level logical volume management
|
||||
.B dmsetup
|
||||
.de CMD_MANGLE
|
||||
. BR mangle
|
||||
. RI [ device_name ...]
|
||||
. RI [ device_name ]
|
||||
..
|
||||
.CMD_MANGLE
|
||||
.
|
||||
@@ -135,7 +135,7 @@ dmsetup \(em low level logical volume management
|
||||
.B dmsetup
|
||||
.de CMD_MKNODES
|
||||
. BR mknodes
|
||||
. RI [ device_name ...]
|
||||
. RI [ device_name ]
|
||||
..
|
||||
.CMD_MKNODES
|
||||
.
|
||||
@@ -146,7 +146,7 @@ dmsetup \(em low level logical volume management
|
||||
. BR reload
|
||||
. IR device_name
|
||||
. RB [ \-\-table
|
||||
. IR table | table_file ]
|
||||
. RI { table | table_file }]
|
||||
. ad b
|
||||
..
|
||||
.CMD_RELOAD
|
||||
@@ -159,7 +159,7 @@ dmsetup \(em low level logical volume management
|
||||
. RB [ \-f | \-\-force ]
|
||||
. RB [ \-\-retry ]
|
||||
. RB [ \-\-deferred ]
|
||||
. IR device_name ...
|
||||
. IR device_name
|
||||
. ad b
|
||||
..
|
||||
.CMD_REMOVE
|
||||
@@ -197,12 +197,12 @@ dmsetup \(em low level logical volume management
|
||||
.de CMD_RESUME
|
||||
. ad l
|
||||
. BR resume
|
||||
. IR device_name ...
|
||||
. IR device_name
|
||||
. RB [ \-\-addnodeoncreate | \-\-addnodeonresume ]
|
||||
. RB [ \-\-noflush ]
|
||||
. RB [ \-\-nolockfs ]
|
||||
. RB \%[ \-\-readahead
|
||||
. RB \%[ + ] \fIsectors | auto | none ]
|
||||
. RB \%{[ + ] \fIsectors | auto | none }]
|
||||
. ad b
|
||||
..
|
||||
.CMD_RESUME
|
||||
@@ -247,7 +247,7 @@ dmsetup \(em low level logical volume management
|
||||
. RB [ \-\-target
|
||||
. IR target_type ]
|
||||
. RB [ \-\-noflush ]
|
||||
. RI [ device_name ...]
|
||||
. RI [ device_name ]
|
||||
. ad b
|
||||
..
|
||||
.CMD_STATUS
|
||||
@@ -259,7 +259,7 @@ dmsetup \(em low level logical volume management
|
||||
. BR suspend
|
||||
. RB [ \-\-nolockfs ]
|
||||
. RB [ \-\-noflush ]
|
||||
. IR device_name ...
|
||||
. IR device_name
|
||||
. ad b
|
||||
..
|
||||
.CMD_SUSPEND
|
||||
@@ -272,7 +272,7 @@ dmsetup \(em low level logical volume management
|
||||
. RB [ \-\-target
|
||||
. IR target_type ]
|
||||
. RB [ \-\-showkeys ]
|
||||
. RI [ device_name ...]
|
||||
. RI [ device_name ]
|
||||
. ad b
|
||||
..
|
||||
.CMD_TABLE
|
||||
@@ -354,7 +354,7 @@ dmsetup \(em low level logical volume management
|
||||
.de CMD_WIPE_TABLE
|
||||
. ad l
|
||||
. BR wipe_table
|
||||
. IR device_name ...
|
||||
. IR device_name
|
||||
. RB [ \-f | \-\-force ]
|
||||
. RB [ \-\-noflush ]
|
||||
. RB [ \-\-nolockfs ]
|
||||
@@ -447,7 +447,7 @@ The default interval is one second.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-manglename
|
||||
.BR auto | hex | none
|
||||
.RB { auto | hex | none }
|
||||
.br
|
||||
Mangle any character not on a whitelist using mangling_mode when
|
||||
processing device-mapper device names and UUIDs. The names and UUIDs
|
||||
@@ -529,7 +529,7 @@ Specify which fields to display.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-readahead
|
||||
.RB [ + ] \fIsectors | auto | none
|
||||
.RB {[ + ] \fIsectors | auto | none }
|
||||
.br
|
||||
Specify read ahead size in units of sectors.
|
||||
The default value is \fBauto\fP which allows the kernel to choose
|
||||
@@ -820,10 +820,8 @@ Outputs the current table for the device in a format that can be fed
|
||||
back in using the create or load commands.
|
||||
With \fB\-\-target\fP, only information relating to the specified target type
|
||||
is displayed.
|
||||
Real encryption keys are suppressed in the table output for the crypt
|
||||
target unless the \fB\-\-showkeys\fP parameter is supplied. Kernel key
|
||||
references prefixed with \fB:\fP are not affected by the parameter and get
|
||||
displayed always.
|
||||
Encryption keys are suppressed in the table output for the crypt
|
||||
target unless the \fB\-\-showkeys\fP parameter is supplied.
|
||||
.
|
||||
.HP
|
||||
.CMD_TARGETS
|
||||
|
@@ -44,7 +44,7 @@ dmstats \(em device-mapper statistics management
|
||||
.B dmsetup
|
||||
.B stats
|
||||
.I command
|
||||
[OPTIONS]
|
||||
.RB [ options ]
|
||||
.sp
|
||||
.
|
||||
.PD 0
|
||||
@@ -53,13 +53,13 @@ dmstats \(em device-mapper statistics management
|
||||
.de CMD_COMMAND
|
||||
. ad l
|
||||
. IR command
|
||||
. IR device_name " |"
|
||||
. BR \-\-major
|
||||
. RI [ device_name |
|
||||
. RB [ \-u | \-\-uuid
|
||||
. IR uuid ]
|
||||
. RB | [ \-\-major
|
||||
. IR major
|
||||
. BR \-\-minor
|
||||
. IR minor " |"
|
||||
. BR \-u | \-\-uuid
|
||||
. IR uuid
|
||||
. IR minor ]
|
||||
. RB \%[ \-v | \-\-verbose]
|
||||
. ad b
|
||||
..
|
||||
@@ -82,7 +82,9 @@ dmstats \(em device-mapper statistics management
|
||||
.de CMD_CREATE
|
||||
. ad l
|
||||
. BR create
|
||||
. IR device_name... | file_path... | \fB\-\-alldevices
|
||||
. RB [ device_name...
|
||||
. RB | file_path...
|
||||
. RB | [ \-\-alldevices ]]
|
||||
. RB [ \-\-areas
|
||||
. IR nr_areas | \fB\-\-areasize
|
||||
. IR area_size ]
|
||||
@@ -108,7 +110,8 @@ dmstats \(em device-mapper statistics management
|
||||
.de CMD_DELETE
|
||||
. ad l
|
||||
. BR delete
|
||||
. IR device_name | \fB\-\-alldevices
|
||||
. RI [ device_name ]
|
||||
. RB [ \-\-alldevices ]
|
||||
. OPT_PROGRAMS
|
||||
. OPT_REGIONS
|
||||
. ad b
|
||||
@@ -120,9 +123,10 @@ dmstats \(em device-mapper statistics management
|
||||
.de CMD_GROUP
|
||||
. ad l
|
||||
. BR group
|
||||
. RI [ device_name | \fB\-\-alldevices ]
|
||||
. RI [ device_name ]
|
||||
. RB [ \-\-alias
|
||||
. IR name ]
|
||||
. RB [ \-\-alldevices ]
|
||||
. RB [ \-\-regions
|
||||
. IR regions ]
|
||||
. ad b
|
||||
@@ -201,7 +205,8 @@ dmstats \(em device-mapper statistics management
|
||||
.de CMD_UNGROUP
|
||||
. ad l
|
||||
. BR ungroup
|
||||
. RI [ device_name | \fB\-\-alldevices ]
|
||||
. RI [ device_name ]
|
||||
. RB [ \-\-alldevices ]
|
||||
. RB [ \-\-groupid
|
||||
. IR id ]
|
||||
. ad b
|
||||
@@ -212,7 +217,7 @@ dmstats \(em device-mapper statistics management
|
||||
.de CMD_UPDATE_FILEMAP
|
||||
. ad l
|
||||
. BR update_filemap
|
||||
. IR file_path
|
||||
. RI file_path
|
||||
. RB [ \-\-groupid
|
||||
. IR id ]
|
||||
. ad b
|
||||
|
@@ -1,4 +1,4 @@
|
||||
.SH EXAMPLES
|
||||
.EXAMPLES
|
||||
|
||||
Change LV permission to read-only:
|
||||
.sp
|
||||
|
@@ -7,6 +7,21 @@ To display the current LV type, run the command:
|
||||
.B lvs \-o name,segtype
|
||||
.I LV
|
||||
|
||||
A command to change the LV type uses the general pattern:
|
||||
|
||||
.B lvconvert \-\-type
|
||||
.I NewType LV
|
||||
|
||||
LVs with the following types can be modified by lvconvert:
|
||||
.B striped,
|
||||
.B snapshot,
|
||||
.B mirror,
|
||||
.B raid*,
|
||||
.B thin,
|
||||
.B cache,
|
||||
.B thin\-pool,
|
||||
.B cache\-pool.
|
||||
|
||||
The
|
||||
.B linear
|
||||
type is equivalent to the
|
||||
@@ -20,6 +35,12 @@ type is deprecated and the
|
||||
.B raid1
|
||||
type should be used. They are both implementations of mirroring.
|
||||
|
||||
The
|
||||
.B raid*
|
||||
type refers to one of many raid levels, e.g.
|
||||
.B raid1,
|
||||
.B raid5.
|
||||
|
||||
In some cases, an LV is a single device mapper (dm) layer above physical
|
||||
devices. In other cases, hidden LVs (dm devices) are layered between the
|
||||
visible LV and physical devices. LVs in the middle layers are called sub LVs.
|
||||
|
@@ -1,85 +1,64 @@
|
||||
.SH NOTES
|
||||
|
||||
This previous command syntax would perform two different operations:
|
||||
.br
|
||||
\fBlvconvert --thinpool\fP \fILV1\fP \fB--poolmetadata\fP \fILV2\fP
|
||||
.br
|
||||
If LV1 was not a thin pool, the command would convert LV1 to
|
||||
a thin pool, optionally using a specified LV for metadata.
|
||||
But, if LV1 was already a thin pool, the command would swap
|
||||
the current metadata LV with LV2 (for repair purposes).
|
||||
|
||||
In the same way, this previous command syntax would perform two different
|
||||
operations:
|
||||
.br
|
||||
\fBlvconvert --cachepool\fP \fILV1\fP \fB--poolmetadata\fP \fILV2\fP
|
||||
.br
|
||||
If LV1 was not a cache pool, the command would convert LV1 to
|
||||
a cache pool, optionally using a specified LV for metadata.
|
||||
But, if LV1 was already a cache pool, the command would swap
|
||||
the current metadata LV with LV2 (for repair purposes).
|
||||
|
||||
.SH EXAMPLES
|
||||
|
||||
Convert a linear LV to a two-way mirror LV.
|
||||
Convert a linear LV to a two-way mirror LV:
|
||||
.br
|
||||
.B lvconvert \-\-type mirror \-\-mirrors 1 vg/lvol1
|
||||
|
||||
Convert a linear LV to a two-way RAID1 LV.
|
||||
Convert a linear LV to a two-way RAID1 LV:
|
||||
.br
|
||||
.B lvconvert \-\-type raid1 \-\-mirrors 1 vg/lvol1
|
||||
|
||||
Convert a mirror LV to use an in\-memory log.
|
||||
Convert a mirror LV to use an in\-memory log:
|
||||
.br
|
||||
.B lvconvert \-\-mirrorlog core vg/lvol1
|
||||
|
||||
Convert a mirror LV to use a disk log.
|
||||
Convert a mirror LV to use a disk log:
.br
.B lvconvert \-\-mirrorlog disk vg/lvol1

Convert a mirror or raid1 LV to a linear LV.
Convert a mirror or raid1 LV to a linear LV:
.br
.B lvconvert --type linear vg/lvol1

Convert a mirror LV to a raid1 LV with the same number of images.
Convert a mirror LV to a raid1 LV with the same number of images:
.br
.B lvconvert \-\-type raid1 vg/lvol1

Convert a linear LV to a two-way mirror LV, allocating new extents from specific
PV ranges.
PV ranges:
.br
.B lvconvert \-\-mirrors 1 vg/lvol1 /dev/sda:0\-15 /dev/sdb:0\-15

Convert a mirror LV to a linear LV, freeing physical extents from a specific PV.
Convert a mirror LV to a linear LV, freeing physical extents from a specific PV:
.br
.B lvconvert \-\-type linear vg/lvol1 /dev/sda

Split one image from a mirror or raid1 LV, making it a new LV.
Split one image from a mirror or raid1 LV, making it a new LV:
.br
.B lvconvert \-\-splitmirrors 1 \-\-name lv_split vg/lvol1

Split one image from a raid1 LV, and track changes made to the raid1 LV
while the split image remains detached.
while the split image remains detached:
.br
.B lvconvert \-\-splitmirrors 1 \-\-trackchanges vg/lvol1

Merge an image (that was previously created with \-\-splitmirrors and
\-\-trackchanges) back into the original raid1 LV.
\-\-trackchanges) back into the original raid1 LV:
.br
.B lvconvert \-\-mergemirrors vg/lvol1_rimage_1

Replace PV /dev/sdb1 with PV /dev/sdf1 in a raid1/4/5/6/10 LV.
Replace PV /dev/sdb1 with PV /dev/sdf1 in a raid1/4/5/6/10 LV:
.br
.B lvconvert \-\-replace /dev/sdb1 vg/lvol1 /dev/sdf1

Replace 3 PVs /dev/sd[b-d]1 with PVs /dev/sd[f-h]1 in a raid1 LV.
Replace 3 PVs /dev/sd[b-d]1 with PVs /dev/sd[f-h]1 in a raid1 LV:
.br
.B lvconvert \-\-replace /dev/sdb1 \-\-replace /dev/sdc1 \-\-replace /dev/sdd1
.RS
.B vg/lvol1 /dev/sd[fgh]1
.RE

Replace the maximum of 2 PVs /dev/sd[bc]1 with PVs /dev/sd[gh]1 in a raid6 LV.
Replace the maximum of 2 PVs /dev/sd[bc]1 with PVs /dev/sd[gh]1 in a raid6 LV:
.br
.B lvconvert \-\-replace /dev/sdb1 \-\-replace /dev/sdc1 vg/lvol1 /dev/sd[gh]1

@@ -90,7 +69,7 @@ is used as an external read\-only origin for the new thin LV.

Convert an LV into a thin LV in the specified thin pool. The existing LV
is used as an external read\-only origin for the new thin LV, and is
renamed "external".
renamed "external":
.br
.B lvconvert \-\-type thin \-\-thinpool vg/tpool1
.RS
@@ -98,19 +77,19 @@ renamed "external".
.RE

Convert an LV to a cache pool LV using another specified LV for cache pool
metadata.
metadata:
.br
.B lvconvert \-\-type cache-pool \-\-poolmetadata vg/poolmeta1 vg/lvol1

Convert an LV to a cache LV using the specified cache pool and chunk size.
Convert an LV to a cache LV using the specified cache pool and chunk size:
.br
.B lvconvert \-\-type cache \-\-cachepool vg/cpool1 \-c 128 vg/lvol1

Detach and keep the cache pool from a cache LV.
Detach and keep the cache pool from a cache LV:
.br
.B lvconvert \-\-splitcache vg/lvol1

Detach and remove the cache pool from a cache LV.
Detach and remove the cache pool from a cache LV:
.br
.B lvconvert \-\-uncache vg/lvol1
@@ -26,14 +26,3 @@ virtual size rather than a physical size. A cache LV is the combination of
a standard LV with a cache pool, used to cache active portions of the LV
to improve performance.

.SS Usage notes

In the usage section below, \fB--size\fP \fINumber\fP can be replaced
in each case with \fB--extents\fP \fINumberExtents\fP. Also see both
descriptions in the options section.

In the usage section below, \fB--name\fP is omitted from the required
options, even though it is typically used. When the name is not
specified, a new LV name is generated with the "lvol" prefix and a unique
numeric suffix. Also see the description in the options section.
@@ -1,5 +0,0 @@
This command is the same as \fBlvmconfig\fP(8).

lvm config produces formatted output from the LVM configuration tree. The
sources of the configuration data include \fBlvm.conf\fP(5) and command
line settings from \-\-config.
@@ -1,5 +0,0 @@
This command is the same as \fBlvmconfig\fP(8).

lvm dumpconfig produces formatted output from the LVM configuration tree. The
sources of the configuration data include \fBlvm.conf\fP(5) and command
line settings from \-\-config.
@@ -1,5 +1,6 @@
.SH NOTES

.IP \[bu] 3
To find the name of the pvmove LV that was created by an original
\fBpvmove /dev/name\fP command, use the command:
.br
@@ -7,27 +8,27 @@ To find the name of the pvmove LV that was created by an original

.SH EXAMPLES

Continue polling a pvmove operation.
Continue polling a pvmove operation:
.br
.B lvm lvpoll --polloperation pvmove vg00/pvmove0

Abort a pvmove operation.
Abort a pvmove operation:
.br
.B lvm lvpoll --polloperation pvmove --abort vg00/pvmove0

Continue polling a mirror conversion.
Continue polling a mirror conversion:
.br
.B lvm lvpoll --polloperation convert vg00/lvmirror

Continue mirror repair.
Continue mirror repair:
.br
.B lvm lvpoll --polloperation convert vg/damaged_mirror --handlemissingpvs

Continue snapshot merge.
Continue snapshot merge:
.br
.B lvm lvpoll --polloperation merge vg/snapshot_old

Continue thin snapshot merge.
Continue thin snapshot merge:
.br
.B lvm lvpoll --polloperation merge_thin vg/thin_snapshot
@@ -1,3 +0,0 @@
lvmsadc is not currently supported in LVM. The device-mapper statistics
facility provides similar performance metrics using the \fBdmstats(8)\fP
command.
@@ -1,3 +0,0 @@
lvmsar is not currently supported in LVM. The device-mapper statistics
facility provides similar performance metrics using the \fBdmstats(8)\fP
command.
@@ -278,6 +278,22 @@ or vgchange to activate thin snapshots with the "k" attribute.

\&

.SS Alternate syntax for specifying type thin\-pool

\&

The fully specified syntax for creating a thin pool LV shown above is:

.B lvconvert \-\-type thin-pool \-\-poolmetadata VG/ThinMetaLV VG/ThinDataLV

An alternate syntax may be used for the same operation:

.B lvconvert \-\-thinpool VG/ThinDataLV \-\-poolmetadata VG/ThinMetaLV

The thin-pool type is inferred by lvm; the \-\-thinpool option is not an
alias for \-\-type thin\-pool.
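To make the equivalence concrete, a hypothetical invocation of both forms (the LV names vg0/pool0 and vg0/pool0meta are illustrative only):

    # fully specified form
    lvconvert --type thin-pool --poolmetadata vg0/pool0meta vg0/pool0
    # shorthand form; the thin-pool type is inferred
    lvconvert --thinpool vg0/pool0 --poolmetadata vg0/pool0meta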
.SS Automatic pool metadata LV

\&

@@ -6,11 +6,6 @@ removal. LVs cannot be deactivated or removed while they are open (e.g.
if they contain a mounted filesystem). Removing an origin LV will also
remove all dependent snapshots.

When a single force option is used, LVs are removed without confirmation,
and the command will try to deactivate unused LVs.

To remove damaged LVs, two force options may be required (\fB-ff\fP).
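A sketch of the force behaviour described above, with hypothetical names (vg00/lvol1):

    lvremove -f vg00/lvol1     # remove without confirmation, deactivating if possible
    lvremove -ff vg00/lvol1    # a damaged LV may need the force option repeated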
\fBHistorical LVs\fP

If the configuration setting \fBmetadata/record_lvs_history\fP is enabled

@@ -16,6 +16,3 @@ data on that disk. This can be done by zeroing the first sector with:

Use \fBvgcreate\fP(8) to create a new VG on the PV, or \fBvgextend\fP(8)
to add the PV to an existing VG.

The force option will create a PV without confirmation. Repeating the
force option (\fB-ff\fP) will forcibly create a PV, overriding checks that
normally prevent it, e.g. if the PV is already in a VG.
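As an illustration of that workflow (device and VG names are hypothetical):

    pvcreate /dev/sdb          # initialise the disk as a PV (add -ff only to override checks)
    vgcreate vg00 /dev/sdb     # either create a new VG on it...
    vgextend vg01 /dev/sdb     # ...or add it to an existing VG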
@@ -1,16 +0,0 @@
pvmove moves the allocated physical extents (PEs) on a source PV to one or
more destination PVs. You can optionally specify a source LV in which
case only extents used by that LV will be moved to free (or specified)
extents on the destination PV. If no destination PV is specified, the
normal allocation rules for the VG are used.

If pvmove is interrupted for any reason (e.g. the machine crashes) then
run pvmove again without any PV arguments to restart any operations that
were in progress from the last checkpoint. Alternatively, use the abort
option at any time to abort the operation. The resulting location of LVs
after an abort depends on whether the atomic option was used.

More than one pvmove can run concurrently if they are moving data from
different source PVs, but additional pvmoves will ignore any LVs already
in the process of being changed, so some data might not get moved.
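A minimal sketch of that usage, assuming a hypothetical source PV /dev/sdb1:

    pvmove /dev/sdb1    # move all allocated extents off /dev/sdb1
    pvmove              # after an interruption, restart from the last checkpoint
    pvmove --abort      # or abandon the operation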
@@ -1,6 +1,6 @@
.SH NOTES

pvmove works as follows:
.
\fBpvmove\fP works as follows:

1. A temporary 'pvmove' LV is created to store details of all the data
movements required.
@@ -9,7 +9,7 @@ movements required.
according to the command line arguments.
For each piece of data found, a new segment is added to the end of the
pvmove LV.
This segment takes the form of a temporary mirror to copy the data
This segment takes the form of a temporary mirror to copy the data
from the original location to a newly allocated location.
The original LV is updated to use the new temporary mirror segment
in the pvmove LV instead of accessing the data directly.
@@ -1,7 +0,0 @@
pvremove wipes the label on a device so that LVM will no longer recognise
it as a PV.

A PV cannot be removed from a VG while it is used by an active LV.

Repeat the force option (\fB-ff\fP) to forcibly remove a PV belonging to
an existing VG. Normally, \fBvgreduce\fP(8) should be used instead.
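For example, with a hypothetical device name:

    pvremove /dev/sdb1      # wipe the PV label from an unused device
    pvremove -ff /dev/sdb1  # force removal of a PV still in a VG (normally use vgreduce instead)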
@@ -1,2 +0,0 @@
pvresize resizes a PV. The PV may already be in a VG and may have active
LVs allocated on it.
@@ -1,5 +1,6 @@
.SH NOTES

.IP \[bu] 3
pvresize will refuse to shrink a PV if it has allocated extents beyond the
new end.
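Illustrative only (hypothetical device), after the underlying disk or partition has been grown:

    pvresize /dev/sdb1      # expand the PV to use the new device size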
@@ -1 +0,0 @@
pvs produces formatted output about PVs.
@@ -1,6 +1,6 @@
pvscan scans all supported LVM block devices in the system for PVs.
.SH NOTES

\fBScanning with lvmetad\fP
.SS Scanning with lvmetad

pvscan operates differently when used with the
.BR lvmetad (8)
@@ -64,9 +64,7 @@ be temporarily disabled if they are seen.
To notify lvmetad about a device that is no longer present, the major and
minor numbers must be given, not the path.

.P
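An illustrative pair of invocations (the device path and 8:16 are placeholders; check pvscan(8) for the exact option spelling on your version):

    pvscan --cache /dev/sdb1              # tell lvmetad about a device that appeared
    pvscan --cache --major 8 --minor 16   # tell lvmetad a device is gone, by major:minor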
\fBAutomatic activation\fP
.SS Automatic activation

When event-driven system services detect a new LVM device, the first step
is to automatically scan and cache the metadata from the device. This is
@@ -1,16 +0,0 @@
vgcfgbackup creates back up files containing metadata of VGs.
If no VGs are named, back up files are created for all VGs.
See \fBvgcfgrestore\fP for information on using the back up
files.

In a default installation, each VG is backed up into a separate file
bearing the name of the VG in the directory \fI#DEFAULT_BACKUP_DIR#\fP.

To use an alternative back up file, use \fB\-f\fP. In this case, when
backing up multiple VGs, the file name is treated as a template, with %s
replaced by the VG name.

NB. This DOES NOT back up the data content of LVs.

It may also be useful to regularly back up the files in
\fI#DEFAULT_SYS_DIR#\fP.
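A sketch of the template behaviour described above (the path is hypothetical):

    vgcfgbackup                             # back up metadata of all VGs to the default directory
    vgcfgbackup -f /root/lvm-backup/%s.vg   # one file per VG, %s replaced by the VG name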
@@ -1,11 +0,0 @@
vgcfgrestore restores the metadata of a VG from a text back up file
produced by \fBvgcfgbackup\fP. This writes VG metadata onto the devices
specified in the back up file.

A back up file can be specified with \fB\-\-file\fP. If no backup file is
specified, the most recent one is used. Use \fB\-\-list\fP for a list of
the available back up and archive files of a VG.

WARNING: When a VG contains thin pools, changes to thin metadata cannot be
reverted, and data loss may occur if thin metadata has changed. The force
option is required to restore in this case.
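Illustrative only, with a hypothetical VG name and backup path:

    vgcfgrestore --list vg00                   # list available back up and archive files
    vgcfgrestore -f /etc/lvm/backup/vg00 vg00  # restore metadata from a chosen file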
@@ -1,9 +1,11 @@
.SH NOTES

.IP \[bu] 3
To replace PVs, \fBvgdisplay \-\-partial \-\-verbose\fP will show the
UUIDs and sizes of any PVs that are no longer present. If a PV in the VG
is lost and you wish to substitute another of the same size, use
\fBpvcreate \-\-restorefile filename \-\-uuid uuid\fP (plus additional
arguments as appropriate) to initialise it with the same UUID as the
missing PV. Repeat for all other missing PVs in the VG. Then use
\fBvgcfgrestore \-\-file filename\fP to restore the VG's metadata.
\fBvgcfgrestore \-\-file filename\fP to restore the volume group's
metadata.
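A sketch of that replacement procedure; the UUID, archive file and device below are placeholders:

    vgdisplay --partial --verbose vg00
    pvcreate --restorefile /etc/lvm/archive/vg00_00000.vg --uuid <MISSING-PV-UUID> /dev/sdf1
    vgcfgrestore --file /etc/lvm/archive/vg00_00000.vg vg00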
@@ -1,2 +0,0 @@
vgchange changes VG attributes, changes LV activation in the kernel, and
includes other utilities for VG maintenance.
@@ -1,10 +1,4 @@
.SH NOTES

If vgchange recognizes COW snapshot LVs that were dropped because they ran
out of space, it displays a message informing the administrator that the
snapshots should be removed.

.SH EXAMPLES
.EXAMPLES

Activate all LVs in all VGs on all existing devices.
.br

@@ -1 +0,0 @@
vgck checks LVM metadata for consistency.
@@ -1,7 +0,0 @@
vgconvert converts VG metadata from one format to another. The new
metadata format must be able to fit into the space provided by the old
format.

Because the LVM1 format should no longer be used, this command is no
longer needed in general.

@@ -1,4 +0,0 @@
vgcreate creates a new VG on block devices. If the devices were not
previously initialized as PVs with \fBpvcreate\fP(8), vgcreate will
initialize them, making them PVs. The pvcreate options for initializing
devices are also available with vgcreate.
@@ -1,4 +1,4 @@
.SH EXAMPLES
.EXAMPLES

Create a VG with two PVs, using the default physical extent size.
.br

@@ -1,4 +0,0 @@
vgdisplay shows the attributes of VGs, and the associated PVs and LVs.

\fBvgs\fP(8) is a preferred alternative that shows the same information
and more, using a more compact and configurable output format.
@@ -1,8 +1,14 @@
vgexport makes inactive VGs unknown to the system. In this state, all the
PVs in the VG can be moved to a different system, from which
.SH NOTES
.
.IP \[bu] 3
vgexport can make inactive VG(s) unknown to the system. In this state,
all the PVs in the VG can be moved to a different system, from which
\fBvgimport\fP can then be run.

.IP \[bu] 3
Most LVM tools ignore exported VGs.

.IP \[bu] 3
vgexport clears the VG system ID, and vgimport sets the VG system ID to
match the host running vgimport (if the host has a system ID).
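An illustrative round trip with a hypothetical VG name:

    vgchange -an vg01   # deactivate the VG so it can be exported
    vgexport vg01       # on the old host
    vgimport vg01       # on the new host, after moving the disks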
@@ -1,11 +0,0 @@
vgextend adds one or more PVs to a VG. This increases the space available
for LVs in the VG.

Also, PVs that have gone missing and then returned, e.g. due to a
transient device failure, can be added back to the VG without
re-initializing them (see \-\-restoremissing).

If the specified PVs have not yet been initialized with pvcreate, vgextend
will initialize them. In this case pvcreate options can be used, e.g.
\-\-labelsector, \-\-metadatasize, \-\-metadataignore,
\-\-pvmetadatacopies, \-\-dataalignment, \-\-dataalignmentoffset.
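For example (names are hypothetical):

    vgextend vg00 /dev/sdd                   # add a PV, initializing it if needed
    vgextend --restoremissing vg00 /dev/sdb  # re-add a PV that had temporarily disappeared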
@@ -1,3 +1,10 @@
.SH NOTES

If the specified PVs have not yet been initialized with pvcreate,
vgextend will initialize them. In this case the PV options apply,
e.g. \-\-labelsector, \-\-metadatasize, \-\-metadataignore,
\-\-pvmetadatacopies, \-\-dataalignment, \-\-dataalignmentoffset.

.SH EXAMPLES

Add two PVs to a VG.
@@ -1,5 +0,0 @@
vgimport makes exported VGs known to the system again, perhaps after
moving the PVs from a different system.

vgexport clears the VG system ID, and vgimport sets the VG system ID to
match the host running vgimport (if the host has a system ID).
9
man/vgimport.8.end
Normal file
@@ -0,0 +1,9 @@
.SH NOTES
.
.IP \[bu] 3
vgimport makes exported VG(s) known to the system again, perhaps
after moving the PVs from a different system.

.IP \[bu] 3
vgexport clears the VG system ID, and vgimport sets the VG system ID
to match the host running vgimport (if the host has a system ID).
@@ -1,6 +0,0 @@
vgimportclone imports a VG from duplicated PVs, e.g. created by a hardware
snapshot of existing PVs.

A duplicated VG cannot be used until it is made to coexist with the original
VG. vgimportclone renames the VG associated with the specified PVs and
changes the associated VG and PV UUIDs.
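A sketch with hypothetical clone devices; the --basevgname spelling is assumed to match vgimportclone(8):

    # rename the duplicated VG and regenerate VG/PV UUIDs so it can coexist with the original
    vgimportclone --basevgname vg00_snap /dev/sdk /dev/sdl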
@@ -1,3 +1,14 @@
.SH NOTES

.IP \[bu] 3
vgimportclone can be used to import a VG from duplicated PVs (e.g. created
by a hardware snapshot of the PV devices).

.IP \[bu] 3
A duplicated VG cannot be used until it is made to coexist with the original
VG. vgimportclone renames the VG associated with the specified PVs and
changes the associated VG and PV UUIDs.

.SH EXAMPLES

An original VG "vg00" has PVs "/dev/sda" and "/dev/sdb".
@@ -1,3 +0,0 @@
vgmerge merges two existing VGs. The inactive source VG is merged into the
destination VG if physical extent sizes are equal and PV and LV summaries
of both VGs fit into the destination VG's limits.
@@ -1,5 +0,0 @@
vgmknodes checks the LVM device nodes in /dev that are needed for active
LVs and creates any that are missing and removes unused ones.

This command should not usually be needed if all the system components are
interoperating correctly.
@@ -1 +0,0 @@
vgreduce removes one or more unused PVs from a VG.
@@ -1,9 +0,0 @@
vgremove removes one or more VGs. If LVs exist in the VG, a prompt is used
to confirm LV removal.

If one or more PVs in the VG are lost, consider
\fBvgreduce \-\-removemissing\fP to make the VG
metadata consistent again.

Repeat the force option (\fB-ff\fP) to forcibly remove LVs in the VG
without confirmation.
@@ -1,9 +0,0 @@
vgrename renames a VG.

All VGs visible to a system need to have different names, otherwise many
LVM commands will refuse to run or give warning messages. VGs with the
same name can occur when disks are moved between machines, or filters are
changed. If a newly connected disk has a VG with the same name as the VG
containing the root filesystem, the machine may not boot correctly. When
two VGs have the same name, the VG UUID can be used in place of the source
VG name.
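For instance, to disambiguate two VGs both named "vg00" (the UUID is a placeholder):

    vgs -o vg_name,vg_uuid        # find the UUID of the VG to rename
    vgrename <VG-UUID> vg00_old   # rename it by UUID instead of by name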
@@ -1 +0,0 @@
vgs produces formatted output about VGs.
@@ -1 +0,0 @@
vgscan scans all supported LVM block devices in the system for VGs.
@@ -1,13 +1,18 @@
vgsplit moves one or more PVs from a source VG to a destination VG. The
.SH NOTES
.
.IP \[bu] 3
vgsplit moves one or more PVs from a source VG to a destination VG. The
PVs can be specified explicitly or implicitly by naming an LV, in which
case only PVs underlying the LV are moved.

.IP \[bu] 3
If the destination VG does not exist, a new VG is created (command options
can be used to specify properties of the new VG, also see
\fBvgcreate\fP(8).)
can be used to specify properties of the new VG.)

.IP \[bu] 3
LVs cannot be split between VGs; each LV must be entirely on the PVs in
the source or destination VG.

vgsplit can only move complete PVs. (See \fBpvmove\fP(8) for moving part
of a PV.)
.IP \[bu] 3
vgsplit can only move complete PVs. (See pvmove for moving part of a PV.)
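A minimal example with hypothetical names (the -n form for naming an LV is assumed from vgsplit(8)):

    vgsplit vg00 vg_new /dev/sdb1   # move /dev/sdb1 into a new VG vg_new
    vgsplit vg00 vg_new -n lvol1    # or move whichever PVs underlie LV lvol1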
@@ -39,7 +39,7 @@ S ?= @ # never match anything by default
VERBOSE ?= 0
ALL := $(shell find -L $(srcdir) \( -path \*/shell/\*.sh -or -path \*/api/\*.sh \) | sort)
comma = ,
RUN := $(shell find -L $(srcdir) -regextype posix-egrep \( -path \*/shell/\*.sh -or -path \*/api/\*.sh \) -and -regex "$(srcdir)/.*($(subst $(comma),|,$(T))).*" -and -not -regex "$(srcdir)/.*($(subst $(comma),|,$(S))).*" | ($SORT))
RUN := $(shell find -L $(srcdir) -regextype posix-egrep \( -path \*/shell/\*.sh -or -path \*/api/\*.sh \) -and -regex "$(srcdir)/.*($(subst $(comma),|,$(T))).*" -and -not -regex "$(srcdir)/.*($(subst $(comma),|,$(S))).*" | sort)
RUN_BASE = $(subst $(srcdir)/,,$(RUN))

ifeq ("@BUILD_LVMETAD@", "yes")
@@ -267,7 +267,7 @@ lib/%: lib/%.o .lib-dir-stamp

lib/%: $(srcdir)/lib/%.sh .lib-dir-stamp
	cp $< $@
	$(CHMOD) +x $@
	chmod +x $@

lib/flavour-%: $(srcdir)/lib/flavour-%.sh .lib-dir-stamp
	cp $< $@
@@ -405,28 +405,23 @@ teardown_devs_prefixed() {
fi

# Remove devices, start with closed (sorted by open count)
# Run 'dmsetup remove' in parallel
local remfail=no
local need_udev_wait=0
rm -f REMOVE_FAILED
#local listdevs=( $(dm_info name,open --sort open,name | grep "$prefix.*:0") )
#dmsetup remove --deferred ${listdevs[@]%%:0} || touch REMOVE_FAILED

init_udev_transaction
for dm in $(dm_info name --sort open,name | grep "$prefix"); do
dmsetup remove "$dm" &>/dev/null || touch REMOVE_FAILED &
for dm in $(dm_info name --sort open | grep "$prefix"); do
dmsetup remove "$dm" &>/dev/null || remfail=yes
need_udev_wait=1
done
wait
finish_udev_transaction
test $need_udev_wait -eq 0 || udev_wait

if test -f REMOVE_FAILED; then
if test $remfail = yes; then
local num_devs
local num_remaining_devs=999
while num_devs=$(dm_table | grep "$prefix" | wc -l) && \
test $num_devs -lt $num_remaining_devs -a $num_devs -ne 0; do
test "$stray" -eq 0 || echo "Removing $num_devs stray mapped devices with names beginning with $prefix: "
# HACK: sort also by minors - so we try to close 'possibly later' created device first
# HACK: sort also by minors - so we try to close 'possibly later' created device first
for dm in $(dm_info name --sort open,-minor | grep "$prefix") ; do
dmsetup remove -f "$dm" || true
done
@@ -799,7 +794,6 @@ prepare_devs() {

local size=$(($devsize*2048)) # sectors
local count=0
rm -f CREATE_FAILED
init_udev_transaction
for i in $(seq 1 $n); do
local name="${PREFIX}$pvname$i"
@@ -807,19 +801,17 @@
DEVICES[$count]=$dev
count=$(( $count + 1 ))
echo 0 $size linear "$BACKING_DEV" $((($i-1)*$size + $shift)) > "$name.table"
dmsetup create -u "TEST-$name" "$name" "$name.table" || touch CREATE_FAILED &
test -f CREATE_FAILED && break;
if not dmsetup create -u "TEST-$name" "$name" "$name.table" &&
test -n "$LVM_TEST_BACKING_DEVICE";
then # maybe the backing device is too small for this test
LVM_TEST_BACKING_DEVICE=
rm -f BACKING_DEV
prepare_devs "$@"
return $?
fi
done
wait
finish_udev_transaction

if test -f CREATE_FAILED -a -n "$LVM_TEST_BACKING_DEVICE"; then
LVM_TEST_BACKING_DEVICE=
rm -f BACKING_DEV CREATE_FAILED
prepare_devs "$@"
return $?
fi

# non-ephemeral devices need to be cleared between tests
test -f LOOP || for d in ${DEVICES[@]}; do
blkdiscard "$d" 2>/dev/null || true
@@ -841,11 +833,9 @@ prepare_devs() {
# ( IFS=$'\n'; echo "${DEVICES[*]}" ) >DEVICES
echo "ok"

if test -e LOCAL_LVMETAD; then
for dev in "${DEVICES[@]}"; do
notify_lvmetad "$dev"
done
fi
for dev in "${DEVICES[@]}"; do
notify_lvmetad "$dev"
done
}


@@ -1306,7 +1296,7 @@ udev_wait() {
wait_for_sync() {
local i
for i in {1..100} ; do
check in_sync $1 $2 $3 && return
check in_sync $1 $2 && return
sleep .2
done

@@ -1423,10 +1413,10 @@ have_raid() {
}

have_raid4 () {
local r=0
local r=1

have_raid 1 8 0 && r=1
have_raid 1 9 1 && r=0
have_raid 1 8 0 && r=0
have_raid 1 9 1 && r=1

return $r
}

@@ -178,7 +178,7 @@ linear() {
$(lvl $lv -o+devices)
}

# in_sync <VG> <LV> <ignore 'a'>
# in_sync <VG> <LV>
# Works for "mirror" and "raid*"
in_sync() {
local a
@@ -187,11 +187,8 @@ in_sync() {
local type
local snap=""
local lvm_name="$1/$2"
local ignore_a="$3"
local dm_name=$(echo $lvm_name | sed s:-:--: | sed s:/:-:)

[ -z "$ignore_a" ] && ignore_a=0

a=( $(dmsetup status $dm_name) ) || \
die "Unable to get sync status of $1"

@@ -228,7 +225,7 @@ in_sync() {
return 1
fi

[[ ${a[$(($idx - 1))]} =~ a ]] && [ $ignore_a -eq 0 ] && \
[[ ${a[$(($idx - 1))]} =~ a ]] && \
die "$lvm_name ($type$snap) in-sync, but 'a' characters in health status"

echo "$lvm_name ($type$snap) is in-sync \"${a[@]}\""
@@ -313,12 +310,6 @@ lv_field() {
die "lv_field: lv=$1, field=\"$2\", actual=\"$actual\", expected=\"$3\""
}

lv_first_seg_field() {
local actual=$(get lv_first_seg_field "$1" "$2" "${@:4}")
test "$actual" = "$3" || \
die "lv_field: lv=$1, field=\"$2\", actual=\"$actual\", expected=\"$3\""
}

lvh_field() {
local actual=$(get lvh_field "$1" "$2" "${@:4}")
test "$actual" = "$3" || \

@@ -42,11 +42,6 @@ lv_field() {
trim_ "$r"
}

lv_first_seg_field() {
local r=$(lvs --config 'log{prefix=""}' --noheadings -o "$2" "${@:3}" "$1" | head -1)
trim_ "$r"
}

lvh_field() {
local r=$(lvs -H --config 'log{prefix=""}' --noheadings -o "$2" "${@:3}" "$1")
trim_ "$r"

@@ -77,11 +77,6 @@ find "$TESTOLDPWD/lib" ! \( -name '*.sh' -o -name '*.[cdo]' \
DM_DEFAULT_NAME_MANGLING_MODE=none
DM_DEV_DIR="$TESTDIR/dev"
LVM_SYSTEM_DIR="$TESTDIR/etc"
# abort on the internal dm errors in the tests (allowing test user override)
DM_ABORT_ON_INTERNAL_ERRORS=${DM_ABORT_ON_INTERNAL_ERRORS:-1}

export DM_DEFAULT_NAME_MANGLING_MODE DM_DEV_DIR LVM_SYSTEM_DIR DM_ABORT_ON_INTERNAL_ERRORS

mkdir "$LVM_SYSTEM_DIR" "$DM_DEV_DIR"
if test -n "$LVM_TEST_DEVDIR" ; then
test -d "$LVM_TEST_DEVDIR" || die "Test device directory LVM_TEST_DEVDIR=\"$LVM_TEST_DEVDIR\" is not valid."
@@ -90,11 +85,14 @@ else
mknod "$DM_DEV_DIR/testnull" c 1 3 || die "mknod failed"
echo >"$DM_DEV_DIR/testnull" || \
die "Filesystem does support devices in $DM_DEV_DIR (mounted with nodev?)"
|
||||
# dmsetup makes here needed control entry if still missing
dmsetup version || \
die "Dmsetup in $DM_DEV_DIR can't report version?"
mkdir "$DM_DEV_DIR/mapper"
fi

# abort on the internal dm errors in the tests (allowing test user override)
DM_ABORT_ON_INTERNAL_ERRORS=${DM_ABORT_ON_INTERNAL_ERRORS:-1}

export DM_DEFAULT_NAME_MANGLING_MODE DM_DEV_DIR LVM_SYSTEM_DIR DM_ABORT_ON_INTERNAL_ERRORS

echo "$TESTNAME" >TESTNAME

echo "Kernel is $(uname -a)"
@@ -1,72 +0,0 @@
#!/bin/sh
# Copyright (C) 2017 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

# unrelated to lvm2 daemons
SKIP_WITH_LVMLOCKD=1
SKIP_WITH_LVMPOLLD=1
SKIP_WITH_CLVMD=1
SKIP_WITH_LVMETAD=1

. lib/inittest

CIPHER=aes-xts-plain64
HEXKEY_32=0102030405060708090a0102030405060102030405060708090a010203040506
HIDENKEY_32=0000000000000000000000000000000000000000000000000000000000000000
KEY_NAME="$PREFIX:keydesc"

function _teardown() {
keyctl unlink %:$PREFIX-keyring
aux teardown_devs_prefixed $PREFIX
}

aux target_at_least dm-zero 1 0 0 || skip "missing dm-zero target"
aux target_at_least dm-crypt 1 15 0 || skip "dm-crypt doesn't support keys in kernel keyring service"
which keyctl || skip "test requires keyctl utility"

keyctl newring $PREFIX-keyring @u
keyctl timeout %:$PREFIX-keyring 60

trap '_teardown' EXIT

keyctl add logon $KEY_NAME ${HEXKEY_32:0:32} %:$PREFIX-keyring

dmsetup create $PREFIX-zero --table "0 1 zero"
# put key in kernel keyring for active table
dmsetup create $PREFIX-crypt --table "0 1 crypt $CIPHER :32:logon:$KEY_NAME 0 $TESTDIR/dev$prefix/mapper/$PREFIX-zero 0"
# put hexbyte key in dm-crypt directly in inactive table
dmsetup load $PREFIX-crypt --table "0 1 crypt $CIPHER $HEXKEY_32 0 $TESTDIR/dev$prefix/mapper/$PREFIX-zero 0"

# test dmsetup doesn't hide key descriptions...
str=`dmsetup table $PREFIX-crypt | cut -d ' ' -f 5`
test $str = :32:logon:$KEY_NAME || die
str=`dmsetup table --showkeys $PREFIX-crypt | cut -d ' ' -f 5`
test $str = :32:logon:$KEY_NAME || die

# ...but it hides hexbyte representation of keys...
str=`dmsetup table --inactive $PREFIX-crypt | cut -d ' ' -f 5`
test $str = $HIDENKEY_32 || die
#...unless --showkeys explicitly requested
str=`dmsetup table --showkeys --inactive $PREFIX-crypt | cut -d ' ' -f 5`
test $str = $HEXKEY_32 || die

# let's swap the tables
dmsetup resume $PREFIX-crypt
dmsetup load $PREFIX-crypt --table "0 1 crypt $CIPHER :32:logon:$KEY_NAME 0 $TESTDIR/dev$prefix/mapper/$PREFIX-zero 0"

str=`dmsetup table --inactive $PREFIX-crypt | cut -d ' ' -f 5`
test $str = :32:logon:$KEY_NAME || die
str=`dmsetup table --showkeys --inactive $PREFIX-crypt | cut -d ' ' -f 5`
test $str = :32:logon:$KEY_NAME || die

str=`dmsetup table $PREFIX-crypt | cut -d ' ' -f 5`
test $str = $HIDENKEY_32 || die
str=`dmsetup table --showkeys $PREFIX-crypt | cut -d ' ' -f 5`
test $str = $HEXKEY_32 || die
@@ -1,41 +0,0 @@
#!/bin/sh
# Copyright (C) 2017 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

SKIP_WITH_LVMLOCKD=1
SKIP_WITH_LVMPOLLD=1

. lib/inittest

which mkfs.ext4 || skip
aux have_raid 1 3 5 || skip

aux prepare_vg 4

for d in $dev1 $dev2 $dev3 $dev4
do
aux delay_dev $d 1
done

#
# Test writemostly prohibited on resynchronizing raid1
#

# Create 4-way raid1 LV
lvcreate -aey --ty raid1 -m 3 -L 32M -n $lv1 $vg
not lvchange -y --writemostly $dev1 $vg/$lv1
check lv_field $vg/$lv1 segtype "raid1"
check lv_field $vg/$lv1 stripes 4
check lv_attr_bit health $vg/${lv1}_rimage_0 "-"
aux wait_for_sync $vg $lv1
lvchange -y --writemostly $dev1 $vg/$lv1
check lv_attr_bit health $vg/${lv1}_rimage_0 "w"

vgremove -ff $vg
Some files were not shown because too many files have changed in this diff.