Mirror of git://sourceware.org/git/lvm2.git (synced 2025-10-26 07:33:16 +03:00)

Compare commits: dev-bmr-dm...dev-lvmguy (147 commits)
Commit SHA1s:

d98932039f efecf4a1d2 d2dbe71fb3 1c1ce2739e 2a65e2d49e 7c14004f69 d409fec079
921d5972e8 86a660be7b e13639053b d22cccdf63 a2d2cd8777 2396c50689 4d29d9afb2
5f2639d01d 13dd1ca757 2260c6d1e6 d8514b24e1 aff62c74b7 f16ad760cb cd24e6de89
665479b818 2a21a19d90 87c89ac279 13944738d4 fab0d63121 87fe9328d3 6064b0359a
50e0345f9d 10cb8e0ec0 3cf3943898 f88ce5fb99 56deed9d54 00f299b932 238a79aac4
995f7aa92f 46d6c7d3ad 772834e40a 582a272b3f 80b717af0c 298b11aed1 1cb95fa5a0
15f51bc421 22bee4fbdb 81c0ed9fc6 6a5c9ba349 2556498ee1 4a30f5f9b0 286d39ee3c
d3af0e7528 7417c8acfa d2c5bb5c70 1236e0ed29 1dddb068c9 c0f2a59993 f46b28bdb6
c868562a79 22457ed4d9 fc0e49297d 3350eb67cc 6118b0fb93 65b82a8072 05dd566a52
5a87d8667d 5ab051df7a c816e8b636 717363bb94 b2fd5b31d3 698abdde16 13a6368522
a1386dcca0 46abc28a48 8152e4a99e 2224b6a701 c3faa5816d 942dc07402 f067c0ad78
c3e1838382 15d9f2850e 013c080756 d9d5b8414b 9a0f0c70bf 23a1ced439 3642deb4f3
4d2c3502e7 5c779b3231 db26a82f2f 041c2fef88 46b2599d5c a801b92b2c 86d8ab493b
95e38607ec 0e3e611a13 d71aaca07b fa2a728a39 254bffb95d 35b9d4d6e9 52c60b7e7b
9c6c55c314 1e2420bca8 19267fa6aa 2923d9e47d c3b1f1a07a a3579aafc5 7cbee8f31a
b6301aa977 a39c828c01 717d0c6b94 b185a3e333 aa0c735e2c 416f951283 a7d2ee4bc2
0844b20f98 375e38709c 2a9eda1229 8296b99a89 28ea66d46d caa2094e33 c908a8b131
035c614c19 baba3f8e2a 68ec6940e6 55eaabd118 79f31008fa 69fe8729f3 46a772fbc4
91c4bd14d0 cfb6ef654d 51d03acb1c 48abbdf452 811d137d3f 9fe8c2da36 12bbfbe89d
fb3f4ed72d dae4f53acb a4bbaa3b89 d8568552e4 3673ce48e0 96f331fe05 7e92535d47
60ddd05f16 875ce04c61 3eccbb4b47 945842fa68 681d69c70a a010cede6e 83a1907586
@@ -59,6 +59,8 @@ liblvm: lib
daemons: lib libdaemon tools
tools: lib libdaemon device-mapper
po: tools daemons
man: tools
all_man: tools
scripts: liblvm libdm

lib.device-mapper: include.device-mapper
WHATS_NEW (19 lines changed)
@@ -1,5 +1,24 @@
Version 2.02.169 -
=====================================
Add lvconvert --swapmetadata, new specific way to swap pool metadata LVs.
Add lvconvert --startpoll, new specific way to start polling conversions.
Add lvconvert --mergethin, new specific way to merge thin snapshots.
Add lvconvert --mergemirrors, new specific way to merge split mirrors.
Add lvconvert --mergesnapshot, new specific way to combine cow LVs.
Split up lvconvert code based on command definitions.
Split up lvchange code based on command definitions.
Generate help output and man pages from command definitions.
Verify all command line items against command definition.
Match every command run to one command definition.
Specify every allowed command definition/syntax in command-lines.in.
Add extra memory page when limiting pthread stack size in clvmd.
Support striped/raid0* <-> raid10_near conversions.
Support shrinking of RaidLVs.
Support region size changes on existing RaidLVs.
Avoid parallel usage of cpg_mcast_joined() in clvmd with corosync.
Support raid6_{ls,rs,la,ra}_6 segment types and conversions from/to them.
Support raid6_n_6 segment type and conversions from/to it.
Support raid5_n segment type and conversions from/to it.
Support new internal command _dmeventd_thin_command.
Introduce new dmeventd/thin_command configurable setting.
Use new default units 'r' for displaying sizes.
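Several of the entries above point at the command-definition rework: every allowed command form is spelled out in command-lines.in, each invocation is matched to exactly one definition, and the help output and man pages are generated from the same data. Below is a toy Python sketch of that matching idea, with invented definitions and option sets; it is not lvm2's parser, only an illustration of "match every command run to one command definition".

```python
"""Toy illustration of matching a command line against command definitions.
The definitions and option names here are invented for the example; the real
rules live in lvm2's command-lines.in and its C parser."""

COMMAND_DEFS = [
    {"name": "lvconvert --mergethin", "required": {"--mergethin"}, "optional": set()},
    {"name": "lvconvert --swapmetadata", "required": {"--swapmetadata"},
     "optional": {"--poolmetadata"}},
]


def match_command(options):
    """Return the single definition whose required options are all present and
    whose allowed option set covers everything supplied, else raise."""
    opts = set(options)
    matches = [d for d in COMMAND_DEFS
               if d["required"] <= opts and opts <= d["required"] | d["optional"]]
    if len(matches) != 1:
        raise ValueError("command matches %d definitions" % len(matches))
    return matches[0]


print(match_command(["--swapmetadata", "--poolmetadata"])["name"])
```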
@@ -1,5 +1,8 @@
Version 1.02.138 -
=====================================
Add extra memory page when limiting pthread stack size in dmeventd.
Avoid immediate resume when preloaded device is smaller.
Do not suppress kernel key description in dmsetup table output.
Support configurable command executed from dmeventd thin plugin.
Support new R|r human readable units output format.
Thin dmeventd plugin reacts faster on lvextend failure path with umount.
@@ -2054,6 +2054,7 @@ dmeventd {
	# or metadata volume gets above 50%.
	# Command which starts with 'lvm ' prefix is internal lvm command.
	# You can write your own handler to customise behaviour in more details.
	# User handler is specified with the full path starting with '/'.
	# This configuration option has an automatic default value.
	# thin_command = "lvm lvextend --use-policies"
configure (vendored, 198 lines changed)
@@ -821,6 +821,8 @@ HAVE_PIE
POW_LIB
LIBOBJS
ALLOCA
SORT
WC
CHMOD
CSCOPE_CMD
CFLOW_CMD
@@ -5234,6 +5236,202 @@ else
  CHMOD="$ac_cv_path_CHMOD"
fi

(Regenerated autoconf boilerplate: the standard cached-$PATH searches for the
`wc` and `sort` programs, first with the ${ac_tool_prefix} name and then bare,
with the usual cross-compilation warning fallback, setting the WC and SORT
variables.)

################################################################################
ac_header_dirent=no
@@ -86,6 +86,8 @@ AC_PROG_RANLIB
AC_PATH_TOOL(CFLOW_CMD, cflow)
AC_PATH_TOOL(CSCOPE_CMD, cscope)
AC_PATH_TOOL(CHMOD, chmod)
AC_PATH_TOOL(WC, wc)
AC_PATH_TOOL(SORT, sort)

################################################################################
dnl -- Check for header files.
@@ -532,6 +532,7 @@ static int _cluster_fd_callback(struct local_client *fd, char *buf, int len,
static int _cluster_send_message(const void *buf, int msglen, const char *csid,
				 const char *errtext)
{
	static pthread_mutex_t _mutex = PTHREAD_MUTEX_INITIALIZER;
	struct iovec iov[2];
	cs_error_t err;
	int target_node;
@@ -546,7 +547,10 @@ static int _cluster_send_message(const void *buf, int msglen, const char *csid,
	iov[1].iov_base = (char *)buf;
	iov[1].iov_len = msglen;

	pthread_mutex_lock(&_mutex);
	err = cpg_mcast_joined(cpg_handle, CPG_TYPE_AGREED, iov, 2);
	pthread_mutex_unlock(&_mutex);

	return cs_to_errno(err);
}
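The hunk above serializes cpg_mcast_joined() behind a static mutex, matching the changelog entry "Avoid parallel usage of cpg_mcast_joined() in clvmd with corosync". A minimal Python sketch of the same pattern, a module-level lock around a call that must not run from two threads at once; the stand-in function below is not the corosync API.

```python
import threading

# One module-level lock guards the non-reentrant call, mirroring the static
# pthread mutex added around cpg_mcast_joined() above.
_send_lock = threading.Lock()


def _unsafe_multicast(payload):
    # Stand-in for a call such as cpg_mcast_joined() that must not be entered
    # concurrently; it only returns the payload length here.
    return len(payload)


def send_to_cluster(payload):
    with _send_lock:  # serialize all senders
        return _unsafe_multicast(payload)


print(send_to_cluster(b"hello"))
```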
@@ -517,7 +517,7 @@ int main(int argc, char *argv[])
	/* Initialise the LVM thread variables */
	dm_list_init(&lvm_cmd_head);
	if (pthread_attr_init(&stack_attr) ||
	    pthread_attr_setstacksize(&stack_attr, STACK_SIZE)) {
	    pthread_attr_setstacksize(&stack_attr, STACK_SIZE + getpagesize())) {
		log_sys_error("pthread_attr_init", "");
		exit(1);
	}
@@ -468,7 +468,7 @@ static int _pthread_create_smallstack(pthread_t *t, void *(*fun)(void *), void *
	/*
	 * We use a smaller stack since it gets preallocated in its entirety
	 */
	pthread_attr_setstacksize(&attr, THREAD_STACK_SIZE);
	pthread_attr_setstacksize(&attr, THREAD_STACK_SIZE + getpagesize());

	/*
	 * If no-one will be waiting, we need to detach.
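Both clvmd and dmeventd now add getpagesize() to the requested pthread stack size, per the "Add extra memory page when limiting pthread stack size" changelog entries. A hedged Python analogue of the idea, padding a deliberately small thread stack by one page; the size constant is made up for the example and is not the daemons' value.

```python
import resource
import threading

# Illustrative value only; the daemons use their own compile-time constants.
THREAD_STACK_SIZE = 128 * 1024


def start_small_stack_thread(fn, *args):
    """Start a worker with a deliberately small stack, padded by one page.

    Reserving getpagesize() extra keeps the usable stack at the intended size
    even when a guard page is carved out of the allocation.
    """
    threading.stack_size(THREAD_STACK_SIZE + resource.getpagesize())
    worker = threading.Thread(target=fn, args=args)
    worker.start()
    return worker


if __name__ == "__main__":
    start_small_stack_thread(print, "worker running on a small stack").join()
```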
@@ -184,16 +184,12 @@ int register_device(const char *device,
		goto_bad;

	if (!dmeventd_lvm2_command(state->mem, state->cmd_lvscan, sizeof(state->cmd_lvscan),
				   "lvscan --cache", device)) {
		dmeventd_lvm2_exit_with_pool(state);
				   "lvscan --cache", device))
		goto_bad;
	}

	if (!dmeventd_lvm2_command(state->mem, state->cmd_lvconvert, sizeof(state->cmd_lvconvert),
				   "lvconvert --repair --use-policies", device)) {
		dmeventd_lvm2_exit_with_pool(state);
				   "lvconvert --repair --use-policies", device))
		goto_bad;
	}

	*user = state;

@@ -203,6 +199,9 @@ int register_device(const char *device,
bad:
	log_error("Failed to monitor mirror %s.", device);

	if (state)
		dmeventd_lvm2_exit_with_pool(state);

	return 0;
}
@@ -140,10 +140,8 @@ int register_device(const char *device,
				   "lvscan --cache", device) ||
	    !dmeventd_lvm2_command(state->mem, state->cmd_lvconvert, sizeof(state->cmd_lvconvert),
				   "lvconvert --config devices{ignore_suspended_devices=1} "
				   "--repair --use-policies", device)) {
		dmeventd_lvm2_exit_with_pool(state);
				   "--repair --use-policies", device))
		goto_bad;
	}

	*user = state;

@@ -153,6 +151,9 @@ int register_device(const char *device,
bad:
	log_error("Failed to monitor RAID %s.", device);

	if (state)
		dmeventd_lvm2_exit_with_pool(state);

	return 0;
}
@@ -254,10 +254,8 @@ int register_device(const char *device,

	if (!dmeventd_lvm2_command(state->mem, state->cmd_lvextend,
				   sizeof(state->cmd_lvextend),
				   "lvextend --use-policies", device)) {
		dmeventd_lvm2_exit_with_pool(state);
				   "lvextend --use-policies", device))
		goto_bad;
	}

	state->percent_check = CHECK_MINIMUM;
	*user = state;
@@ -268,6 +266,9 @@ int register_device(const char *device,
bad:
	log_error("Failed to monitor snapshot %s.", device);

	if (state)
		dmeventd_lvm2_exit_with_pool(state);

	return 0;
}
@@ -18,7 +18,6 @@

#include <sys/wait.h>
#include <stdarg.h>
#include <pthread.h>

/* TODO - move this mountinfo code into library to be reusable */
#ifdef __linux__
@@ -59,8 +58,8 @@ struct dso_state {
	int restore_sigset;
	sigset_t old_sigset;
	pid_t pid;
	char **argv;
	char cmd_str[1024];
	char *argv[3];
	char *cmd_str;
};

DM_EVENT_LOG_FN("thin")
@@ -86,7 +85,7 @@ static int _run_command(struct dso_state *state)
	} else {
		/* For an error event it's for a user to check status and decide */
		env[1] = NULL;
		log_debug("Error event processing");
		log_debug("Error event processing.");
	}

	log_verbose("Executing command: %s", state->cmd_str);
@@ -116,7 +115,7 @@ static int _use_policy(struct dm_task *dmt, struct dso_state *state)
#if THIN_DEBUG
	log_debug("dmeventd executes: %s.", state->cmd_str);
#endif
	if (state->argv)
	if (state->argv[0])
		return _run_command(state);

	if (!dmeventd_lvm2_run_with_lock(state->cmd_str)) {
@@ -353,34 +352,41 @@ int register_device(const char *device,
		    void **user)
{
	struct dso_state *state;
	int maxcmd;
	char *str;
	char cmd_str[PATH_MAX + 128 + 2]; /* cmd ' ' vg/lv \0 */

	if (!dmeventd_lvm2_init_with_pool("thin_pool_state", state))
		goto_bad;

	if (!dmeventd_lvm2_command(state->mem, state->cmd_str,
				   sizeof(state->cmd_str),
				   "_dmeventd_thin_command", device)) {
		dmeventd_lvm2_exit_with_pool(state);
	if (!dmeventd_lvm2_command(state->mem, cmd_str, sizeof(cmd_str),
				   "_dmeventd_thin_command", device))
		goto_bad;
	}

	if (strncmp(state->cmd_str, "lvm ", 4)) {
		maxcmd = 2; /* space for last NULL element */
		for (str = state->cmd_str; *str; str++)
			if (*str == ' ')
				maxcmd++;
		if (!(str = dm_pool_strdup(state->mem, state->cmd_str)) ||
		    !(state->argv = dm_pool_zalloc(state->mem, maxcmd * sizeof(char *)))) {
			log_error("Failed to allocate memory for command.");
	if (strncmp(cmd_str, "lvm ", 4) == 0) {
		if (!(state->cmd_str = dm_pool_strdup(state->mem, cmd_str + 4))) {
			log_error("Failed to copy lvm command.");
			goto bad;
		}
	} else if (cmd_str[0] == '/') {
		if (!(state->cmd_str = dm_pool_strdup(state->mem, cmd_str))) {
			log_error("Failed to copy thin command.");
			goto bad;
		}

		dm_split_words(str, maxcmd - 1, 0, state->argv);
		/* Find last space before 'vg/lv' */
		if (!(str = strrchr(state->cmd_str, ' ')))
			goto inval;

		if (!(state->argv[0] = dm_pool_strndup(state->mem, state->cmd_str,
						       str - state->cmd_str))) {
			log_error("Failed to copy command.");
			goto bad;
		}

		state->argv[1] = str + 1; /* 1 argument - vg/lv */
		_init_thread_signals(state);
	} else
		memmove(state->cmd_str, state->cmd_str + 4, strlen(state->cmd_str + 4) + 1);
	} else /* Unuspported command format */
		goto inval;

	state->pid = -1;
	*user = state;
@@ -388,9 +394,14 @@ int register_device(const char *device,
	log_info("Monitoring thin pool %s.", device);

	return 1;
inval:
	log_error("Invalid command for monitoring: %s.", cmd_str);
bad:
	log_error("Failed to monitor thin pool %s.", device);

	if (state)
		dmeventd_lvm2_exit_with_pool(state);

	return 0;
}
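The rewritten register_device() accepts either an internal command (the "lvm " prefix is stripped and the rest goes through dmeventd_lvm2_run_with_lock()) or an external handler given as an absolute path, executed with the pool's vg/lv name as its single argument (state->argv[1]). Below is a minimal sketch of such an external handler, assuming only that one vg/lv argument; the lvextend reaction simply copies the default shown in the lvm.conf hunk and is illustrative, not part of lvm2.

```python
#!/usr/bin/env python3
"""Minimal sketch of an external dmeventd thin_command handler."""
import subprocess
import sys


def main() -> int:
    if len(sys.argv) != 2:
        print("usage: thin-handler VG/LV", file=sys.stderr)
        return 1

    vg_lv = sys.argv[1]
    try:
        # React to the pool filling up by applying the configured policies.
        result = subprocess.run(["lvm", "lvextend", "--use-policies", vg_lv],
                                capture_output=True, text=True)
    except FileNotFoundError:
        print("lvm binary not found", file=sys.stderr)
        return 1

    if result.returncode != 0:
        print("lvextend failed for %s: %s" % (vg_lv, result.stderr.strip()),
              file=sys.stderr)
    return result.returncode


if __name__ == "__main__":
    sys.exit(main())
```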
@@ -295,7 +295,7 @@ def vg_lv_snapshot(vg_name, snapshot_options, name, size_bytes):
    return call(cmd)


def vg_lv_create_linear(vg_name, create_options, name, size_bytes, thin_pool):
def _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool):
    cmd = ['lvcreate']
    cmd.extend(options_to_cli_args(create_options))

@@ -303,20 +303,18 @@ def vg_lv_create_linear(vg_name, create_options, name, size_bytes, thin_pool):
        cmd.extend(['--size', str(size_bytes) + 'B'])
    else:
        cmd.extend(['--thin', '--size', str(size_bytes) + 'B'])
    return cmd


def vg_lv_create_linear(vg_name, create_options, name, size_bytes, thin_pool):
    cmd = _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool)
    cmd.extend(['--name', name, vg_name])
    return call(cmd)


def vg_lv_create_striped(vg_name, create_options, name, size_bytes,
                         num_stripes, stripe_size_kb, thin_pool):
    cmd = ['lvcreate']
    cmd.extend(options_to_cli_args(create_options))

    if not thin_pool:
        cmd.extend(['--size', str(size_bytes) + 'B'])
    else:
        cmd.extend(['--thin', '--size', str(size_bytes) + 'B'])

    cmd = _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool)
    cmd.extend(['--stripes', str(num_stripes)])

    if stripe_size_kb != 0:
@@ -272,6 +272,26 @@ class LvCommon(AutomatedProperties):
        self.state = object_state
        self._move_pv = self._get_move_pv()

    @staticmethod
    def handle_execute(rc, out, err):
        if rc == 0:
            cfg.load()
        else:
            # Need to work on error handling, need consistent
            raise dbus.exceptions.DBusException(
                LV_INTERFACE,
                'Exit code %s, stderr = %s' % (str(rc), err))

    @staticmethod
    def validate_dbus_object(lv_uuid, lv_name):
        dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
        if not dbo:
            raise dbus.exceptions.DBusException(
                LV_INTERFACE,
                'LV with uuid %s and name %s not present!' %
                (lv_uuid, lv_name))
        return dbo

    @property
    def VolumeType(self):
        type_map = {'C': 'Cache', 'm': 'mirrored',
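The two staticmethods above are the heart of this D-Bus layer cleanup: a shared post-command step (handle_execute) and a shared existence check (validate_dbus_object). The hunks that follow apply them across the Lv classes; the self-contained sketch below shows the shape of that usage, with local stand-ins replacing the real cfg and cmdhandler modules.

```python
"""Illustrative sketch (not the project's code) of how the helpers are used."""


class DBusError(Exception):
    """Stand-in for dbus.exceptions.DBusException."""


def fake_lv_remove(lv_name, options):
    # Stand-in for cmdhandler.lv_remove(); returns (rc, stdout, stderr).
    return 0, "", ""


class LvSketch:
    @staticmethod
    def handle_execute(rc, out, err):
        if rc == 0:
            pass  # the real code refreshes state here with cfg.load()
        else:
            raise DBusError('Exit code %s, stderr = %s' % (str(rc), err))

    @staticmethod
    def validate_dbus_object(lv_uuid, lv_name):
        dbo = {"uuid": lv_uuid, "name": lv_name}  # real code: cfg.om lookup
        if not dbo:
            raise DBusError('LV with uuid %s and name %s not present!'
                            % (lv_uuid, lv_name))
        return dbo

    @staticmethod
    def remove(lv_uuid, lv_name, remove_options):
        # The former lookup/if/else ladder collapses to three steps.
        LvSketch.validate_dbus_object(lv_uuid, lv_name)
        rc, out, err = fake_lv_remove(lv_name, remove_options)
        LvSketch.handle_execute(rc, out, err)
        return '/'


print(LvSketch.remove("some-uuid", "vg/lv", {}))
```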
The remaining class Lv hunks apply the same refactor: each method now calls LvCommon.validate_dbus_object() up front and funnels the command result through LvCommon.handle_execute() instead of open-coding the dbo lookup and the rc == 0 / DBusException ladder.

@@ -408,24 +428,10 @@ class Lv(LvCommon):
_remove(): validate_dbus_object(), cmdhandler.lv_remove(), handle_execute(), return '/'.
@@ -443,24 +449,11 @@ class Lv(LvCommon):
_rename(): validate_dbus_object(), cmdhandler.lv_rename(), handle_execute(), return '/'.
@@ -500,32 +493,21 @@ class Lv(LvCommon):
_snap_shot(): validate_dbus_object(); the thick-snapshot sizing (SizeBytes / 80, rounded up to a 512-byte multiple) is kept; cmdhandler.vg_lv_snapshot(), handle_execute(), then return the object path of the new vg/name.
@@ -548,38 +530,24 @@ class Lv(LvCommon):
_resize(): validate_dbus_object(); the PV-destination checks on pv_dests_and_ranges are kept; cmdhandler.lv_resize(), handle_execute(), return "/".
@@ -612,23 +580,11 @@ class Lv(LvCommon):
_lv_activate_deactivate(): validate_dbus_object(), cmdhandler.activate_deactivate('lvchange', ...), handle_execute(), return '/'.
@@ -660,25 +616,11 @@ class Lv(LvCommon):
_add_rm_tags(): validate_dbus_object(), cmdhandler.lv_tag(), handle_execute(), return '/'.
@@ -736,24 +678,13 @@ class LvThinPool(Lv):
_lv_create(): validate_dbus_object(), cmdhandler.lv_lv_create(), handle_execute(), then return the object path of the newly created LV.
@@ -790,14 +721,13 @@ class LvCachePool(Lv):
@@ -809,22 +739,14 @@ class LvCachePool(Lv):
_cache_lv(): validate_dbus_object() for the cache pool; the LV to cache is still looked up by object path, and a single DBusException ('LV to cache with object path %s not present!') replaces the old message-building branch; on success the converted LV path is returned.
@@ -855,31 +777,25 @@ class LvCacheLv(Lv):
_detach_lv(): validate_dbus_object(); on a successful cmdhandler.lv_detach_cache() the cache pool and cached LV dbus objects are removed, the state reloaded, and the uncached LV path returned.
The DataStore hunks extract the duplicated PV post-processing into a shared helper.

@@ -69,18 +69,7 @@ class DataStore(object):
@@ -93,6 +82,20 @@ class DataStore(object):
A new staticmethod _pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup) now holds the loop that records which PVs belong to which VG and builds the /dev/<name> to pv uuid lookup; _parse_pvs() builds its OrderedDict of PVs, calls DataStore._pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup) and returns c_pvs, c_lookup, c_pvs_in_vgs.
@@ -132,17 +135,7 @@ class DataStore(object):
The second copy of that loop (in the path that also marks free segments with pvseg_start/pvseg_size and segtype = 'free') is replaced by the same _pvs_parse_common() call.
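A self-contained sketch of the de-duplication described above, factoring the shared PV post-processing into one helper; the sample record and the setdefault() form are illustrative, not the project's code.

```python
from collections import OrderedDict


def _pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup):
    # Shared post-processing that used to be duplicated in both PV parsers.
    for p in c_pvs.values():
        c_pvs_in_vgs.setdefault(p['vg_uuid'], [])
        if p['vg_name']:
            c_pvs_in_vgs[p['vg_uuid']].append((p['pv_name'], p['pv_uuid']))
        # Lookup for translating between /dev/<name> and pv uuid.
        c_lookup[p['pv_name']] = p['pv_uuid']


def parse_pvs(rows):
    pvs = sorted(rows, key=lambda pk: pk['pv_name'])
    c_pvs, c_lookup, c_pvs_in_vgs = OrderedDict(), {}, {}
    for p in pvs:
        c_pvs[p['pv_uuid']] = p
    _pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup)
    return c_pvs, c_lookup, c_pvs_in_vgs


sample = [{'pv_uuid': 'u1', 'pv_name': '/dev/sda1',
           'vg_uuid': 'vgu', 'vg_name': 'vg0'}]
print(parse_pvs(sample)[2])
```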
The Manager hunks follow the same pattern.

@@ -30,6 +30,16 @@ class Manager(AutomatedProperties):
Adds a Manager.handle_execute(rc, out, err) staticmethod: on rc == 0 it calls cfg.load(), otherwise it raises dbus.exceptions.DBusException(MANAGER_INTERFACE, 'Exit code %s, stderr = %s' % (str(rc), err)).
@@ -41,15 +51,8 @@ class Manager(AutomatedProperties):
_pv_create(): cmdhandler.pv_create(), Manager.handle_execute(), then return cfg.om.get_object_path_by_lvm_id(device).
@@ -76,14 +79,8 @@ class Manager(AutomatedProperties):
The VG-creation method: cmdhandler.vg_create(), Manager.handle_execute(), then return cfg.om.get_object_path_by_lvm_id(name).
@@ -200,15 +197,8 @@ class Manager(AutomatedProperties):
The scan method (activate, cache, device_path, major_minor, scan_options): Manager.handle_execute(), then return '/'.
The Pv hunks mirror the Lv changes.

@@ -135,23 +135,30 @@ class Pv(AutomatedProperties):
_remove() becomes Pv.validate_dbus_object(), cmdhandler.pv_remove(), Pv.handle_execute(), return '/'. The hunk also adds the Pv.handle_execute(rc, out, err) and Pv.validate_dbus_object(pv_uuid, pv_name) staticmethods, raising dbus.exceptions.DBusException on PV_INTERFACE for a failed command or a missing object.
@@ -168,22 +175,11 @@ class Pv(AutomatedProperties):
_resize(): validate_dbus_object(), cmdhandler.pv_resize(), handle_execute(), return '/'.
@@ -201,21 +197,10 @@ class Pv(AutomatedProperties):
_allocation_enabled(): validate_dbus_object(), cmdhandler.pv_allocatable(), handle_execute(), return '/'.
@@ -145,29 +145,35 @@ class Vg(AutomatedProperties):
|
||||
|
||||
@staticmethod
|
||||
def fetch_new_lv(vg_name, lv_name):
|
||||
cfg.load()
|
||||
return cfg.om.get_object_path_by_lvm_id("%s/%s" % (vg_name, lv_name))
|
||||
|
||||
@staticmethod
|
||||
def handle_execute(rc, out, err):
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
else:
|
||||
# Need to work on error handling, need consistent
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
|
||||
@staticmethod
|
||||
def validate_dbus_object(vg_uuid, vg_name):
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(vg_uuid, vg_name)
|
||||
if not dbo:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(vg_uuid, vg_name))
|
||||
return dbo
|
||||
|
||||
@staticmethod
|
||||
def _rename(uuid, vg_name, new_name, rename_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
rc, out, err = cmdhandler.vg_rename(vg_name, new_name,
|
||||
rename_options)
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
else:
|
||||
# Need to work on error handling, need consistent
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
rc, out, err = cmdhandler.vg_rename(
|
||||
vg_name, new_name, rename_options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -184,24 +190,10 @@ class Vg(AutomatedProperties):
|
||||
@staticmethod
|
||||
def _remove(uuid, vg_name, remove_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
# Remove the VG, if successful then remove from the model
|
||||
rc, out, err = cmdhandler.vg_remove(vg_name, remove_options)
|
||||
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
else:
|
||||
# Need to work on error handling, need consistent
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
# Remove the VG, if successful then remove from the model
|
||||
rc, out, err = cmdhandler.vg_remove(vg_name, remove_options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -216,26 +208,9 @@ class Vg(AutomatedProperties):
|
||||
|
||||
@staticmethod
|
||||
def _change(uuid, vg_name, change_options):
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
rc, out, err = cmdhandler.vg_change(change_options, vg_name)
|
||||
|
||||
# To use an example with d-feet (Method input)
|
||||
# {"activate": __import__('gi.repository.GLib', globals(),
|
||||
# locals(), ['Variant']).Variant("s", "n")}
|
||||
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
rc, out, err = cmdhandler.vg_change(change_options, vg_name)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
# TODO: This should be broken into a number of different methods
|
||||
@@ -256,34 +231,24 @@ class Vg(AutomatedProperties):
|
||||
@staticmethod
|
||||
def _reduce(uuid, vg_name, missing, pv_object_paths, reduce_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
pv_devices = []
|
||||
pv_devices = []
|
||||
|
||||
# If pv_object_paths is not empty, then get the device paths
|
||||
if pv_object_paths and len(pv_object_paths) > 0:
|
||||
for pv_op in pv_object_paths:
|
||||
pv = cfg.om.get_object_by_path(pv_op)
|
||||
if pv:
|
||||
pv_devices.append(pv.lvm_id)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'PV Object path not found = %s!' % pv_op)
|
||||
# If pv_object_paths is not empty, then get the device paths
|
||||
if pv_object_paths and len(pv_object_paths) > 0:
|
||||
for pv_op in pv_object_paths:
|
||||
pv = cfg.om.get_object_by_path(pv_op)
|
||||
if pv:
|
||||
pv_devices.append(pv.lvm_id)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'PV Object path not found = %s!' % pv_op)
|
||||
|
||||
rc, out, err = cmdhandler.vg_reduce(vg_name, missing, pv_devices,
|
||||
reduce_options)
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE, 'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
rc, out, err = cmdhandler.vg_reduce(vg_name, missing, pv_devices,
|
||||
reduce_options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -300,36 +265,26 @@ class Vg(AutomatedProperties):
|
||||
@staticmethod
|
||||
def _extend(uuid, vg_name, pv_object_paths, extend_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
extend_devices = []
|
||||
extend_devices = []
|
||||
|
||||
for i in pv_object_paths:
|
||||
pv = cfg.om.get_object_by_path(i)
|
||||
if pv:
|
||||
extend_devices.append(pv.lvm_id)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE, 'PV Object path not found = %s!' % i)
|
||||
|
||||
if len(extend_devices):
|
||||
rc, out, err = cmdhandler.vg_extend(vg_name, extend_devices,
|
||||
extend_options)
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
for i in pv_object_paths:
|
||||
pv = cfg.om.get_object_by_path(i)
|
||||
if pv:
|
||||
extend_devices.append(pv.lvm_id)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE, 'No pv_object_paths provided!')
|
||||
VG_INTERFACE, 'PV Object path not found = %s!' % i)
|
||||
|
||||
if len(extend_devices):
|
||||
rc, out, err = cmdhandler.vg_extend(vg_name, extend_devices,
|
||||
extend_options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
VG_INTERFACE, 'No pv_object_paths provided!')
|
||||
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -366,33 +321,24 @@ class Vg(AutomatedProperties):
|
||||
create_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
pv_dests = []
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
if len(pv_dests_and_ranges):
|
||||
for pr in pv_dests_and_ranges:
|
||||
pv_dbus_obj = cfg.om.get_object_by_path(pr[0])
|
||||
if not pv_dbus_obj:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'PV Destination (%s) not found' % pr[0])
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
|
||||
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
|
||||
if len(pv_dests_and_ranges):
|
||||
for pr in pv_dests_and_ranges:
|
||||
pv_dbus_obj = cfg.om.get_object_by_path(pr[0])
|
||||
if not pv_dbus_obj:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'PV Destination (%s) not found' % pr[0])
|
||||
|
||||
rc, out, err = cmdhandler.vg_lv_create(
|
||||
vg_name, create_options, name, size_bytes, pv_dests)
|
||||
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
|
||||
|
||||
if rc == 0:
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
rc, out, err = cmdhandler.vg_lv_create(
|
||||
vg_name, create_options, name, size_bytes, pv_dests)
|
||||
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_INTERFACE,
|
||||
@@ -428,25 +374,13 @@ class Vg(AutomatedProperties):
|
||||
def _lv_create_linear(uuid, vg_name, name, size_bytes,
|
||||
thin_pool, create_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
rc, out, err = cmdhandler.vg_lv_create_linear(
|
||||
vg_name, create_options, name, size_bytes, thin_pool)
|
||||
rc, out, err = cmdhandler.vg_lv_create_linear(
|
||||
vg_name, create_options, name, size_bytes, thin_pool)
|
||||
|
||||
if rc == 0:
|
||||
created_lv = Vg.fetch_new_lv(vg_name, name)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
|
||||
return created_lv
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_INTERFACE,
|
||||
@@ -466,24 +400,12 @@ class Vg(AutomatedProperties):
|
||||
def _lv_create_striped(uuid, vg_name, name, size_bytes, num_stripes,
|
||||
stripe_size_kb, thin_pool, create_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
rc, out, err = cmdhandler.vg_lv_create_striped(
|
||||
vg_name, create_options, name, size_bytes,
|
||||
num_stripes, stripe_size_kb, thin_pool)
|
||||
if rc == 0:
|
||||
created_lv = Vg.fetch_new_lv(vg_name, name)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE, 'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
|
||||
return created_lv
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
rc, out, err = cmdhandler.vg_lv_create_striped(
|
||||
vg_name, create_options, name, size_bytes,
|
||||
num_stripes, stripe_size_kb, thin_pool)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_INTERFACE,
|
||||
@@ -506,25 +428,11 @@ class Vg(AutomatedProperties):
|
||||
def _lv_create_mirror(uuid, vg_name, name, size_bytes,
|
||||
num_copies, create_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
rc, out, err = cmdhandler.vg_lv_create_mirror(
|
||||
vg_name, create_options, name, size_bytes, num_copies)
|
||||
if rc == 0:
|
||||
created_lv = Vg.fetch_new_lv(vg_name, name)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
|
||||
return created_lv
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
rc, out, err = cmdhandler.vg_lv_create_mirror(
|
||||
vg_name, create_options, name, size_bytes, num_copies)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_INTERFACE,
|
||||
@@ -545,26 +453,12 @@ class Vg(AutomatedProperties):
|
||||
def _lv_create_raid(uuid, vg_name, name, raid_type, size_bytes,
|
||||
num_stripes, stripe_size_kb, create_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
|
||||
|
||||
if dbo:
|
||||
rc, out, err = cmdhandler.vg_lv_create_raid(
|
||||
vg_name, create_options, name, raid_type, size_bytes,
|
||||
num_stripes, stripe_size_kb)
|
||||
if rc == 0:
|
||||
created_lv = Vg.fetch_new_lv(vg_name, name)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'VG with uuid %s and name %s not present!' %
|
||||
(uuid, vg_name))
|
||||
|
||||
return created_lv
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
rc, out, err = cmdhandler.vg_lv_create_raid(
|
||||
vg_name, create_options, name, raid_type, size_bytes,
|
||||
num_stripes, stripe_size_kb)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_INTERFACE,
|
||||
@@ -585,33 +479,27 @@ class Vg(AutomatedProperties):
def _create_pool(uuid, vg_name, meta_data_lv, data_lv,
create_options, create_method):
# Make sure we have a dbus object representing it
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
Vg.validate_dbus_object(uuid, vg_name)

# Retrieve the full names for the metadata and data lv
md = cfg.om.get_object_by_path(meta_data_lv)
data = cfg.om.get_object_by_path(data_lv)

if dbo and md and data:
if md and data:

new_name = data.Name

rc, out, err = create_method(
md.lv_full_name(), data.lv_full_name(), create_options)

if rc == 0:
mt_remove_dbus_objects((md, data))
cache_pool_lv = Vg.fetch_new_lv(vg_name, new_name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))

Vg.handle_execute(rc, out, err)

else:
msg = ""

if not dbo:
msg += 'VG with uuid %s and name %s not present!' % \
(uuid, vg_name)

if not md:
msg += 'Meta data LV with object path %s not present!' % \
(meta_data_lv)
@@ -622,7 +510,7 @@ class Vg(AutomatedProperties):

raise dbus.exceptions.DBusException(VG_INTERFACE, msg)

return cache_pool_lv
return Vg.fetch_new_lv(vg_name, new_name)

@dbus.service.method(
dbus_interface=VG_INTERFACE,
@@ -656,33 +544,21 @@ class Vg(AutomatedProperties):
pv_devices = []

# Make sure we have a dbus object representing it
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
Vg.validate_dbus_object(uuid, vg_name)

if dbo:
# Check for existence of pv object paths
for p in pv_object_paths:
pv = cfg.om.get_object_by_path(p)
if pv:
pv_devices.append(pv.Name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE, 'PV object path = %s not found' % p)

rc, out, err = cmdhandler.pv_tag(
pv_devices, tags_add, tags_del, tag_options)
if rc == 0:
cfg.load()
return '/'
# Check for existence of pv object paths
for p in pv_object_paths:
pv = cfg.om.get_object_by_path(p)
if pv:
pv_devices.append(pv.Name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
VG_INTERFACE, 'PV object path = %s not found' % p)

else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
rc, out, err = cmdhandler.pv_tag(
pv_devices, tags_add, tags_del, tag_options)
Vg.handle_execute(rc, out, err)
return '/'

@dbus.service.method(
dbus_interface=VG_INTERFACE,
@@ -720,25 +596,12 @@ class Vg(AutomatedProperties):
@staticmethod
def _vg_add_rm_tags(uuid, vg_name, tags_add, tags_del, tag_options):
# Make sure we have a dbus object representing it
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
Vg.validate_dbus_object(uuid, vg_name)

if dbo:

rc, out, err = cmdhandler.vg_tag(
vg_name, tags_add, tags_del, tag_options)
if rc == 0:
cfg.load()
return '/'
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))

else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
rc, out, err = cmdhandler.vg_tag(
vg_name, tags_add, tags_del, tag_options)
Vg.handle_execute(rc, out, err)
return '/'

@dbus.service.method(
dbus_interface=VG_INTERFACE,
@@ -775,23 +638,10 @@ class Vg(AutomatedProperties):
@staticmethod
def _vg_change_set(uuid, vg_name, method, value, options):
# Make sure we have a dbus object representing it
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)

if dbo:
rc, out, err = method(vg_name, value, options)
if rc == 0:
cfg.load()
return '/'
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))

else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
Vg.validate_dbus_object(uuid, vg_name)
rc, out, err = method(vg_name, value, options)
Vg.handle_execute(rc, out, err)
return '/'

@dbus.service.method(
dbus_interface=VG_INTERFACE,
@@ -849,23 +699,11 @@ class Vg(AutomatedProperties):
def _vg_activate_deactivate(uuid, vg_name, activate, control_flags,
options):
# Make sure we have a dbus object representing it
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)

if dbo:
rc, out, err = cmdhandler.activate_deactivate(
'vgchange', vg_name, activate, control_flags, options)
if rc == 0:
cfg.load()
return '/'
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
Vg.validate_dbus_object(uuid, vg_name)
rc, out, err = cmdhandler.activate_deactivate(
'vgchange', vg_name, activate, control_flags, options)
Vg.handle_execute(rc, out, err)
return '/'

@dbus.service.method(
dbus_interface=VG_INTERFACE,
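
The hunks above repeatedly swap the open-coded `if dbo: ... else: raise` and `if rc == 0: ... else: raise` blocks for two shared `Vg` helpers. A minimal sketch of what those helpers might look like follows; the names `validate_dbus_object` and `handle_execute` come from the new lines of the diff, but the bodies, and the reliance on the module's `cfg.om` object manager and `VG_INTERFACE` constant, are assumptions for illustration only, not the actual lvmdbusd implementation.

```python
import dbus

# Hypothetical sketch only -- not the actual lvmdbusd code.
class Vg(object):

    @staticmethod
    def validate_dbus_object(uuid, vg_name):
        # Centralize the "is there a dbus object for this VG?" check.
        dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
        if not dbo:
            raise dbus.exceptions.DBusException(
                VG_INTERFACE,
                'VG with uuid %s and name %s not present!' % (uuid, vg_name))
        return dbo

    @staticmethod
    def handle_execute(rc, out, err):
        # Centralize the "reload on success, raise on failure" pattern.
        if rc == 0:
            cfg.load()
        else:
            raise dbus.exceptions.DBusException(
                VG_INTERFACE,
                'Exit code %s, stderr = %s' % (str(rc), err))
```
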
@@ -19,10 +19,12 @@

#define MIN_ARGV_SIZE 8

static const char *const const polling_ops[] = { [PVMOVE] = LVMPD_REQ_PVMOVE,
[CONVERT] = LVMPD_REQ_CONVERT,
[MERGE] = LVMPD_REQ_MERGE,
[MERGE_THIN] = LVMPD_REQ_MERGE_THIN };
static const char *const polling_ops[] = {
[PVMOVE] = LVMPD_REQ_PVMOVE,
[CONVERT] = LVMPD_REQ_CONVERT,
[MERGE] = LVMPD_REQ_MERGE,
[MERGE_THIN] = LVMPD_REQ_MERGE_THIN
};

const char *polling_op(enum poll_type type)
{

14
doc/license.txt
Normal file
@@ -0,0 +1,14 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -272,10 +272,18 @@ int lv_raid_percent(const struct logical_volume *lv, dm_percent_t *percent)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_raid_data_offset(const struct logical_volume *lv, uint64_t *data_offset)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_raid_dev_health(const struct logical_volume *lv, char **dev_health)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_raid_dev_count(const struct logical_volume *lv, uint32_t *dev_cnt)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_raid_mismatch_count(const struct logical_volume *lv, uint64_t *cnt)
|
||||
{
|
||||
return 0;
|
||||
@@ -984,6 +992,30 @@ int lv_raid_percent(const struct logical_volume *lv, dm_percent_t *percent)
|
||||
return lv_mirror_percent(lv->vg->cmd, lv, 0, percent, NULL);
|
||||
}
|
||||
|
||||
int lv_raid_data_offset(const struct logical_volume *lv, uint64_t *data_offset)
|
||||
{
|
||||
int r;
|
||||
struct dev_manager *dm;
|
||||
struct dm_status_raid *status;
|
||||
|
||||
if (!lv_info(lv->vg->cmd, lv, 0, NULL, 0, 0))
|
||||
return 0;
|
||||
|
||||
log_debug_activation("Checking raid data offset and dev sectors for LV %s/%s",
|
||||
lv->vg->name, lv->name);
|
||||
if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
|
||||
return_0;
|
||||
|
||||
if (!(r = dev_manager_raid_status(dm, lv, &status)))
|
||||
stack;
|
||||
|
||||
*data_offset = status->data_offset;
|
||||
|
||||
dev_manager_destroy(dm);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
int lv_raid_dev_health(const struct logical_volume *lv, char **dev_health)
|
||||
{
|
||||
int r;
|
||||
@@ -1013,6 +1045,32 @@ int lv_raid_dev_health(const struct logical_volume *lv, char **dev_health)
|
||||
return r;
|
||||
}
|
||||
|
||||
int lv_raid_dev_count(const struct logical_volume *lv, uint32_t *dev_cnt)
|
||||
{
|
||||
struct dev_manager *dm;
|
||||
struct dm_status_raid *status;
|
||||
|
||||
*dev_cnt = 0;
|
||||
|
||||
if (!lv_info(lv->vg->cmd, lv, 0, NULL, 0, 0))
|
||||
return 0;
|
||||
|
||||
log_debug_activation("Checking raid device count for LV %s/%s",
|
||||
lv->vg->name, lv->name);
|
||||
if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
|
||||
return_0;
|
||||
|
||||
if (!dev_manager_raid_status(dm, lv, &status)) {
|
||||
dev_manager_destroy(dm);
|
||||
return_0;
|
||||
}
|
||||
*dev_cnt = status->dev_count;
|
||||
|
||||
dev_manager_destroy(dm);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int lv_raid_mismatch_count(const struct logical_volume *lv, uint64_t *cnt)
|
||||
{
|
||||
struct dev_manager *dm;
|
||||
@@ -1948,16 +2006,13 @@ int monitor_dev_for_events(struct cmd_context *cmd, const struct logical_volume
|
||||
|
||||
/* Check [un]monitor results */
|
||||
/* Try a couple times if pending, but not forever... */
|
||||
for (i = 0; i < 40; i++) {
|
||||
for (i = 0;; i++) {
|
||||
pending = 0;
|
||||
monitored = seg->segtype->ops->target_monitored(seg, &pending);
|
||||
if (pending ||
|
||||
(!monitored && monitor) ||
|
||||
(monitored && !monitor))
|
||||
log_very_verbose("%s %smonitoring still pending: waiting...",
|
||||
display_lvname(lv), monitor ? "" : "un");
|
||||
else
|
||||
if (!pending || i >= 40)
|
||||
break;
|
||||
log_very_verbose("%s %smonitoring still pending: waiting...",
|
||||
display_lvname(lv), monitor ? "" : "un");
|
||||
usleep(10000 * i);
|
||||
}
|
||||
|
||||
|
||||
@@ -168,6 +168,8 @@ int lv_snapshot_percent(const struct logical_volume *lv, dm_percent_t *percent);
|
||||
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
|
||||
int wait, dm_percent_t *percent, uint32_t *event_nr);
|
||||
int lv_raid_percent(const struct logical_volume *lv, dm_percent_t *percent);
|
||||
int lv_raid_dev_count(const struct logical_volume *lv, uint32_t *dev_cnt);
|
||||
int lv_raid_data_offset(const struct logical_volume *lv, uint64_t *data_offset);
|
||||
int lv_raid_dev_health(const struct logical_volume *lv, char **dev_health);
|
||||
int lv_raid_mismatch_count(const struct logical_volume *lv, uint64_t *cnt);
|
||||
int lv_raid_sync_action(const struct logical_volume *lv, char **sync_action);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -61,7 +61,7 @@ struct dev_manager {
|
||||
int flush_required;
|
||||
int activation; /* building activation tree */
|
||||
int suspend; /* building suspend tree */
|
||||
int skip_external_lv;
|
||||
unsigned track_external_lv_deps;
|
||||
struct dm_list pending_delete; /* str_list of dlid(s) with pending delete */
|
||||
unsigned track_pending_delete;
|
||||
unsigned track_pvmove_deps;
|
||||
@@ -214,6 +214,14 @@ typedef enum {
|
||||
STATUS, /* DM_DEVICE_STATUS ioctl */
|
||||
} info_type_t;
|
||||
|
||||
/* Return length of segment depending on type and reshape_len */
|
||||
static uint32_t _seg_len(const struct lv_segment *seg)
|
||||
{
|
||||
uint32_t reshape_len = seg_is_raid(seg) ? ((seg->area_count - seg->segtype->parity_devs) * seg->reshape_len) : 0;
|
||||
|
||||
return seg->len - reshape_len;
|
||||
}
|
||||
|
||||
static int _info_run(const char *dlid, struct dm_info *dminfo,
|
||||
uint32_t *read_ahead,
|
||||
struct lv_seg_status *seg_status,
|
||||
@@ -250,7 +258,7 @@ static int _info_run(const char *dlid, struct dm_info *dminfo,
|
||||
if (seg_status && dminfo->exists) {
|
||||
start = length = seg_status->seg->lv->vg->extent_size;
|
||||
start *= seg_status->seg->le;
|
||||
length *= seg_status->seg->len;
|
||||
length *= _seg_len(seg_status->seg);
|
||||
|
||||
do {
|
||||
target = dm_get_next_target(dmt, target, &target_start,
|
||||
@@ -2039,16 +2047,16 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
#endif
|
||||
}
|
||||
|
||||
if (origin_only && dm->activation && !dm->skip_external_lv &&
|
||||
if (origin_only && dm->activation && dm->track_external_lv_deps &&
|
||||
lv_is_external_origin(lv)) {
|
||||
/* Find possible users of external origin lv */
|
||||
dm->skip_external_lv = 1; /* avoid recursion */
|
||||
dm->track_external_lv_deps = 0; /* avoid recursion */
|
||||
dm_list_iterate_items(sl, &lv->segs_using_this_lv)
|
||||
/* Match only external_lv users */
|
||||
if ((sl->seg->external_lv == lv) &&
|
||||
!_add_lv_to_dtree(dm, dtree, sl->seg->lv, 1))
|
||||
return_0;
|
||||
dm->skip_external_lv = 0;
|
||||
dm->track_external_lv_deps = 1;
|
||||
}
|
||||
|
||||
if (lv_is_thin_pool(lv)) {
|
||||
@@ -2148,7 +2156,7 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
|
||||
/* Add any LVs used by segments in this LV */
|
||||
dm_list_iterate_items(seg, &lv->segments) {
|
||||
if (seg->external_lv && !dm->skip_external_lv &&
|
||||
if (seg->external_lv && dm->track_external_lv_deps &&
|
||||
!_add_lv_to_dtree(dm, dtree, seg->external_lv, 1)) /* stack */
|
||||
return_0;
|
||||
if (seg->log_lv &&
|
||||
@@ -2158,7 +2166,7 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
!_add_lv_to_dtree(dm, dtree, seg->metadata_lv, 0))
|
||||
return_0;
|
||||
if (seg->pool_lv &&
|
||||
(lv_is_cache_pool(seg->pool_lv) || !dm->skip_external_lv) &&
|
||||
(lv_is_cache_pool(seg->pool_lv) || dm->track_external_lv_deps) &&
|
||||
/* When activating and not origin_only detect linear 'overlay' over pool */
|
||||
!_add_lv_to_dtree(dm, dtree, seg->pool_lv, dm->activation ? origin_only : 1))
|
||||
return_0;
|
||||
@@ -2214,7 +2222,7 @@ static char *_add_error_or_zero_device(struct dev_manager *dm, struct dm_tree *d
|
||||
struct lv_segment *seg_i;
|
||||
struct dm_info info;
|
||||
int segno = -1, i = 0;
|
||||
uint64_t size = (uint64_t) seg->len * seg->lv->vg->extent_size;
|
||||
uint64_t size = (uint64_t) _seg_len(seg) * seg->lv->vg->extent_size;
|
||||
|
||||
dm_list_iterate_items(seg_i, &seg->lv->segments) {
|
||||
if (seg == seg_i) {
|
||||
@@ -2500,7 +2508,7 @@ static int _add_target_to_dtree(struct dev_manager *dm,
|
||||
return seg->segtype->ops->add_target_line(dm, dm->mem, dm->cmd,
|
||||
&dm->target_state, seg,
|
||||
laopts, dnode,
|
||||
extent_size * seg->len,
|
||||
extent_size * _seg_len(seg),
|
||||
&dm->pvmove_mirror_count);
|
||||
}
|
||||
|
||||
@@ -2575,7 +2583,7 @@ static int _add_new_external_lv_to_dtree(struct dev_manager *dm,
|
||||
struct seg_list *sl;
|
||||
|
||||
/* Do not want to recursively add externals again */
|
||||
if (dm->skip_external_lv)
|
||||
if (!dm->track_external_lv_deps)
|
||||
return 1;
|
||||
|
||||
/*
|
||||
@@ -2583,7 +2591,7 @@ static int _add_new_external_lv_to_dtree(struct dev_manager *dm,
|
||||
* process all LVs related to this LV, and we want to
|
||||
* skip repeated invocation of external lv processing
|
||||
*/
|
||||
dm->skip_external_lv = 1;
|
||||
dm->track_external_lv_deps = 0;
|
||||
|
||||
log_debug_activation("Adding external origin LV %s and all active users.",
|
||||
display_lvname(external_lv));
|
||||
@@ -2609,7 +2617,7 @@ static int _add_new_external_lv_to_dtree(struct dev_manager *dm,
|
||||
log_debug_activation("Finished adding external origin LV %s and all active users.",
|
||||
display_lvname(external_lv));
|
||||
|
||||
dm->skip_external_lv = 0;
|
||||
dm->track_external_lv_deps = 1;
|
||||
|
||||
return 1;
|
||||
}
|
||||
@@ -2693,7 +2701,7 @@ static int _add_segment_to_dtree(struct dev_manager *dm,
|
||||
/* Replace target and all its used devs with error mapping */
|
||||
log_debug_activation("Using error for pending delete %s.",
|
||||
display_lvname(seg->lv));
|
||||
if (!dm_tree_node_add_error_target(dnode, (uint64_t)seg->lv->vg->extent_size * seg->len))
|
||||
if (!dm_tree_node_add_error_target(dnode, (uint64_t)seg->lv->vg->extent_size * _seg_len(seg)))
|
||||
return_0;
|
||||
} else if (!_add_target_to_dtree(dm, dnode, seg, laopts))
|
||||
return_0;
|
||||
@@ -3075,7 +3083,7 @@ static int _tree_action(struct dev_manager *dm, const struct logical_volume *lv,
|
||||
(laopts->origin_only) ? " origin-only" : "",
|
||||
display_lvname(lv));
|
||||
|
||||
/* Some LV can be used for top level tree */
|
||||
/* Some LV cannot be used for top level tree */
|
||||
/* TODO: add more.... */
|
||||
if (lv_is_cache_pool(lv) && !dm_list_empty(&lv->segs_using_this_lv)) {
|
||||
log_error(INTERNAL_ERROR "Cannot create tree for %s.",
|
||||
@@ -3085,6 +3093,7 @@ static int _tree_action(struct dev_manager *dm, const struct logical_volume *lv,
|
||||
/* Some targets may build bigger tree for activation */
|
||||
dm->activation = ((action == PRELOAD) || (action == ACTIVATE));
|
||||
dm->suspend = (action == SUSPEND_WITH_LOCKFS) || (action == SUSPEND);
|
||||
dm->track_external_lv_deps = 1;
|
||||
|
||||
if (!(dtree = _create_partial_dtree(dm, lv, laopts->origin_only)))
|
||||
return_0;
|
||||
@@ -3164,7 +3173,6 @@ static int _tree_action(struct dev_manager *dm, const struct logical_volume *lv,
|
||||
log_error(INTERNAL_ERROR "_tree_action: Action %u not supported.", action);
|
||||
goto out;
|
||||
}
|
||||
|
||||
r = 1;
|
||||
|
||||
out:
|
||||
|
||||
@@ -88,11 +88,20 @@ struct cmd_context {
|
||||
* Command line and arguments.
|
||||
*/
|
||||
const char *cmd_line;
|
||||
const char *name; /* needed before cmd->command is set */
|
||||
struct command_name *cname;
|
||||
struct command *command;
|
||||
char **argv;
|
||||
struct arg_values *arg_values;
|
||||
struct arg_values *opt_arg_values;
|
||||
struct dm_list arg_value_groups;
|
||||
|
||||
/*
|
||||
* Position args remaining after command name
|
||||
* and --options are removed from original argc/argv.
|
||||
*/
|
||||
int position_argc;
|
||||
char **position_argv;
|
||||
|
||||
/*
|
||||
* Format handlers.
|
||||
*/
|
||||
|
||||
@@ -1221,14 +1221,15 @@ cfg_array(activation_read_only_volume_list_CFG, "read_only_volume_list", activat
|
||||
"read_only_volume_list = [ \"vg1\", \"vg2/lvol1\", \"@tag1\", \"@*\" ]\n"
|
||||
"#\n")
|
||||
|
||||
cfg(activation_mirror_region_size_CFG, "mirror_region_size", activation_CFG_SECTION, 0, CFG_TYPE_INT, DEFAULT_RAID_REGION_SIZE, vsn(1, 0, 0), NULL, vsn(2, 2, 99),
|
||||
cfg(activation_mirror_region_size_CFG, "mirror_region_size", activation_CFG_SECTION, 0, CFG_TYPE_INT, DEFAULT_RAID_REGION_SIZE, vsn(1, 0, 0), NULL, vsn(2, 2, 99),
|
||||
"This has been replaced by the activation/raid_region_size setting.\n",
|
||||
"Size in KiB of each copy operation when mirroring.\n")
|
||||
"Size in KiB of each raid or mirror synchronization region.\n")
|
||||
|
||||
cfg(activation_raid_region_size_CFG, "raid_region_size", activation_CFG_SECTION, 0, CFG_TYPE_INT, DEFAULT_RAID_REGION_SIZE, vsn(2, 2, 99), NULL, 0, NULL,
|
||||
"Size in KiB of each raid or mirror synchronization region.\n"
|
||||
"For raid or mirror segment types, this is the amount of data that is\n"
|
||||
"copied at once when initializing, or moved at once by pvmove.\n")
|
||||
"The clean/dirty state of data is tracked for each region.\n"
|
||||
"The value is rounded down to a power of two if necessary, and\n"
|
||||
"is ignored if it is not a multiple of the machine memory page size.\n")
|
||||
|
||||
cfg(activation_error_when_full_CFG, "error_when_full", activation_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_ERROR_WHEN_FULL, vsn(2, 2, 115), NULL, 0, NULL,
|
||||
"Return errors if a thin pool runs out of space.\n"
|
||||
@@ -1863,7 +1864,9 @@ cfg(dmeventd_thin_command_CFG, "thin_command", dmeventd_CFG_SECTION, CFG_DEFAULT
|
||||
"The plugin runs command with each 5% increment when thin-pool data volume\n"
|
||||
"or metadata volume gets above 50%.\n"
|
||||
"Command which starts with 'lvm ' prefix is internal lvm command.\n"
|
||||
"You can write your own handler to customise behaviour in more details.\n")
|
||||
"You can write your own handler to customise behaviour in more details.\n"
|
||||
"User handler is specified with the full path starting with '/'.\n")
|
||||
/* TODO: systemd service handler */
|
||||
|
||||
cfg(dmeventd_executable_CFG, "executable", dmeventd_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_DMEVENTD_PATH, vsn(2, 2, 73), "@DMEVENTD_PATH@", 0, NULL,
|
||||
"The full path to the dmeventd binary.\n")
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2014 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -71,7 +71,7 @@
|
||||
* FIXME: Increase these to 64 and further to the MD maximum
|
||||
* once the SubLVs split and name shift got enhanced
|
||||
*/
|
||||
#define DEFAULT_RAID1_MAX_IMAGES 10
|
||||
#define DEFAULT_RAID1_MAX_IMAGES 64
|
||||
#define DEFAULT_RAID_MAX_IMAGES 64
|
||||
#define DEFAULT_ALLOCATION_STRIPE_ALL_DEVICES 0 /* Don't stripe across all devices if not -i/--stripes given */
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -225,8 +225,8 @@ static int _read_linear(struct cmd_context *cmd, struct lv_map *lvm)
|
||||
while (le < lvm->lv->le_count) {
|
||||
len = _area_length(lvm, le);
|
||||
|
||||
if (!(seg = alloc_lv_segment(segtype, lvm->lv, le, len, 0, 0,
|
||||
NULL, 1, len, 0, 0, 0, NULL))) {
|
||||
if (!(seg = alloc_lv_segment(segtype, lvm->lv, le, len, 0, 0, 0,
|
||||
NULL, 1, len, 0, 0, 0, 0, NULL))) {
|
||||
log_error("Failed to allocate linear segment.");
|
||||
return 0;
|
||||
}
|
||||
@@ -297,10 +297,10 @@ static int _read_stripes(struct cmd_context *cmd, struct lv_map *lvm)
|
||||
|
||||
if (!(seg = alloc_lv_segment(segtype, lvm->lv,
|
||||
lvm->stripes * first_area_le,
|
||||
lvm->stripes * area_len,
|
||||
lvm->stripes * area_len, 0,
|
||||
0, lvm->stripe_size, NULL,
|
||||
lvm->stripes,
|
||||
area_len, 0, 0, 0, NULL))) {
|
||||
area_len, 0, 0, 0, 0, NULL))) {
|
||||
log_error("Failed to allocate striped segment.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 1997-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -192,9 +192,9 @@ static int _add_stripe_seg(struct dm_pool *mem,
|
||||
return_0;
|
||||
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, *le_cur,
|
||||
area_len * usp->num_devs, 0,
|
||||
area_len * usp->num_devs, 0, 0,
|
||||
usp->striping, NULL, usp->num_devs,
|
||||
area_len, 0, 0, 0, NULL))) {
|
||||
area_len, 0, 0, 0, 0, NULL))) {
|
||||
log_error("Unable to allocate striped lv_segment structure");
|
||||
return 0;
|
||||
}
|
||||
@@ -232,8 +232,8 @@ static int _add_linear_seg(struct dm_pool *mem,
|
||||
area_len = (usp->devs[j].blocks) / POOL_PE_SIZE;
|
||||
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, *le_cur,
|
||||
area_len, 0, usp->striping,
|
||||
NULL, 1, area_len,
|
||||
area_len, 0, 0, usp->striping,
|
||||
NULL, 1, area_len, 0,
|
||||
POOL_PE_SIZE, 0, 0, NULL))) {
|
||||
log_error("Unable to allocate linear lv_segment "
|
||||
"structure");
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -583,8 +583,10 @@ static int _print_segment(struct formatter *f, struct volume_group *vg,
|
||||
outf(f, "start_extent = %u", seg->le);
|
||||
outsize(f, (uint64_t) seg->len * vg->extent_size,
|
||||
"extent_count = %u", seg->len);
|
||||
|
||||
outnl(f);
|
||||
if (seg->reshape_len)
|
||||
outsize(f, (uint64_t) seg->reshape_len * vg->extent_size,
|
||||
"reshape_count = %u", seg->reshape_len);
|
||||
outf(f, "type = \"%s\"", seg->segtype->name);
|
||||
|
||||
if (!_out_list(f, &seg->tags, "tags"))
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2013 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -61,6 +61,9 @@ static const struct flag _lv_flags[] = {
|
||||
{LOCKED, "LOCKED", STATUS_FLAG},
|
||||
{LV_NOTSYNCED, "NOTSYNCED", STATUS_FLAG},
|
||||
{LV_REBUILD, "REBUILD", STATUS_FLAG},
|
||||
{LV_RESHAPE_DELTA_DISKS_PLUS, "RESHAPE_DELTA_DISKS_PLUS", STATUS_FLAG},
|
||||
{LV_RESHAPE_DELTA_DISKS_MINUS, "RESHAPE_DELTA_DISKS_MINUS", STATUS_FLAG},
|
||||
{LV_REMOVE_AFTER_RESHAPE, "REMOVE_AFTER_RESHAPE", STATUS_FLAG},
|
||||
{LV_WRITEMOSTLY, "WRITEMOSTLY", STATUS_FLAG},
|
||||
{LV_ACTIVATION_SKIP, "ACTIVATION_SKIP", COMPATIBLE_FLAG},
|
||||
{LV_ERROR_WHEN_FULL, "ERROR_WHEN_FULL", COMPATIBLE_FLAG},
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -354,7 +354,7 @@ static int _read_segment(struct logical_volume *lv, const struct dm_config_node
|
||||
struct lv_segment *seg;
|
||||
const struct dm_config_node *sn_child = sn->child;
|
||||
const struct dm_config_value *cv;
|
||||
uint32_t start_extent, extent_count;
|
||||
uint32_t area_extents, start_extent, extent_count, reshape_count, data_copies;
|
||||
struct segment_type *segtype;
|
||||
const char *segtype_str;
|
||||
|
||||
@@ -375,6 +375,12 @@ static int _read_segment(struct logical_volume *lv, const struct dm_config_node
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!_read_int32(sn_child, "reshape_count", &reshape_count))
|
||||
reshape_count = 0;
|
||||
|
||||
if (!_read_int32(sn_child, "data_copies", &data_copies))
|
||||
data_copies = 1;
|
||||
|
||||
segtype_str = SEG_TYPE_NAME_STRIPED;
|
||||
|
||||
if (!dm_config_get_str(sn_child, "type", &segtype_str)) {
|
||||
@@ -389,9 +395,11 @@ static int _read_segment(struct logical_volume *lv, const struct dm_config_node
|
||||
!segtype->ops->text_import_area_count(sn_child, &area_count))
|
||||
return_0;
|
||||
|
||||
area_extents = segtype->parity_devs ?
|
||||
raid_rimage_extents(segtype, extent_count, area_count - segtype->parity_devs, data_copies) : extent_count;
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, start_extent,
|
||||
extent_count, 0, 0, NULL, area_count,
|
||||
extent_count, 0, 0, 0, NULL))) {
|
||||
extent_count, reshape_count, 0, 0, NULL, area_count,
|
||||
area_extents, data_copies, 0, 0, 0, NULL))) {
|
||||
log_error("Segment allocation failed");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -1278,6 +1278,8 @@ char *lv_attr_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_
|
||||
repstr[8] = 'm'; /* RAID has 'm'ismatches */
|
||||
} else if (lv->status & LV_WRITEMOSTLY)
|
||||
repstr[8] = 'w'; /* sub-LV has 'w'ritemostly */
|
||||
else if (lv->status & LV_REMOVE_AFTER_RESHAPE)
|
||||
repstr[8] = 'R'; /* sub-LV got 'R'emoved from raid set by reshaping */
|
||||
} else if (lvdm->seg_status.type == SEG_STATUS_CACHE) {
|
||||
if (lvdm->seg_status.cache->fail)
|
||||
repstr[8] = 'F';
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2003-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -21,11 +21,13 @@
|
||||
struct lv_segment *alloc_lv_segment(const struct segment_type *segtype,
|
||||
struct logical_volume *lv,
|
||||
uint32_t le, uint32_t len,
|
||||
uint32_t reshape_len,
|
||||
uint64_t status,
|
||||
uint32_t stripe_size,
|
||||
struct logical_volume *log_lv,
|
||||
uint32_t area_count,
|
||||
uint32_t area_len,
|
||||
uint32_t data_copies,
|
||||
uint32_t chunk_size,
|
||||
uint32_t region_size,
|
||||
uint32_t extents_copied,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2014 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -319,7 +319,6 @@ static int _lv_layout_and_role_thin(struct dm_pool *mem,
|
||||
{
|
||||
int top_level = 0;
|
||||
unsigned snap_count;
|
||||
struct lv_segment *seg;
|
||||
|
||||
/* non-top-level LVs */
|
||||
if (lv_is_thin_pool_metadata(lv)) {
|
||||
@@ -353,7 +352,7 @@ static int _lv_layout_and_role_thin(struct dm_pool *mem,
|
||||
!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_MULTITHINORIGIN]))
|
||||
goto_bad;
|
||||
}
|
||||
if ((seg = first_seg(lv)) && (seg->origin || seg->external_lv))
|
||||
if (lv_is_thin_snapshot(lv))
|
||||
if (!str_list_add(mem, role, _lv_type_names[LV_TYPE_SNAPSHOT]) ||
|
||||
!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_THINSNAPSHOT]))
|
||||
goto_bad;
|
||||
@@ -713,6 +712,7 @@ static int _round_down_pow2(int r)

int get_default_region_size(struct cmd_context *cmd)
{
int pagesize = lvm_getpagesize();
int region_size = _get_default_region_size(cmd);

if (!is_power_of_2(region_size)) {
@@ -721,6 +721,12 @@ int get_default_region_size(struct cmd_context *cmd)
region_size / 2);
}

if (region_size % (pagesize >> SECTOR_SHIFT)) {
region_size = DEFAULT_RAID_REGION_SIZE * 2;
log_verbose("Using default region size %u kiB (multiple of page size).",
region_size / 2);
}

return region_size;
}
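
A rough worked example of the page-size check added above (illustrative only; it assumes 512-byte sectors, i.e. a `SECTOR_SHIFT` of 9, and 4 KiB pages, so one page is 8 sectors):

```c
/* Illustrative sketch, not lvm2 code: a region size that is not a
 * multiple of the page size (in sectors) falls back to the default. */
#include <stdio.h>

int main(void)
{
	unsigned page_sectors = 4096 >> 9;  /* 8 sectors per 4 KiB page */
	unsigned region_size = 1020;        /* sectors, i.e. 510 KiB */

	if (region_size % page_sectors)
		printf("%u sectors is not page aligned, falling back to the default\n",
		       region_size);
	else
		printf("%u sectors kept\n", region_size);
	return 0;
}
```
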
@@ -891,8 +897,9 @@ static uint32_t _round_to_stripe_boundary(struct volume_group *vg, uint32_t exte
|
||||
/* Round up extents to stripe divisible amount */
|
||||
if ((size_rest = extents % stripes)) {
|
||||
new_extents += extend ? stripes - size_rest : -size_rest;
|
||||
log_print_unless_silent("Rounding size %s (%u extents) up to stripe boundary size %s (%u extents).",
|
||||
log_print_unless_silent("Rounding size %s (%u extents) %s to stripe boundary size %s(%u extents).",
|
||||
display_size(vg->cmd, (uint64_t) extents * vg->extent_size), extents,
|
||||
new_extents < extents ? "down" : "up",
|
||||
display_size(vg->cmd, (uint64_t) new_extents * vg->extent_size), new_extents);
|
||||
}
|
||||
|
||||
@@ -905,11 +912,13 @@ static uint32_t _round_to_stripe_boundary(struct volume_group *vg, uint32_t exte
|
||||
struct lv_segment *alloc_lv_segment(const struct segment_type *segtype,
|
||||
struct logical_volume *lv,
|
||||
uint32_t le, uint32_t len,
|
||||
uint32_t reshape_len,
|
||||
uint64_t status,
|
||||
uint32_t stripe_size,
|
||||
struct logical_volume *log_lv,
|
||||
uint32_t area_count,
|
||||
uint32_t area_len,
|
||||
uint32_t data_copies,
|
||||
uint32_t chunk_size,
|
||||
uint32_t region_size,
|
||||
uint32_t extents_copied,
|
||||
@@ -943,10 +952,12 @@ struct lv_segment *alloc_lv_segment(const struct segment_type *segtype,
|
||||
seg->lv = lv;
|
||||
seg->le = le;
|
||||
seg->len = len;
|
||||
seg->reshape_len = reshape_len;
|
||||
seg->status = status;
|
||||
seg->stripe_size = stripe_size;
|
||||
seg->area_count = area_count;
|
||||
seg->area_len = area_len;
|
||||
seg->data_copies = data_copies ? : lv_raid_data_copies(segtype, area_count);
|
||||
seg->chunk_size = chunk_size;
|
||||
seg->region_size = region_size;
|
||||
seg->extents_copied = extents_copied;
|
||||
@@ -967,6 +978,37 @@ struct lv_segment *alloc_lv_segment(const struct segment_type *segtype,
|
||||
return seg;
|
||||
}
|
||||
|
||||
/*
|
||||
* Temporary helper to return number of data copies for
|
||||
* RAID segment @seg until seg->data_copies got added
|
||||
*/
|
||||
static uint32_t _raid_data_copies(struct lv_segment *seg)
|
||||
{
|
||||
/*
|
||||
* FIXME: needs to change once more than 2 are supported.
|
||||
* I.e. use seg->data_copies then
|
||||
*/
|
||||
if (seg_is_raid10(seg))
|
||||
return 2;
|
||||
else if (seg_is_raid1(seg))
|
||||
return seg->area_count;
|
||||
|
||||
return seg->segtype->parity_devs + 1;
|
||||
}
|
||||
|
||||
/* Data image count for RAID segment @seg */
|
||||
static uint32_t _raid_stripes_count(struct lv_segment *seg)
|
||||
{
|
||||
/*
|
||||
* FIXME: raid10 needs to change once more than
|
||||
* 2 data_copies and odd # of legs supported.
|
||||
*/
|
||||
if (seg_is_raid10(seg))
|
||||
return seg->area_count / _raid_data_copies(seg);
|
||||
|
||||
return seg->area_count - seg->segtype->parity_devs;
|
||||
}
|
||||
|
||||
static int _release_and_discard_lv_segment_area(struct lv_segment *seg, uint32_t s,
|
||||
uint32_t area_reduction, int with_discard)
|
||||
{
|
||||
@@ -1007,32 +1049,39 @@ static int _release_and_discard_lv_segment_area(struct lv_segment *seg, uint32_t
|
||||
}
|
||||
|
||||
if (lv_is_raid_image(lv)) {
|
||||
/*
|
||||
* FIXME: Use lv_reduce not lv_remove
|
||||
* We use lv_remove for now, because I haven't figured out
|
||||
* why lv_reduce won't remove the LV.
|
||||
lv_reduce(lv, area_reduction);
|
||||
*/
|
||||
if (area_reduction != seg->area_len) {
|
||||
log_error("Unable to reduce RAID LV - operation not implemented.");
|
||||
/* Calculate the amount of extents to reduce per rmate/rimage LV */
|
||||
uint32_t rimage_extents;
|
||||
struct lv_segment *seg1 = first_seg(lv);
|
||||
|
||||
/* FIXME: avoid extra seg_is_*() conditionals here */
|
||||
rimage_extents = raid_rimage_extents(seg1->segtype, area_reduction, seg_is_any_raid0(seg) ? 0 : _raid_stripes_count(seg),
|
||||
seg_is_raid10(seg) ? 1 :_raid_data_copies(seg));
|
||||
if (!rimage_extents)
|
||||
return 0;
|
||||
} else {
|
||||
if (!lv_remove(lv)) {
|
||||
log_error("Failed to remove RAID image %s.",
|
||||
display_lvname(lv));
|
||||
|
||||
if (seg->meta_areas) {
|
||||
uint32_t meta_area_reduction;
|
||||
struct logical_volume *mlv;
|
||||
struct volume_group *vg = lv->vg;
|
||||
|
||||
if (seg_metatype(seg, s) != AREA_LV ||
|
||||
!(mlv = seg_metalv(seg, s)))
|
||||
return 0;
|
||||
}
|
||||
|
||||
meta_area_reduction = raid_rmeta_extents_delta(vg->cmd, lv->le_count, lv->le_count - rimage_extents,
|
||||
seg->region_size, vg->extent_size);
|
||||
/* Limit for raid0_meta not having region size set */
|
||||
if (meta_area_reduction > mlv->le_count ||
|
||||
!(lv->le_count - rimage_extents))
|
||||
meta_area_reduction = mlv->le_count;
|
||||
|
||||
if (meta_area_reduction &&
|
||||
!lv_reduce(mlv, meta_area_reduction))
|
||||
return_0; /* FIXME: any upper level reporting */
|
||||
}
|
||||
|
||||
/* Remove metadata area if image has been removed */
|
||||
if (seg->meta_areas && seg_metalv(seg, s) && (area_reduction == seg->area_len)) {
|
||||
if (!lv_reduce(seg_metalv(seg, s),
|
||||
seg_metalv(seg, s)->le_count)) {
|
||||
log_error("Failed to remove RAID meta-device %s.",
|
||||
display_lvname(seg_metalv(seg, s)));
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
if (!lv_reduce(lv, rimage_extents))
|
||||
return_0; /* FIXME: any upper level reporting */
|
||||
|
||||
return 1;
|
||||
}
|
||||
@@ -1212,7 +1261,7 @@ static uint32_t _calc_area_multiple(const struct segment_type *segtype,
|
||||
* the 'stripes' argument will always need to
|
||||
* be given.
|
||||
*/
|
||||
if (!strcmp(segtype->name, _lv_type_names[LV_TYPE_RAID10])) {
|
||||
if (segtype_is_raid10(segtype)) {
|
||||
if (!stripes)
|
||||
return area_count / 2;
|
||||
return stripes;
|
||||
@@ -1232,25 +1281,35 @@ static uint32_t _calc_area_multiple(const struct segment_type *segtype,
|
||||
static int _lv_segment_reduce(struct lv_segment *seg, uint32_t reduction)
|
||||
{
|
||||
uint32_t area_reduction, s;
|
||||
uint32_t areas = (seg->area_count / (seg_is_raid10(seg) ? seg->data_copies : 1)) - seg->segtype->parity_devs;
|
||||
|
||||
/* Caller must ensure exact divisibility */
|
||||
if (seg_is_striped(seg)) {
|
||||
if (reduction % seg->area_count) {
|
||||
// if (!seg_is_raid10(seg) && (seg_is_striped(seg) || seg_is_striped_raid(seg))) {
|
||||
if (seg_is_striped(seg) || seg_is_striped_raid(seg)) {
|
||||
if (reduction % areas) {
|
||||
log_error("Segment extent reduction %" PRIu32
|
||||
" not divisible by #stripes %" PRIu32,
|
||||
reduction, seg->area_count);
|
||||
return 0;
|
||||
}
|
||||
area_reduction = (reduction / seg->area_count);
|
||||
area_reduction = reduction / areas;
|
||||
} else
|
||||
area_reduction = reduction;
|
||||
|
||||
//printf("%s[%u] seg->lv=%s seg->len=%u seg->area_len=%u area_reduction=%u\n", __func__, __LINE__, seg->lv ? seg->lv->name : "?", seg->len, seg->area_len, area_reduction);
|
||||
for (s = 0; s < seg->area_count; s++)
|
||||
if (!release_and_discard_lv_segment_area(seg, s, area_reduction))
|
||||
return_0;
|
||||
|
||||
//printf("%s[%u] seg->lv=%s seg->len=%u seg->area_len=%u area_reduction=%u\n", __func__, __LINE__, seg->lv ? seg->lv->name : "?", seg->len, seg->area_len, area_reduction);
|
||||
seg->len -= reduction;
|
||||
seg->area_len -= area_reduction;
|
||||
//pprintf("%s[%u] seg->lv=%s seg->len=%u seg->area_len=%u area_reduction=%u\n", __func__, __LINE__, seg->lv ? seg->lv->name : "?", seg->len, seg->area_len, area_reduction);
|
||||
|
||||
if (seg_is_raid(seg))
|
||||
seg->area_len = seg->len;
|
||||
else
|
||||
seg->area_len -= area_reduction;
|
||||
//printf("%s[%u] seg->lv=%s seg->len=%u seg->area_len=%u area_reduction=%u\n", __func__, __LINE__, seg->lv ? seg->lv->name : "?", seg->len, seg->area_len, area_reduction);
|
||||
|
||||
return 1;
|
||||
}
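
As a worked example of the divisibility check introduced above (numbers are illustrative, not taken from the diff): a raid6 segment with 7 areas and 2 parity devices has 5 data stripes, so a 10-extent reduction removes 2 extents from each area, while a 9-extent reduction would be rejected.

```c
/* Illustrative sketch of the stripe-divisibility arithmetic used above. */
#include <stdio.h>

int main(void)
{
	unsigned area_count = 7, parity_devs = 2;
	unsigned areas = area_count - parity_devs;  /* 5 data stripes */
	unsigned reduction = 10;                    /* extents to remove in total */

	if (reduction % areas)
		printf("reduction %u not divisible by #stripes %u\n", reduction, areas);
	else
		printf("each area shrinks by %u extents\n", reduction / areas);
	return 0;
}
```
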
@@ -1260,11 +1319,13 @@ static int _lv_segment_reduce(struct lv_segment *seg, uint32_t reduction)
|
||||
*/
|
||||
static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
|
||||
{
|
||||
struct lv_segment *seg;
|
||||
struct lv_segment *seg = first_seg(lv);;
|
||||
uint32_t count = extents;
|
||||
uint32_t reduction;
|
||||
struct logical_volume *pool_lv;
|
||||
struct logical_volume *external_lv = NULL;
|
||||
int is_raid10 = seg_is_any_raid10(seg) && seg->reshape_len;
|
||||
uint32_t data_copies = seg->data_copies;
|
||||
|
||||
if (lv_is_merging_origin(lv)) {
|
||||
log_debug_metadata("Dropping snapshot merge of %s to removed origin %s.",
|
||||
@@ -1272,6 +1333,7 @@ static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
|
||||
clear_snapshot_merge(lv);
|
||||
}
|
||||
|
||||
//printf("%s[%u] lv=%s is_raid10=%d le_count=%u extents=%u lv->size=%s seg->len=%u seg->area_len=%u seg->reshape_len=%u\n", __func__, __LINE__, lv->name, is_raid10, lv->le_count, extents, display_size(lv->vg->cmd, lv->size), seg ? seg->len : 4711, seg ? seg->area_len : 4711, seg->reshape_len);
|
||||
dm_list_iterate_back_items(seg, &lv->segments) {
|
||||
if (!count)
|
||||
break;
|
||||
@@ -1327,11 +1389,21 @@ static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
|
||||
count -= reduction;
|
||||
}
|
||||
|
||||
lv->le_count -= extents;
|
||||
seg = first_seg(lv);
|
||||
//printf("%s[%u] lv=%s le_count=%u extents=%u lv->size=%s seg->len=%u seg->area_len=%u\n", __func__, __LINE__, lv->name, lv->le_count, extents, display_size(lv->vg->cmd, lv->size), seg ? seg->len : 4711, seg ? seg->area_len : 4711);
|
||||
if (is_raid10) {
|
||||
lv->le_count -= extents * data_copies;
|
||||
if (seg)
|
||||
seg->len = seg->area_len = lv->le_count;
|
||||
} else
|
||||
lv->le_count -= extents;
|
||||
|
||||
lv->size = (uint64_t) lv->le_count * lv->vg->extent_size;
|
||||
//printf("%s[%u] lv=%s le_count=%u lv->size=%s seg->len=%u seg->area_len=%u\n", __func__, __LINE__, lv->name, lv->le_count, display_size(lv->vg->cmd, lv->size), seg ? seg->len : 4711, seg ? seg->area_len : 4711);
|
||||
|
||||
if (!delete)
|
||||
return 1;
|
||||
//printf("%s[%u] lv=%s le_count=%u lv->size=%s seg->len=%u seg->area_len=%u\n", __func__, __LINE__, lv->name, lv->le_count, display_size(lv->vg->cmd, lv->size), seg ? seg->len : 4711, seg ? seg->area_len : 4711);
|
||||
|
||||
if (lv == lv->vg->pool_metadata_spare_lv) {
|
||||
lv->status &= ~POOL_METADATA_SPARE;
|
||||
@@ -1439,6 +1511,13 @@ int lv_refresh_suspend_resume(const struct logical_volume *lv)
|
||||
*/
|
||||
int lv_reduce(struct logical_volume *lv, uint32_t extents)
|
||||
{
|
||||
struct lv_segment *seg = first_seg(lv);
|
||||
|
||||
/* Ensure stipe boundary extents on RAID LVs */
|
||||
if (lv_is_raid(lv) && extents != lv->le_count)
|
||||
extents =_round_to_stripe_boundary(lv->vg, extents,
|
||||
seg_is_raid1(seg) ? 0 : _raid_stripes_count(seg), 0);
|
||||
|
||||
return _lv_reduce(lv, extents, 1);
|
||||
}
|
||||
|
||||
@@ -1740,10 +1819,10 @@ static int _setup_alloced_segment(struct logical_volume *lv, uint64_t status,
|
||||
area_multiple = _calc_area_multiple(segtype, area_count, 0);
|
||||
extents = aa[0].len * area_multiple;
|
||||
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, lv->le_count, extents,
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, lv->le_count, extents, 0,
|
||||
status, stripe_size, NULL,
|
||||
area_count,
|
||||
aa[0].len, 0u, region_size, 0u, NULL))) {
|
||||
aa[0].len, 0, 0u, region_size, 0u, NULL))) {
|
||||
log_error("Couldn't allocate new LV segment.");
|
||||
return 0;
|
||||
}
|
||||
@@ -1755,7 +1834,7 @@ static int _setup_alloced_segment(struct logical_volume *lv, uint64_t status,
|
||||
dm_list_add(&lv->segments, &seg->list);
|
||||
|
||||
extents = aa[0].len * area_multiple;
|
||||
|
||||
//printf("%s[%u] le_count=%u extents=%u\n", __func__, __LINE__, lv->le_count, extents);
|
||||
if (!_setup_lv_size(lv, lv->le_count + extents))
|
||||
return_0;
|
||||
|
||||
@@ -3181,9 +3260,9 @@ int lv_add_virtual_segment(struct logical_volume *lv, uint64_t status,
|
||||
seg->area_len += extents;
|
||||
seg->len += extents;
|
||||
} else {
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, lv->le_count, extents,
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, lv->le_count, extents, 0,
|
||||
status, 0, NULL, 0,
|
||||
extents, 0, 0, 0, NULL))) {
|
||||
extents, 0, 0, 0, 0, NULL))) {
|
||||
log_error("Couldn't allocate new %s segment.", segtype->name);
|
||||
return 0;
|
||||
}
|
||||
@@ -3302,19 +3381,24 @@ static struct alloc_handle *_alloc_init(struct cmd_context *cmd,
|
||||
|
||||
if (segtype_is_raid(segtype)) {
|
||||
if (metadata_area_count) {
|
||||
uint32_t cur_rimage_extents, new_rimage_extents;
|
||||
|
||||
if (metadata_area_count != area_count)
|
||||
log_error(INTERNAL_ERROR
|
||||
"Bad metadata_area_count");
|
||||
ah->metadata_area_count = area_count;
|
||||
ah->alloc_and_split_meta = 1;
|
||||
|
||||
ah->log_len = RAID_METADATA_AREA_LEN;
|
||||
|
||||
/* Calculate log_len (i.e. length of each rmeta device) for RAID */
|
||||
cur_rimage_extents = raid_rimage_extents(segtype, existing_extents, stripes, mirrors);
|
||||
new_rimage_extents = raid_rimage_extents(segtype, existing_extents + new_extents, stripes, mirrors),
|
||||
ah->log_len = raid_rmeta_extents_delta(cmd, cur_rimage_extents, new_rimage_extents,
|
||||
region_size, extent_size);
|
||||
ah->metadata_area_count = metadata_area_count;
|
||||
ah->alloc_and_split_meta = !!ah->log_len;
|
||||
/*
|
||||
* We need 'log_len' extents for each
|
||||
* RAID device's metadata_area
|
||||
*/
|
||||
total_extents += (ah->log_len * ah->area_multiple);
|
||||
total_extents += ah->log_len * (segtype_is_raid1(segtype) ? 1 : ah->area_multiple);
|
||||
} else {
|
||||
ah->log_area_count = 0;
|
||||
ah->log_len = 0;
|
||||
@@ -3504,10 +3588,10 @@ static struct lv_segment *_convert_seg_to_mirror(struct lv_segment *seg,
|
||||
}
|
||||
|
||||
if (!(newseg = alloc_lv_segment(get_segtype_from_string(seg->lv->vg->cmd, SEG_TYPE_NAME_MIRROR),
|
||||
seg->lv, seg->le, seg->len,
|
||||
seg->lv, seg->le, seg->len, 0,
|
||||
seg->status, seg->stripe_size,
|
||||
log_lv,
|
||||
seg->area_count, seg->area_len,
|
||||
seg->area_count, seg->area_len, 0,
|
||||
seg->chunk_size, region_size,
|
||||
seg->extents_copied, NULL))) {
|
||||
log_error("Couldn't allocate converted LV segment.");
|
||||
@@ -3609,8 +3693,8 @@ int lv_add_segmented_mirror_image(struct alloc_handle *ah,
|
||||
}
|
||||
|
||||
if (!(new_seg = alloc_lv_segment(segtype, copy_lv,
|
||||
seg->le, seg->len, PVMOVE, 0,
|
||||
NULL, 1, seg->len,
|
||||
seg->le, seg->len, 0, PVMOVE, 0,
|
||||
NULL, 1, seg->len, 0,
|
||||
0, 0, 0, NULL)))
|
||||
return_0;
|
||||
|
||||
@@ -3805,9 +3889,9 @@ static int _lv_insert_empty_sublvs(struct logical_volume *lv,
|
||||
/*
|
||||
* First, create our top-level segment for our top-level LV
|
||||
*/
|
||||
if (!(mapseg = alloc_lv_segment(segtype, lv, 0, 0, lv->status,
|
||||
if (!(mapseg = alloc_lv_segment(segtype, lv, 0, 0, 0, lv->status,
|
||||
stripe_size, NULL,
|
||||
devices, 0, 0, region_size, 0, NULL))) {
|
||||
devices, 0, 0, 0, region_size, 0, NULL))) {
|
||||
log_error("Failed to create mapping segment for %s.",
|
||||
display_lvname(lv));
|
||||
return 0;
|
||||
@@ -4005,25 +4089,16 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
|
||||
lv_set_hidden(seg_metalv(seg, s));
|
||||
}
|
||||
|
||||
seg->area_len += extents / area_multiple;
|
||||
seg->len += extents;
|
||||
if (seg_is_raid(seg))
|
||||
seg->area_len = seg->len;
|
||||
else
|
||||
seg->area_len += extents / area_multiple;
|
||||
|
||||
//pprintf("%s[%u] le_count=%u extents=%u seg->len=%u seg-area_len=%u\n", __func__, __LINE__, lv->le_count, extents, seg->len, seg->area_len);
|
||||
if (!_setup_lv_size(lv, lv->le_count + extents))
|
||||
return_0;
|
||||
|
||||
/*
|
||||
* The MD bitmap is limited to being able to track 2^21 regions.
|
||||
* The region_size must be adjusted to meet that criteria
|
||||
* unless raid0/raid0_meta, which doesn't have a bitmap.
|
||||
*/
|
||||
if (seg_is_raid(seg) && !seg_is_any_raid0(seg))
|
||||
while (seg->region_size < (lv->size / (1 << 21))) {
|
||||
seg->region_size *= 2;
|
||||
log_very_verbose("Adjusting RAID region_size from %uS to %uS"
|
||||
" to support large LV size",
|
||||
seg->region_size/2, seg->region_size);
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -4050,6 +4125,7 @@ int lv_extend(struct logical_volume *lv,
|
||||
uint32_t sub_lv_count;
|
||||
uint32_t old_extents;
|
||||
uint32_t new_extents; /* Total logical size after extension. */
|
||||
uint64_t raid_size;
|
||||
|
||||
log_very_verbose("Adding segment of type %s to LV %s.", segtype->name, lv->name);
|
||||
|
||||
@@ -4071,6 +4147,22 @@ int lv_extend(struct logical_volume *lv,
|
||||
}
|
||||
/* FIXME log_count should be 1 for mirrors */
|
||||
|
||||
if (segtype_is_raid(segtype) && !segtype_is_any_raid0(segtype)) {
|
||||
raid_size = ((uint64_t) lv->le_count + extents) * lv->vg->extent_size;
|
||||
|
||||
/*
|
||||
* The MD bitmap is limited to being able to track 2^21 regions.
|
||||
* The region_size must be adjusted to meet that criteria
|
||||
* unless raid0/raid0_meta, which doesn't have a bitmap.
|
||||
*/
|
||||
|
||||
region_size = raid_ensure_min_region_size(lv, raid_size, region_size);
|
||||
|
||||
if (first_seg(lv))
|
||||
first_seg(lv)->region_size = region_size;
|
||||
|
||||
}
|
||||
|
||||
if (!(ah = allocate_extents(lv->vg, lv, segtype, stripes, mirrors,
|
||||
log_count, region_size, extents,
|
||||
allocatable_pvs, alloc, approx_alloc, NULL)))
|
||||
@@ -4651,6 +4743,11 @@ static uint32_t lvseg_get_stripes(struct lv_segment *seg, uint32_t *stripesize)
|
||||
return seg->area_count;
|
||||
}
|
||||
|
||||
if (seg_is_raid(seg)) {
|
||||
*stripesize = seg->stripe_size;
|
||||
return _raid_stripes_count(seg);
|
||||
}
|
||||
|
||||
*stripesize = 0;
|
||||
return 0;
|
||||
}
|
||||
@@ -5316,6 +5413,7 @@ int lv_resize(struct logical_volume *lv,
|
||||
struct logical_volume *lock_lv = (struct logical_volume*) lv_lock_holder(lv);
|
||||
struct logical_volume *aux_lv = NULL; /* Note: aux_lv never resizes fs */
|
||||
struct lvresize_params aux_lp;
|
||||
struct lv_segment *seg = first_seg(lv);
|
||||
int activated = 0;
|
||||
int ret = 0;
|
||||
int status;
|
||||
@@ -5357,6 +5455,11 @@ int lv_resize(struct logical_volume *lv,
|
||||
}
|
||||
}
|
||||
|
||||
/* Ensure stripe boundary extents! */
|
||||
if (!lp->percent && lv_is_raid(lv))
|
||||
lp->extents =_round_to_stripe_boundary(lv->vg, lp->extents,
|
||||
seg_is_raid1(seg) ? 0 : _raid_stripes_count(seg),
|
||||
lp->resize == LV_REDUCE ? 0 : 1);
|
||||
if (aux_lv && !_lvresize_prepare(&aux_lv, &aux_lp, pvh))
|
||||
return_0;
|
||||
|
||||
@@ -6227,7 +6330,6 @@ static int _lv_update_and_reload(struct logical_volume *lv, int origin_only)
|
||||
|
||||
log_very_verbose("Updating logical volume %s on disk(s)%s.",
|
||||
display_lvname(lock_lv), origin_only ? " (origin only)": "");
|
||||
|
||||
if (!vg_write(vg))
|
||||
return_0;
|
||||
|
||||
@@ -6694,8 +6796,8 @@ struct logical_volume *insert_layer_for_lv(struct cmd_context *cmd,
|
||||
return_NULL;
|
||||
|
||||
/* allocate a new linear segment */
|
||||
if (!(mapseg = alloc_lv_segment(segtype, lv_where, 0, layer_lv->le_count,
|
||||
status, 0, NULL, 1, layer_lv->le_count,
|
||||
if (!(mapseg = alloc_lv_segment(segtype, lv_where, 0, layer_lv->le_count, 0,
|
||||
status, 0, NULL, 1, layer_lv->le_count, 0,
|
||||
0, 0, 0, NULL)))
|
||||
return_NULL;
|
||||
|
||||
@@ -6751,8 +6853,8 @@ static int _extend_layer_lv_for_segment(struct logical_volume *layer_lv,
|
||||
|
||||
/* allocate a new segment */
|
||||
if (!(mapseg = alloc_lv_segment(segtype, layer_lv, layer_lv->le_count,
|
||||
seg->area_len, status, 0,
|
||||
NULL, 1, seg->area_len, 0, 0, 0, seg)))
|
||||
seg->area_len, 0, status, 0,
|
||||
NULL, 1, seg->area_len, 0, 0, 0, 0, seg)))
|
||||
return_0;
|
||||
|
||||
/* map the new segment to the original underlying are */
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -148,7 +148,15 @@ static void _check_raid1_seg(struct lv_segment *seg, int *error_count)
|
||||
static void _check_raid45610_seg(struct lv_segment *seg, int *error_count)
|
||||
{
|
||||
/* Checks applying to any raid4/5/6/10 */
|
||||
if (!seg->meta_areas)
|
||||
/*
|
||||
* Allow raid4 + raid5_n to get activated w/o metadata.
|
||||
*
|
||||
* This is mandatory during conversion between them,
|
||||
* because switching the dedicated parity SubLVs
|
||||
* beginning <-> end changes the roles of all SubLVs
|
||||
* which the kernel would reject.
|
||||
*/
|
||||
if (!(seg_is_raid4(seg) || seg_is_raid5_n(seg)) && !seg->meta_areas)
|
||||
raid_seg_error("no meta areas");
|
||||
if (!seg->stripe_size)
|
||||
raid_seg_error("zero stripe size");
|
||||
@@ -228,7 +236,7 @@ static void _check_raid_seg(struct lv_segment *seg, int *error_count)
|
||||
if (!seg->areas)
|
||||
raid_seg_error("zero areas");
|
||||
|
||||
if (seg->extents_copied > seg->area_len)
|
||||
if (seg->extents_copied > seg->len)
|
||||
raid_seg_error_val("extents_copied too large", seg->extents_copied);
|
||||
|
||||
/* Default < 10, change once raid1 split shift and rename SubLVs works! */
|
||||
@@ -467,7 +475,7 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
|
||||
struct lv_segment *seg, *seg2;
|
||||
uint32_t le = 0;
|
||||
unsigned seg_count = 0, seg_found, external_lv_found = 0;
|
||||
uint32_t area_multiplier, s;
|
||||
uint32_t data_rimage_count, s;
|
||||
struct seg_list *sl;
|
||||
struct glv_list *glvl;
|
||||
int error_count = 0;
|
||||
@@ -490,13 +498,14 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
|
||||
inc_error_count;
|
||||
}
|
||||
|
||||
area_multiplier = segtype_is_striped(seg->segtype) ?
|
||||
seg->area_count : 1;
|
||||
|
||||
if (seg->area_len * area_multiplier != seg->len) {
|
||||
log_error("LV %s: segment %u has inconsistent "
|
||||
"area_len %u",
|
||||
lv->name, seg_count, seg->area_len);
|
||||
data_rimage_count = seg->area_count - seg->segtype->parity_devs;
|
||||
/* FIXME: raid varies seg->area_len? */
|
||||
if (seg->len != seg->area_len &&
|
||||
seg->len != seg->area_len * data_rimage_count) {
|
||||
log_error("LV %s: segment %u with len=%u "
|
||||
" has inconsistent area_len %u",
|
||||
lv->name, seg_count, seg->len, seg->area_len);
|
||||
inc_error_count;
|
||||
}
|
||||
|
||||
@@ -610,7 +619,7 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
|
||||
continue;
|
||||
if (lv == seg_lv(seg, s))
|
||||
seg_found++;
|
||||
if (seg_is_raid_with_meta(seg) && (lv == seg_metalv(seg, s)))
|
||||
if (seg->meta_areas && seg_is_raid_with_meta(seg) && (lv == seg_metalv(seg, s)))
|
||||
seg_found++;
|
||||
}
|
||||
if (seg_is_replicator_dev(seg)) {
|
||||
@@ -758,10 +767,10 @@ static int _lv_split_segment(struct logical_volume *lv, struct lv_segment *seg,
|
||||
|
||||
/* Clone the existing segment */
|
||||
if (!(split_seg = alloc_lv_segment(seg->segtype,
|
||||
seg->lv, seg->le, seg->len,
|
||||
seg->lv, seg->le, seg->len, seg->reshape_len,
|
||||
seg->status, seg->stripe_size,
|
||||
seg->log_lv,
|
||||
seg->area_count, seg->area_len,
|
||||
seg->area_count, seg->area_len, seg->data_copies,
|
||||
seg->chunk_size, seg->region_size,
|
||||
seg->extents_copied, seg->pvmove_source_seg))) {
|
||||
log_error("Couldn't allocate cloned LV segment.");
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -137,7 +137,11 @@
|
||||
e.g. to prohibit allocation of a RAID image
|
||||
on a PV already holing an image of the RAID set */
|
||||
#define LOCKD_SANLOCK_LV UINT64_C(0x0080000000000000) /* LV - Internal use only */
|
||||
/* Next unused flag: UINT64_C(0x0100000000000000) */
|
||||
#define LV_RESHAPE_DELTA_DISKS_PLUS UINT64_C(0x0100000000000000) /* LV reshape flag delta disks plus image(s) */
|
||||
#define LV_RESHAPE_DELTA_DISKS_MINUS UINT64_C(0x0200000000000000) /* LV reshape flag delta disks minus image(s) */
|
||||
|
||||
#define LV_REMOVE_AFTER_RESHAPE UINT64_C(0x0400000000000000) /* LV needs to be removed after a shrinking reshape */
|
||||
/* Next unused flag: UINT64_C(0x0800000000000000) */
|
||||
|
||||
/* Format features flags */
|
||||
#define FMT_SEGMENTS 0x00000001U /* Arbitrary segment params? */
|
||||
@@ -446,6 +450,7 @@ struct lv_segment {
|
||||
const struct segment_type *segtype;
|
||||
uint32_t le;
|
||||
uint32_t len;
|
||||
uint32_t reshape_len; /* For RAID: user hidden additional out of place reshaping length off area_len and len */
|
||||
|
||||
uint64_t status;
|
||||
|
||||
@@ -454,6 +459,7 @@ struct lv_segment {
|
||||
uint32_t writebehind; /* For RAID (RAID1 only) */
|
||||
uint32_t min_recovery_rate; /* For RAID */
|
||||
uint32_t max_recovery_rate; /* For RAID */
|
||||
uint32_t data_offset; /* For RAID: data offset in sectors on each data component image */
|
||||
uint32_t area_count;
|
||||
uint32_t area_len;
|
||||
uint32_t chunk_size; /* For snapshots/thin_pool. In sectors. */
|
||||
@@ -464,6 +470,7 @@ struct lv_segment {
|
||||
struct logical_volume *cow;
|
||||
struct dm_list origin_list;
|
||||
uint32_t region_size; /* For mirrors, replicators - in sectors */
|
||||
uint32_t data_copies; /* For RAID: number of data copies (e.g. 3 for RAID 6 */
|
||||
uint32_t extents_copied;/* Number of extents synced for raids/mirrors */
|
||||
struct logical_volume *log_lv;
|
||||
struct lv_segment *pvmove_source_seg;
|
||||
@@ -1067,9 +1074,16 @@ struct lv_segment *get_only_segment_using_this_lv(const struct logical_volume *l
|
||||
* Useful functions for managing snapshots.
|
||||
*/
|
||||
int lv_is_origin(const struct logical_volume *lv);
|
||||
#define lv_is_thick_origin lv_is_origin
|
||||
|
||||
int lv_is_thin_origin(const struct logical_volume *lv, unsigned *snap_count);
|
||||
int lv_is_cache_origin(const struct logical_volume *lv);
|
||||
int lv_is_thin_snapshot(const struct logical_volume *lv);
|
||||
|
||||
int lv_is_cow(const struct logical_volume *lv);
|
||||
#define lv_is_thick_snapshot lv_is_cow
|
||||
|
||||
int lv_is_cache_origin(const struct logical_volume *lv);
|
||||
|
||||
int lv_is_merging_cow(const struct logical_volume *cow);
|
||||
uint32_t cow_max_extents(const struct logical_volume *origin, uint32_t chunk_size);
|
||||
int cow_has_min_chunks(const struct volume_group *vg, uint32_t cow_extents, uint32_t chunk_size);
|
||||
@@ -1198,7 +1212,8 @@ struct logical_volume *first_replicator_dev(const struct logical_volume *lv);
|
||||
int lv_is_raid_with_tracking(const struct logical_volume *lv);
|
||||
uint32_t lv_raid_image_count(const struct logical_volume *lv);
|
||||
int lv_raid_change_image_count(struct logical_volume *lv,
|
||||
uint32_t new_count, struct dm_list *allocate_pvs);
|
||||
uint32_t new_count, const uint32_t region_size,
|
||||
struct dm_list *allocate_pvs);
|
||||
int lv_raid_split(struct logical_volume *lv, const char *split_name,
|
||||
uint32_t new_count, struct dm_list *splittable_pvs);
|
||||
int lv_raid_split_and_track(struct logical_volume *lv,
|
||||
@@ -1217,6 +1232,16 @@ int lv_raid_replace(struct logical_volume *lv, int force,
|
||||
struct dm_list *remove_pvs, struct dm_list *allocate_pvs);
|
||||
int lv_raid_remove_missing(struct logical_volume *lv);
|
||||
int partial_raid_lv_supports_degraded_activation(const struct logical_volume *lv);
|
||||
uint32_t raid_rmeta_extents_delta(struct cmd_context *cmd,
|
||||
uint32_t rimage_extents_cur, uint32_t rimage_extents_new,
|
||||
uint32_t region_size, uint32_t extent_size);
|
||||
uint32_t raid_rimage_extents(const struct segment_type *segtype,
|
||||
uint32_t extents, uint32_t stripes, uint32_t data_copies);
|
||||
uint32_t raid_ensure_min_region_size(const struct logical_volume *lv, uint64_t raid_size, uint32_t region_size);
|
||||
int lv_raid_change_region_size(struct logical_volume *lv,
|
||||
int yes, int force, uint32_t new_region_size);
|
||||
uint32_t lv_raid_data_copies(const struct segment_type *segtype, uint32_t area_count);
|
||||
int lv_raid_in_sync(const struct logical_volume *lv);
|
||||
/* -- metadata/raid_manip.c */
|
||||
|
||||
/* ++ metadata/cache_manip.c */
|
||||
|
||||
@@ -1256,7 +1256,7 @@ uint32_t extents_from_percent_size(struct volume_group *vg, const struct dm_list
|
||||
}
|
||||
break;
|
||||
}
|
||||
/* Fall back to use all PVs in VG like %FREE */
|
||||
/* fall through to use all PVs in VG like %FREE */
|
||||
case PERCENT_FREE:
|
||||
if (!(extents = vg->free_count)) {
|
||||
log_error("No free extents in Volume group %s.", vg->name);
|
||||
@@ -5629,6 +5629,11 @@ static int _access_vg_lock_type(struct cmd_context *cmd, struct volume_group *vg
|
||||
}
|
||||
}
|
||||
|
||||
if (test_mode()) {
|
||||
log_error("Test mode is not yet supported with lock type %s.", vg->lock_type);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -6383,7 +6388,7 @@ int vg_strip_outdated_historical_lvs(struct volume_group *vg) {
|
||||
* Removal time in the future? Not likely,
|
||||
* but skip this item in any case.
|
||||
*/
|
||||
if ((current_time) < glvl->glv->historical->timestamp_removed)
|
||||
if (current_time < (time_t) glvl->glv->historical->timestamp_removed)
|
||||
continue;
|
||||
|
||||
if ((current_time - glvl->glv->historical->timestamp_removed) > threshold) {
|
||||
|
||||
File diff suppressed because it is too large
@@ -43,7 +43,8 @@ struct segment_type *get_segtype_from_flag(struct cmd_context *cmd, uint64_t fla
|
||||
{
|
||||
struct segment_type *segtype;
|
||||
|
||||
dm_list_iterate_items(segtype, &cmd->segtypes)
|
||||
/* Iterate backwards to provide aliases; e.g. raid5 instead of raid5_ls */
|
||||
dm_list_iterate_back_items(segtype, &cmd->segtypes)
|
||||
if (flag & segtype->flags)
|
||||
return segtype;
|
||||
|
||||
|
||||
@@ -50,7 +50,8 @@ struct dev_manager;
|
||||
#define SEG_RAID0 0x0000000000040000ULL
|
||||
#define SEG_RAID0_META 0x0000000000080000ULL
|
||||
#define SEG_RAID1 0x0000000000100000ULL
|
||||
#define SEG_RAID10 0x0000000000200000ULL
|
||||
#define SEG_RAID10_NEAR 0x0000000000200000ULL
|
||||
#define SEG_RAID10 SEG_RAID10_NEAR
|
||||
#define SEG_RAID4 0x0000000000400000ULL
|
||||
#define SEG_RAID5_N 0x0000000000800000ULL
|
||||
#define SEG_RAID5_LA 0x0000000001000000ULL
|
||||
@@ -132,10 +133,18 @@ struct dev_manager;
|
||||
#define segtype_is_raid6_nr(segtype) ((segtype)->flags & SEG_RAID6_NR ? 1 : 0)
|
||||
#define segtype_is_raid6_n_6(segtype) ((segtype)->flags & SEG_RAID6_N_6 ? 1 : 0)
|
||||
#define segtype_is_raid6_zr(segtype) ((segtype)->flags & SEG_RAID6_ZR ? 1 : 0)
|
||||
#define segtype_is_raid6_ls_6(segtype) ((segtype)->flags & SEG_RAID6_LS_6 ? 1 : 0)
|
||||
#define segtype_is_raid6_rs_6(segtype) ((segtype)->flags & SEG_RAID6_RS_6 ? 1 : 0)
|
||||
#define segtype_is_raid6_la_6(segtype) ((segtype)->flags & SEG_RAID6_LA_6 ? 1 : 0)
|
||||
#define segtype_is_raid6_ra_6(segtype) ((segtype)->flags & SEG_RAID6_RA_6 ? 1 : 0)
|
||||
#define segtype_is_any_raid10(segtype) ((segtype)->flags & SEG_RAID10 ? 1 : 0)
|
||||
#define segtype_is_raid10(segtype) ((segtype)->flags & SEG_RAID10 ? 1 : 0)
|
||||
#define segtype_is_raid10_near(segtype) segtype_is_raid10(segtype)
|
||||
/* FIXME: once raid10_offset supported */
|
||||
#define segtype_is_raid10_offset(segtype) 0 // ((segtype)->flags & SEG_RAID10_OFFSET ? 1 : 0)
|
||||
#define segtype_is_raid_with_meta(segtype) (segtype_is_raid(segtype) && !segtype_is_raid0(segtype))
|
||||
#define segtype_is_striped_raid(segtype) (segtype_is_raid(segtype) && !segtype_is_raid1(segtype))
|
||||
#define segtype_is_reshapable_raid(segtype) ((segtype_is_striped_raid(segtype) && !segtype_is_any_raid0(segtype)) || segtype_is_raid10_near(segtype) || segtype_is_raid10_offset(segtype))
|
||||
#define segtype_is_snapshot(segtype) ((segtype)->flags & SEG_SNAPSHOT ? 1 : 0)
|
||||
#define segtype_is_striped(segtype) ((segtype)->flags & SEG_AREAS_STRIPED ? 1 : 0)
|
||||
#define segtype_is_thin(segtype) ((segtype)->flags & (SEG_THIN_POOL|SEG_THIN_VOLUME) ? 1 : 0)
|
||||
@@ -185,6 +194,8 @@ struct dev_manager;
|
||||
#define seg_is_raid10(seg) segtype_is_raid10((seg)->segtype)
|
||||
#define seg_is_raid10_near(seg) segtype_is_raid10_near((seg)->segtype)
|
||||
#define seg_is_raid_with_meta(seg) segtype_is_raid_with_meta((seg)->segtype)
|
||||
#define seg_is_striped_raid(seg) segtype_is_striped_raid((seg)->segtype)
|
||||
#define seg_is_reshapable_raid(seg) segtype_is_reshapable_raid((seg)->segtype)
|
||||
#define seg_is_replicator(seg) ((seg)->segtype->flags & SEG_REPLICATOR ? 1 : 0)
|
||||
#define seg_is_replicator_dev(seg) ((seg)->segtype->flags & SEG_REPLICATOR_DEV ? 1 : 0)
|
||||
#define seg_is_snapshot(seg) segtype_is_snapshot((seg)->segtype)
|
||||
@@ -275,6 +286,7 @@ struct segment_type *init_unknown_segtype(struct cmd_context *cmd,
|
||||
#define RAID_FEATURE_RAID0 (1U << 1) /* version 1.7 */
|
||||
#define RAID_FEATURE_RESHAPING (1U << 2) /* version 1.8 */
|
||||
#define RAID_FEATURE_RAID4 (1U << 3) /* ! version 1.8 or 1.9.0 */
|
||||
#define RAID_FEATURE_RESHAPE (1U << 4) /* version 1.10.2 */
|
||||
|
||||
#ifdef RAID_INTERNAL
|
||||
int init_raid_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);
|
||||
|
||||
@@ -238,8 +238,8 @@ static struct lv_segment *_alloc_snapshot_seg(struct logical_volume *lv)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, 0, lv->le_count, 0, 0,
|
||||
NULL, 0, lv->le_count, 0, 0, 0, NULL))) {
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, 0, lv->le_count, 0, 0, 0,
|
||||
NULL, 0, lv->le_count, 0, 0, 0, 0, NULL))) {
|
||||
log_error("Couldn't allocate new snapshot segment.");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -112,7 +112,7 @@ static takeover_fn_t _takeover_fns[][11] = {
|
||||
/* raid1 */ { r1__lin, r1__str, r1__mir, r1__r0, r1__r0m, r1__r1, r1__r45, X , r1__r10, X , X },
|
||||
/* raid4/5 */ { r45_lin, r45_str, r45_mir, r45_r0, r45_r0m, r45_r1, r45_r54, r45_r6, X , X , X },
|
||||
/* raid6 */ { X , r6__str, X , r6__r0, r6__r0m, X , r6__r45, X , X , X , X },
|
||||
/* raid10 */ // { r10_lin, r10_str, r10_mir, r10_r0, r10_r0m, r10_r1, X , X , r10_r10, r10_r01, X },
|
||||
/* raid10 */ { r10_lin, r10_str, r10_mir, r10_r0, r10_r0m, r10_r1, X , X , X , X , X },
|
||||
/* raid01 */ // { X , r01_str, X , X , X , X , X , X , r01_r10, r01_r01, X },
|
||||
/* other */ { X , X , X , X , X , X , X , X , X , X , X },
|
||||
};
|
||||
|
||||
@@ -752,6 +752,19 @@ int lv_is_thin_origin(const struct logical_volume *lv, unsigned int *snap_count)
|
||||
return r;
|
||||
}
|
||||
|
||||
int lv_is_thin_snapshot(const struct logical_volume *lv)
|
||||
{
|
||||
struct lv_segment *seg;
|
||||
|
||||
if (!lv_is_thin_volume(lv))
|
||||
return 0;
|
||||
|
||||
if ((seg = first_seg(lv)) && (seg->origin || seg->external_lv))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Explicit check of new thin pool for usability
|
||||
*
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
|
||||
#include "lib.h"
|
||||
#include "config.h"
|
||||
#include "lvm-file.h"
|
||||
#include "lvm-flock.h"
|
||||
#include "lvm-signal.h"
|
||||
#include "locking.h"
|
||||
|
||||
148
lib/raid/raid.c
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (C) 2011-2016 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2011-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -87,13 +87,13 @@ static int _raid_text_import_areas(struct lv_segment *seg,
|
||||
}
|
||||
|
||||
/* Metadata device comes first. */
|
||||
if (!seg_is_raid0(seg)) {
|
||||
if (!(lv = find_lv(seg->lv->vg, cv->v.str))) {
|
||||
log_error("Couldn't find volume '%s' for segment '%s'.",
|
||||
cv->v.str ? : "NULL", seg_name);
|
||||
return 0;
|
||||
}
|
||||
if (!(lv = find_lv(seg->lv->vg, cv->v.str))) {
|
||||
log_error("Couldn't find volume '%s' for segment '%s'.",
|
||||
cv->v.str ? : "NULL", seg_name);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (strstr(lv->name, "_rmeta_")) {
|
||||
if (!set_lv_segment_area_lv(seg, s, lv, 0, RAID_META))
|
||||
return_0;
|
||||
cv = cv->next;
|
||||
@@ -137,6 +137,7 @@ static int _raid_text_import(struct lv_segment *seg,
|
||||
} raid_attr_import[] = {
|
||||
{ "region_size", &seg->region_size },
|
||||
{ "stripe_size", &seg->stripe_size },
|
||||
{ "data_copies", &seg->data_copies },
|
||||
{ "writebehind", &seg->writebehind },
|
||||
{ "min_recovery_rate", &seg->min_recovery_rate },
|
||||
{ "max_recovery_rate", &seg->max_recovery_rate },
|
||||
@@ -146,6 +147,10 @@ static int _raid_text_import(struct lv_segment *seg,
|
||||
for (i = 0; i < DM_ARRAY_SIZE(raid_attr_import); i++, aip++) {
|
||||
if (dm_config_has_node(sn, aip->name)) {
|
||||
if (!dm_config_get_uint32(sn, aip->name, aip->var)) {
|
||||
if (!strcmp(aip->name, "data_copies")) {
|
||||
*aip->var = 0;
|
||||
continue;
|
||||
}
|
||||
log_error("Couldn't read '%s' for segment %s of logical volume %s.",
|
||||
aip->name, dm_config_parent_name(sn), seg->lv->name);
|
||||
return 0;
|
||||
@@ -165,6 +170,9 @@ static int _raid_text_import(struct lv_segment *seg,
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (seg->data_copies < 2)
|
||||
seg->data_copies = lv_raid_data_copies(seg->segtype, seg->area_count);
|
||||
|
||||
if (seg_is_any_raid0(seg))
|
||||
seg->area_len /= seg->area_count;
|
||||
|
||||
@@ -183,18 +191,31 @@ static int _raid_text_export_raid0(const struct lv_segment *seg, struct formatte
|
||||
|
||||
static int _raid_text_export_raid(const struct lv_segment *seg, struct formatter *f)
|
||||
{
|
||||
outf(f, "device_count = %u", seg->area_count);
|
||||
int raid0 = seg_is_any_raid0(seg);
|
||||
|
||||
if (raid0)
|
||||
outfc(f, (seg->area_count == 1) ? "# linear" : NULL,
|
||||
"stripe_count = %u", seg->area_count);
|
||||
|
||||
else {
|
||||
outf(f, "device_count = %u", seg->area_count);
|
||||
if (seg_is_any_raid10(seg) && seg->data_copies > 0)
|
||||
outf(f, "data_copies = %" PRIu32, seg->data_copies);
|
||||
if (seg->region_size)
|
||||
outf(f, "region_size = %" PRIu32, seg->region_size);
|
||||
}
|
||||
|
||||
if (seg->stripe_size)
|
||||
outf(f, "stripe_size = %" PRIu32, seg->stripe_size);
|
||||
if (seg->region_size)
|
||||
outf(f, "region_size = %" PRIu32, seg->region_size);
|
||||
if (seg->writebehind)
|
||||
outf(f, "writebehind = %" PRIu32, seg->writebehind);
|
||||
if (seg->min_recovery_rate)
|
||||
outf(f, "min_recovery_rate = %" PRIu32, seg->min_recovery_rate);
|
||||
if (seg->max_recovery_rate)
|
||||
outf(f, "max_recovery_rate = %" PRIu32, seg->max_recovery_rate);
|
||||
|
||||
if (!raid0) {
|
||||
if (seg_is_raid1(seg) && seg->writebehind)
|
||||
outf(f, "writebehind = %" PRIu32, seg->writebehind);
|
||||
if (seg->min_recovery_rate)
|
||||
outf(f, "min_recovery_rate = %" PRIu32, seg->min_recovery_rate);
|
||||
if (seg->max_recovery_rate)
|
||||
outf(f, "max_recovery_rate = %" PRIu32, seg->max_recovery_rate);
|
||||
}
|
||||
|
||||
return out_areas(f, seg, "raid");
|
||||
}
|
||||
@@ -216,14 +237,16 @@ static int _raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
|
||||
struct dm_tree_node *node, uint64_t len,
|
||||
uint32_t *pvmove_mirror_count __attribute__((unused)))
|
||||
{
|
||||
int delta_disks = 0, delta_disks_minus = 0, delta_disks_plus = 0, data_offset = 0;
|
||||
uint32_t s;
|
||||
uint64_t flags = 0;
|
||||
uint64_t rebuilds = 0;
|
||||
uint64_t writemostly = 0;
|
||||
uint64_t rebuilds[4];
|
||||
uint64_t writemostly[4];
|
||||
struct dm_tree_node_raid_params params;
|
||||
int raid0 = seg_is_any_raid0(seg);
|
||||
|
||||
memset(¶ms, 0, sizeof(params));
|
||||
memset(&rebuilds, 0, sizeof(rebuilds));
|
||||
memset(&writemostly, 0, sizeof(writemostly));
|
||||
|
||||
if (!seg->area_count) {
|
||||
log_error(INTERNAL_ERROR "_raid_add_target_line called "
|
||||
@@ -232,63 +255,84 @@ static int _raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
|
||||
}
|
||||
|
||||
/*
|
||||
* 64 device restriction imposed by kernel as well. It is
|
||||
* not strictly a userspace limitation.
|
||||
* 253 device restriction imposed by kernel due to MD and dm-raid bitfield limitation in superblock.
|
||||
* It is not strictly a userspace limitation.
|
||||
*/
|
||||
if (seg->area_count > 64) {
|
||||
log_error("Unable to handle more than 64 devices in a "
|
||||
"single RAID array");
|
||||
if (seg->area_count > DEFAULT_RAID_MAX_IMAGES) {
|
||||
log_error("Unable to handle more than %u devices in a "
|
||||
"single RAID array", DEFAULT_RAID_MAX_IMAGES);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!raid0) {
|
||||
if (!seg_is_any_raid0(seg)) {
|
||||
if (!seg->region_size) {
|
||||
log_error("Missing region size for mirror segment.");
|
||||
log_error("Missing region size for raid segment in %s.",
|
||||
seg_lv(seg, 0)->name);
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (s = 0; s < seg->area_count; s++)
|
||||
if (seg_lv(seg, s)->status & LV_REBUILD)
|
||||
rebuilds |= 1ULL << s;
|
||||
for (s = 0; s < seg->area_count; s++) {
|
||||
uint64_t status = seg_lv(seg, s)->status;
|
||||
|
||||
for (s = 0; s < seg->area_count; s++)
|
||||
if (seg_lv(seg, s)->status & LV_WRITEMOSTLY)
|
||||
writemostly |= 1ULL << s;
|
||||
if (status & LV_REBUILD)
|
||||
rebuilds[s/64] |= 1ULL << (s%64);
|
||||
|
||||
if (status & LV_RESHAPE_DELTA_DISKS_PLUS) {
|
||||
delta_disks++;
|
||||
delta_disks_plus++;
|
||||
} else if (status & LV_RESHAPE_DELTA_DISKS_MINUS) {
|
||||
delta_disks--;
|
||||
delta_disks_minus++;
|
||||
}
|
||||
|
||||
if (delta_disks_plus && delta_disks_minus) {
|
||||
log_error(INTERNAL_ERROR "Invalid request for delta disks minus and delta disks plus!");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (status & LV_WRITEMOSTLY)
|
||||
writemostly[s/64] |= 1ULL << (s%64);
|
||||
}
|
||||
|
||||
data_offset = seg->data_offset;
|
||||
|
||||
if (mirror_in_sync())
|
||||
flags = DM_NOSYNC;
|
||||
}
|
||||
|
||||
params.raid_type = lvseg_name(seg);
|
||||
params.stripe_size = seg->stripe_size;
|
||||
params.flags = flags;
|
||||
|
||||
if (raid0) {
|
||||
params.mirrors = 1;
|
||||
params.stripes = seg->area_count;
|
||||
} else if (seg->segtype->parity_devs) {
|
||||
if (seg->segtype->parity_devs) {
|
||||
/* RAID 4/5/6 */
|
||||
params.mirrors = 1;
|
||||
params.stripes = seg->area_count - seg->segtype->parity_devs;
|
||||
} else if (seg_is_raid10(seg)) {
|
||||
/* RAID 10 only supports 2 mirrors now */
|
||||
params.mirrors = 2;
|
||||
params.stripes = seg->area_count / 2;
|
||||
} else if (seg_is_any_raid0(seg)) {
|
||||
params.mirrors = 1;
|
||||
params.stripes = seg->area_count;
|
||||
} else if (seg_is_any_raid10(seg)) {
|
||||
params.data_copies = seg->data_copies;
|
||||
params.stripes = seg->area_count;
|
||||
} else {
|
||||
/* RAID 1 */
|
||||
params.mirrors = seg->area_count;
|
||||
params.mirrors = seg->data_copies;
|
||||
params.stripes = 1;
|
||||
params.writebehind = seg->writebehind;
|
||||
memcpy(params.writemostly, writemostly, sizeof(params.writemostly));
|
||||
}
|
||||
|
||||
if (!raid0) {
|
||||
/* RAID 0 doesn't have a bitmap, thus no region_size, rebuilds etc. */
|
||||
if (!seg_is_any_raid0(seg)) {
|
||||
params.region_size = seg->region_size;
|
||||
params.rebuilds = rebuilds;
|
||||
params.writemostly = writemostly;
|
||||
memcpy(params.rebuilds, rebuilds, sizeof(params.rebuilds));
|
||||
params.min_recovery_rate = seg->min_recovery_rate;
|
||||
params.max_recovery_rate = seg->max_recovery_rate;
|
||||
params.delta_disks = delta_disks;
|
||||
params.data_offset = data_offset;
|
||||
}
|
||||
|
||||
params.stripe_size = seg->stripe_size;
|
||||
params.flags = flags;
|
||||
|
||||
if (!dm_tree_node_add_raid_target_with_params(node, len, ¶ms))
|
||||
return_0;
|
||||
|
||||
@@ -450,6 +494,10 @@ static int _raid_target_present(struct cmd_context *cmd,
|
||||
else
|
||||
log_very_verbose("Target raid does not support %s.",
|
||||
SEG_TYPE_NAME_RAID4);
|
||||
|
||||
if (maj > 1 ||
|
||||
(maj == 1 && (min > 10 || (min == 10 && patchlevel >= 2))))
|
||||
_raid_attrs |= RAID_FEATURE_RESHAPE;
|
||||
}
|
||||
|
||||
if (attributes)
|
||||
@@ -537,14 +585,20 @@ static const struct raid_type {
|
||||
{ SEG_TYPE_NAME_RAID10, 0, SEG_RAID10 | SEG_AREAS_MIRRORED },
|
||||
{ SEG_TYPE_NAME_RAID4, 1, SEG_RAID4 },
|
||||
{ SEG_TYPE_NAME_RAID5, 1, SEG_RAID5 },
|
||||
{ SEG_TYPE_NAME_RAID5_N, 1, SEG_RAID5_N },
|
||||
{ SEG_TYPE_NAME_RAID5_LA, 1, SEG_RAID5_LA },
|
||||
{ SEG_TYPE_NAME_RAID5_LS, 1, SEG_RAID5_LS },
|
||||
{ SEG_TYPE_NAME_RAID5_RA, 1, SEG_RAID5_RA },
|
||||
{ SEG_TYPE_NAME_RAID5_RS, 1, SEG_RAID5_RS },
|
||||
{ SEG_TYPE_NAME_RAID6, 2, SEG_RAID6 },
|
||||
{ SEG_TYPE_NAME_RAID6_N_6, 2, SEG_RAID6_N_6 },
|
||||
{ SEG_TYPE_NAME_RAID6_NC, 2, SEG_RAID6_NC },
|
||||
{ SEG_TYPE_NAME_RAID6_NR, 2, SEG_RAID6_NR },
|
||||
{ SEG_TYPE_NAME_RAID6_ZR, 2, SEG_RAID6_ZR }
|
||||
{ SEG_TYPE_NAME_RAID6_ZR, 2, SEG_RAID6_ZR },
|
||||
{ SEG_TYPE_NAME_RAID6_LS_6, 2, SEG_RAID6_LS_6 },
|
||||
{ SEG_TYPE_NAME_RAID6_RS_6, 2, SEG_RAID6_RS_6 },
|
||||
{ SEG_TYPE_NAME_RAID6_LA_6, 2, SEG_RAID6_LA_6 },
|
||||
{ SEG_TYPE_NAME_RAID6_RA_6, 2, SEG_RAID6_RA_6 }
|
||||
};
|
||||
|
||||
static struct segment_type *_init_raid_segtype(struct cmd_context *cmd,
|
||||
|
||||
@@ -42,7 +42,7 @@ static int _pthread_create(pthread_t *t, void *(*fun)(void *), void *arg, int st
|
||||
/*
|
||||
* We use a smaller stack since it gets preallocated in its entirety
|
||||
*/
|
||||
pthread_attr_setstacksize(&attr, stacksize);
|
||||
pthread_attr_setstacksize(&attr, stacksize + getpagesize());
|
||||
return pthread_create(t, &attr, fun, arg);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -1851,10 +1851,10 @@ static struct dm_ioctl *_do_dm_ioctl(struct dm_task *dmt, unsigned command,
|
||||
dmi->flags &= ~DM_EXISTS_FLAG; /* FIXME */
|
||||
else {
|
||||
if (_log_suppress || dmt->ioctl_errno == EINTR)
|
||||
log_verbose("device-mapper: %s ioctl on %s%s%s%.0d%s%.0d%s%s "
|
||||
log_verbose("device-mapper: %s ioctl on %s %s%s%.0d%s%.0d%s%s "
|
||||
"failed: %s",
|
||||
_cmd_data_v4[dmt->type].name,
|
||||
dmi->name, dmi->uuid,
|
||||
_cmd_data_v4[dmt->type].name,
|
||||
dmi->name, dmi->uuid,
|
||||
dmt->major > 0 ? "(" : "",
|
||||
dmt->major > 0 ? dmt->major : 0,
|
||||
dmt->major > 0 ? ":" : "",
|
||||
@@ -1863,10 +1863,10 @@ static struct dm_ioctl *_do_dm_ioctl(struct dm_task *dmt, unsigned command,
|
||||
dmt->major > 0 ? ")" : "",
|
||||
strerror(dmt->ioctl_errno));
|
||||
else
|
||||
log_error("device-mapper: %s ioctl on %s%s%s%.0d%s%.0d%s%s "
|
||||
log_error("device-mapper: %s ioctl on %s %s%s%.0d%s%.0d%s%s "
|
||||
"failed: %s",
|
||||
_cmd_data_v4[dmt->type].name,
|
||||
dmi->name, dmi->uuid,
|
||||
dmi->name, dmi->uuid,
|
||||
dmt->major > 0 ? "(" : "",
|
||||
dmt->major > 0 ? dmt->major : 0,
|
||||
dmt->major > 0 ? ":" : "",
|
||||
|
||||
@@ -331,6 +331,7 @@ struct dm_status_raid {
|
||||
char *dev_health;
|
||||
/* idle, frozen, resync, recover, check, repair */
|
||||
char *sync_action;
|
||||
uint64_t data_offset; /* RAID out-of-place reshaping */
|
||||
};
|
||||
|
||||
int dm_get_status_raid(struct dm_pool *mem, const char *params,
|
||||
@@ -1719,7 +1720,7 @@ int dm_tree_node_add_raid_target(struct dm_tree_node *node,
|
||||
const char *raid_type,
|
||||
uint32_t region_size,
|
||||
uint32_t stripe_size,
|
||||
uint64_t rebuilds,
|
||||
uint64_t *rebuilds,
|
||||
uint64_t flags);
|
||||
|
||||
/*
|
||||
@@ -1746,18 +1747,22 @@ struct dm_tree_node_raid_params {
|
||||
uint32_t region_size;
|
||||
uint32_t stripe_size;
|
||||
|
||||
int delta_disks; /* +/- number of disks to add/remove (reshaping) */
|
||||
int data_offset; /* data offset to set (out-of-place reshaping) */
|
||||
|
||||
/*
* 'rebuilds' and 'writemostly' are bitfields that signify
* which devices in the array are to be rebuilt or marked
* writemostly. By choosing a 'uint64_t', we limit ourself
* to RAID arrays with 64 devices.
*/
uint64_t rebuilds;
uint64_t writemostly;
uint32_t writebehind; /* I/Os (kernel default COUNTER_MAX / 2) */
uint64_t rebuilds[4];
uint64_t writemostly[4];
uint32_t writebehind; /* I/Os (kernel default COUNTER_MAX / 2) */
|
||||
uint32_t sync_daemon_sleep; /* ms (kernel default = 5sec) */
|
||||
uint32_t max_recovery_rate; /* kB/sec/disk */
|
||||
uint32_t min_recovery_rate; /* kB/sec/disk */
|
||||
uint32_t data_copies; /* RAID # of data copies */
|
||||
uint32_t stripe_cache; /* sectors */
|
||||
|
||||
uint64_t flags; /* [no]sync */
|
||||
|
||||
@@ -23,6 +23,8 @@
#define DEV_NAME(dmt) (dmt->mangled_dev_name ? : dmt->dev_name)
#define DEV_UUID(DMT) (dmt->mangled_uuid ? : dmt->uuid)

#define RAID_BITMAP_SIZE 4

int mangle_string(const char *str, const char *str_name, size_t len,
char *buf, size_t buf_len, dm_string_mangling_t mode);

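RAID_BITMAP_SIZE widens the per-device rebuild/write-mostly state from one uint64_t to four words, i.e. up to 256 device bits. A minimal sketch of the word/bit indexing convention used throughout these hunks (the helper names are illustrative, not libdevmapper API):

#include <stdint.h>

#define RAID_BITMAP_SIZE 4	/* 4 x 64 = 256 possible device slots */

/* Mark device 'dev' (0 .. RAID_BITMAP_SIZE * 64 - 1) in a rebuilds/writemostly bitmap. */
static inline void raid_bitmap_set(uint64_t bm[RAID_BITMAP_SIZE], unsigned dev)
{
	bm[dev / 64] |= UINT64_C(1) << (dev % 64);
}

/* Test whether device 'dev' is marked in the bitmap. */
static inline int raid_bitmap_test(const uint64_t bm[RAID_BITMAP_SIZE], unsigned dev)
{
	return (bm[dev / 64] >> (dev % 64)) & 1;
}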
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (C) 2005-2016 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2005-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of the device-mapper userspace tools.
|
||||
*
|
||||
@@ -47,13 +47,19 @@ enum {
|
||||
SEG_RAID1,
|
||||
SEG_RAID10,
|
||||
SEG_RAID4,
|
||||
SEG_RAID5_N,
|
||||
SEG_RAID5_LA,
|
||||
SEG_RAID5_RA,
|
||||
SEG_RAID5_LS,
|
||||
SEG_RAID5_RS,
|
||||
SEG_RAID6_N_6,
|
||||
SEG_RAID6_ZR,
|
||||
SEG_RAID6_NR,
|
||||
SEG_RAID6_NC,
|
||||
SEG_RAID6_LS_6,
|
||||
SEG_RAID6_RS_6,
|
||||
SEG_RAID6_LA_6,
|
||||
SEG_RAID6_RA_6,
|
||||
};
|
||||
|
||||
/* FIXME Add crypt and multipath support */
|
||||
@@ -81,13 +87,20 @@ static const struct {
|
||||
{ SEG_RAID1, "raid1"},
|
||||
{ SEG_RAID10, "raid10"},
|
||||
{ SEG_RAID4, "raid4"},
|
||||
{ SEG_RAID5_N, "raid5_n"},
|
||||
{ SEG_RAID5_LA, "raid5_la"},
|
||||
{ SEG_RAID5_RA, "raid5_ra"},
|
||||
{ SEG_RAID5_LS, "raid5_ls"},
|
||||
{ SEG_RAID5_RS, "raid5_rs"},
|
||||
{ SEG_RAID6_N_6,"raid6_n_6"},
|
||||
{ SEG_RAID6_ZR, "raid6_zr"},
|
||||
{ SEG_RAID6_NR, "raid6_nr"},
|
||||
{ SEG_RAID6_NC, "raid6_nc"},
|
||||
{ SEG_RAID6_LS_6, "raid6_ls_6"},
|
||||
{ SEG_RAID6_RS_6, "raid6_rs_6"},
|
||||
{ SEG_RAID6_LA_6, "raid6_la_6"},
|
||||
{ SEG_RAID6_RA_6, "raid6_ra_6"},
|
||||
|
||||
|
||||
/*
|
||||
* WARNING: Since 'raid' target overloads this 1:1 mapping table
|
||||
@@ -192,11 +205,14 @@ struct load_segment {
|
||||
struct dm_tree_node *replicator;/* Replicator-dev */
|
||||
uint64_t rdevice_index; /* Replicator-dev */
|
||||
|
||||
uint64_t rebuilds; /* raid */
|
||||
uint64_t writemostly; /* raid */
|
||||
int delta_disks; /* raid reshape number of disks */
|
||||
int data_offset; /* raid reshape data offset on disk to set */
|
||||
uint64_t rebuilds[RAID_BITMAP_SIZE]; /* raid */
|
||||
uint64_t writemostly[RAID_BITMAP_SIZE]; /* raid */
|
||||
uint32_t writebehind; /* raid */
|
||||
uint32_t max_recovery_rate; /* raid kB/sec/disk */
|
||||
uint32_t min_recovery_rate; /* raid kB/sec/disk */
|
||||
uint32_t data_copies; /* raid10 data_copies */
|
||||
|
||||
struct dm_tree_node *metadata; /* Thin_pool + Cache */
|
||||
struct dm_tree_node *pool; /* Thin_pool, Thin */
|
||||
@@ -2140,13 +2156,19 @@ static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
|
||||
case SEG_RAID1:
|
||||
case SEG_RAID10:
|
||||
case SEG_RAID4:
|
||||
case SEG_RAID5_N:
|
||||
case SEG_RAID5_LA:
|
||||
case SEG_RAID5_RA:
|
||||
case SEG_RAID5_LS:
|
||||
case SEG_RAID5_RS:
|
||||
case SEG_RAID6_N_6:
|
||||
case SEG_RAID6_ZR:
|
||||
case SEG_RAID6_NR:
|
||||
case SEG_RAID6_NC:
|
||||
case SEG_RAID6_LS_6:
|
||||
case SEG_RAID6_RS_6:
|
||||
case SEG_RAID6_LA_6:
|
||||
case SEG_RAID6_RA_6:
|
||||
if (!area->dev_node) {
|
||||
EMIT_PARAMS(*pos, " -");
|
||||
break;
|
||||
@@ -2334,16 +2356,21 @@ static int _mirror_emit_segment_line(struct dm_task *dmt, struct load_segment *s
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Is parameter non-zero? */
#define PARAM_IS_SET(p) ((p) ? 1 : 0)
static int _2_if_value(unsigned p)
{
return p ? 2 : 0;
}

/* Return number of bits assuming 4 * 64 bit size */
static int _get_params_count(uint64_t bits)
/* Return number of bits passed in @bits assuming 2 * 64 bit size */
static int _get_params_count(uint64_t *bits)
{
int r = 0;
int i = RAID_BITMAP_SIZE;

r += 2 * hweight32(bits & 0xFFFFFFFF);
r += 2 * hweight32(bits >> 32);
while (i--) {
r += 2 * hweight32(bits[i] & 0xFFFFFFFF);
r += 2 * hweight32(bits[i] >> 32);
}

return r;
}
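Each set bit in these bitmaps later emits a two-word table argument ("rebuild <idx>" or "write_mostly <idx>"), which is why every bit counts twice. The same counting rule, sketched with a compiler popcount builtin (a GCC/Clang assumption) instead of paired hweight32() calls:

/* Equivalent parameter count for one RAID_BITMAP_SIZE-word bitmap. */
static int params_for_bitmap(const uint64_t bits[RAID_BITMAP_SIZE])
{
	int i, r = 0;

	for (i = 0; i < RAID_BITMAP_SIZE; i++)
		r += 2 * __builtin_popcountll(bits[i]);

	return r;
}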
@@ -2354,32 +2381,60 @@ static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
|
||||
size_t paramsize)
|
||||
{
|
||||
uint32_t i;
|
||||
uint32_t area_count = seg->area_count / 2;
|
||||
int param_count = 1; /* mandatory 'chunk size'/'stripe size' arg */
|
||||
int pos = 0;
|
||||
unsigned type = seg->type;
|
||||
unsigned type;
|
||||
|
||||
if (seg->area_count % 2)
|
||||
return 0;
|
||||
|
||||
if ((seg->flags & DM_NOSYNC) || (seg->flags & DM_FORCESYNC))
|
||||
param_count++;
|
||||
|
||||
param_count += 2 * (PARAM_IS_SET(seg->region_size) +
|
||||
PARAM_IS_SET(seg->writebehind) +
|
||||
PARAM_IS_SET(seg->min_recovery_rate) +
|
||||
PARAM_IS_SET(seg->max_recovery_rate));
|
||||
param_count += _2_if_value(seg->data_offset) +
|
||||
_2_if_value(seg->delta_disks) +
|
||||
_2_if_value(seg->region_size) +
|
||||
_2_if_value(seg->writebehind) +
|
||||
_2_if_value(seg->min_recovery_rate) +
|
||||
_2_if_value(seg->max_recovery_rate) +
|
||||
_2_if_value(seg->data_copies > 1);
|
||||
|
||||
/* rebuilds and writemostly are 64 bits */
|
||||
/* rebuilds and writemostly are BITMAP_SIZE * 64 bits */
|
||||
param_count += _get_params_count(seg->rebuilds);
|
||||
param_count += _get_params_count(seg->writemostly);
|
||||
|
||||
if ((type == SEG_RAID1) && seg->stripe_size)
|
||||
log_error("WARNING: Ignoring RAID1 stripe size");
|
||||
if ((seg->type == SEG_RAID1) && seg->stripe_size)
|
||||
log_info("WARNING: Ignoring RAID1 stripe size");
|
||||
|
||||
/* Kernel only expects "raid0", not "raid0_meta" */
|
||||
type = seg->type;
|
||||
if (type == SEG_RAID0_META)
|
||||
type = SEG_RAID0;
|
||||
#if 0
|
||||
/* Kernel only expects "raid10", not "raid10_{far,offset}" */
|
||||
else if (type == SEG_RAID10_FAR ||
|
||||
type == SEG_RAID10_OFFSET) {
|
||||
param_count += 2;
|
||||
type = SEG_RAID10_NEAR;
|
||||
}
|
||||
#endif
|
||||
|
||||
EMIT_PARAMS(pos, "%s %d %u", _dm_segtypes[type].target,
|
||||
EMIT_PARAMS(pos, "%s %d %u",
|
||||
// type == SEG_RAID10_NEAR ? "raid10" : _dm_segtypes[type].target,
|
||||
type == SEG_RAID10 ? "raid10" : _dm_segtypes[type].target,
|
||||
param_count, seg->stripe_size);
|
||||
|
||||
#if 0
|
||||
if (seg->type == SEG_RAID10_FAR)
|
||||
EMIT_PARAMS(pos, " raid10_format far");
|
||||
else if (seg->type == SEG_RAID10_OFFSET)
|
||||
EMIT_PARAMS(pos, " raid10_format offset");
|
||||
#endif
|
||||
|
||||
if (seg->data_copies > 1 && type == SEG_RAID10)
|
||||
EMIT_PARAMS(pos, " raid10_copies %u", seg->data_copies);
|
||||
|
||||
if (seg->flags & DM_NOSYNC)
|
||||
EMIT_PARAMS(pos, " nosync");
|
||||
else if (seg->flags & DM_FORCESYNC)
|
||||
@@ -2388,27 +2443,38 @@ static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
|
||||
if (seg->region_size)
|
||||
EMIT_PARAMS(pos, " region_size %u", seg->region_size);
|
||||
|
||||
for (i = 0; i < (seg->area_count / 2); i++)
|
||||
if (seg->rebuilds & (1ULL << i))
|
||||
/* If seg-data_offset == 1, kernel needs a zero offset to adjust to it */
|
||||
if (seg->data_offset)
|
||||
EMIT_PARAMS(pos, " data_offset %d", seg->data_offset == 1 ? 0 : seg->data_offset);
|
||||
|
||||
if (seg->delta_disks)
|
||||
EMIT_PARAMS(pos, " delta_disks %d", seg->delta_disks);
|
||||
|
||||
for (i = 0; i < area_count; i++)
|
||||
if (seg->rebuilds[i/64] & (1ULL << (i%64)))
|
||||
EMIT_PARAMS(pos, " rebuild %u", i);
|
||||
|
||||
if (seg->min_recovery_rate)
|
||||
EMIT_PARAMS(pos, " min_recovery_rate %u",
|
||||
seg->min_recovery_rate);
|
||||
|
||||
if (seg->max_recovery_rate)
|
||||
EMIT_PARAMS(pos, " max_recovery_rate %u",
|
||||
seg->max_recovery_rate);
|
||||
|
||||
for (i = 0; i < (seg->area_count / 2); i++)
|
||||
if (seg->writemostly & (1ULL << i))
|
||||
for (i = 0; i < area_count; i++)
|
||||
if (seg->writemostly[i/64] & (1ULL << (i%64)))
|
||||
EMIT_PARAMS(pos, " write_mostly %u", i);
|
||||
|
||||
if (seg->writebehind)
|
||||
EMIT_PARAMS(pos, " max_write_behind %u", seg->writebehind);
|
||||
|
||||
/*
|
||||
* Has to be before "min_recovery_rate" or the kernels
|
||||
* check will fail when both set and min > previous max
|
||||
*/
|
||||
if (seg->max_recovery_rate)
|
||||
EMIT_PARAMS(pos, " max_recovery_rate %u",
|
||||
seg->max_recovery_rate);
|
||||
|
||||
if (seg->min_recovery_rate)
|
||||
EMIT_PARAMS(pos, " min_recovery_rate %u",
|
||||
seg->min_recovery_rate);
|
||||
|
||||
/* Print number of metadata/data device pairs */
|
||||
EMIT_PARAMS(pos, " %u", seg->area_count/2);
|
||||
EMIT_PARAMS(pos, " %u", area_count);
|
||||
|
||||
if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
|
||||
return_0;
|
||||
@@ -2588,13 +2654,19 @@ static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
|
||||
case SEG_RAID1:
|
||||
case SEG_RAID10:
|
||||
case SEG_RAID4:
|
||||
case SEG_RAID5_N:
|
||||
case SEG_RAID5_LA:
|
||||
case SEG_RAID5_RA:
|
||||
case SEG_RAID5_LS:
|
||||
case SEG_RAID5_RS:
|
||||
case SEG_RAID6_N_6:
|
||||
case SEG_RAID6_ZR:
|
||||
case SEG_RAID6_NR:
|
||||
case SEG_RAID6_NC:
|
||||
case SEG_RAID6_LS_6:
|
||||
case SEG_RAID6_RS_6:
|
||||
case SEG_RAID6_LA_6:
|
||||
case SEG_RAID6_RA_6:
|
||||
target_type_is_raid = 1;
|
||||
r = _raid_emit_segment_line(dmt, major, minor, seg, seg_start,
|
||||
params, paramsize);
|
||||
@@ -2849,8 +2921,8 @@ int dm_tree_preload_children(struct dm_tree_node *dnode,
|
||||
else if (child->props.size_changed < 0)
|
||||
dnode->props.size_changed = -1;
|
||||
|
||||
/* Resume device immediately if it has parents and its size changed */
|
||||
if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
|
||||
/* No resume for a device without parents or with unchanged or smaller size */
|
||||
if (!dm_tree_node_num_children(child, 1) || (child->props.size_changed <= 0))
|
||||
continue;
|
||||
|
||||
if (!node_created && (dm_list_size(&child->props.segs) == 1)) {
|
||||
@@ -3242,11 +3314,14 @@ int dm_tree_node_add_raid_target_with_params(struct dm_tree_node *node,
|
||||
seg->region_size = p->region_size;
|
||||
seg->stripe_size = p->stripe_size;
|
||||
seg->area_count = 0;
|
||||
seg->rebuilds = p->rebuilds;
|
||||
seg->writemostly = p->writemostly;
|
||||
seg->delta_disks = p->delta_disks;
|
||||
seg->data_offset = p->data_offset;
|
||||
memcpy(seg->rebuilds, p->rebuilds, sizeof(seg->rebuilds));
|
||||
memcpy(seg->writemostly, p->writemostly, sizeof(seg->writemostly));
|
||||
seg->writebehind = p->writebehind;
|
||||
seg->min_recovery_rate = p->min_recovery_rate;
|
||||
seg->max_recovery_rate = p->max_recovery_rate;
|
||||
seg->data_copies = p->data_copies;
|
||||
seg->flags = p->flags;
|
||||
|
||||
return 1;
|
||||
@@ -3257,17 +3332,18 @@ int dm_tree_node_add_raid_target(struct dm_tree_node *node,
|
||||
const char *raid_type,
|
||||
uint32_t region_size,
|
||||
uint32_t stripe_size,
|
||||
uint64_t rebuilds,
|
||||
uint64_t *rebuilds,
|
||||
uint64_t flags)
|
||||
{
|
||||
struct dm_tree_node_raid_params params = {
|
||||
.raid_type = raid_type,
|
||||
.region_size = region_size,
|
||||
.stripe_size = stripe_size,
|
||||
.rebuilds = rebuilds,
|
||||
.flags = flags
|
||||
};
|
||||
|
||||
memcpy(params.rebuilds, rebuilds, sizeof(params.rebuilds));
|
||||
|
||||
return dm_tree_node_add_raid_target_with_params(node, size, ¶ms);
|
||||
}
|
||||
|
||||
@@ -3869,13 +3945,19 @@ int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset)
|
||||
case SEG_RAID0_META:
|
||||
case SEG_RAID1:
|
||||
case SEG_RAID4:
|
||||
case SEG_RAID5_N:
|
||||
case SEG_RAID5_LA:
|
||||
case SEG_RAID5_RA:
|
||||
case SEG_RAID5_LS:
|
||||
case SEG_RAID5_RS:
|
||||
case SEG_RAID6_N_6:
|
||||
case SEG_RAID6_ZR:
|
||||
case SEG_RAID6_NR:
|
||||
case SEG_RAID6_NC:
|
||||
case SEG_RAID6_LS_6:
|
||||
case SEG_RAID6_RS_6:
|
||||
case SEG_RAID6_LA_6:
|
||||
case SEG_RAID6_RA_6:
|
||||
break;
|
||||
default:
|
||||
log_error("dm_tree_node_add_null_area() called on an unsupported segment type");
|
||||
|
||||
@@ -3062,26 +3062,31 @@ static void _get_final_time(time_range_t range, struct tm *tm,
|
||||
tm_up.tm_sec += 1;
|
||||
break;
|
||||
}
|
||||
/* fall through */
|
||||
case RANGE_MINUTE:
|
||||
if (tm_up.tm_min < 59) {
|
||||
tm_up.tm_min += 1;
|
||||
break;
|
||||
}
|
||||
/* fall through */
|
||||
case RANGE_HOUR:
|
||||
if (tm_up.tm_hour < 23) {
|
||||
tm_up.tm_hour += 1;
|
||||
break;
|
||||
}
|
||||
/* fall through */
|
||||
case RANGE_DAY:
|
||||
if (tm_up.tm_mday < _get_days_in_month(tm_up.tm_mon, tm_up.tm_year)) {
|
||||
tm_up.tm_mday += 1;
|
||||
break;
|
||||
}
|
||||
/* fall through */
|
||||
case RANGE_MONTH:
|
||||
if (tm_up.tm_mon < 11) {
|
||||
tm_up.tm_mon += 1;
|
||||
break;
|
||||
}
|
||||
/* fall through */
|
||||
case RANGE_YEAR:
|
||||
tm_up.tm_year += 1;
|
||||
break;
|
||||
@@ -4204,7 +4209,7 @@ static void _recalculate_fields(struct dm_report *rh)
|
||||
{
|
||||
struct row *row;
|
||||
struct dm_report_field *field;
|
||||
size_t len;
|
||||
int len;
|
||||
|
||||
dm_list_iterate_items(row, &rh->rows) {
|
||||
dm_list_iterate_items(field, &row->fields) {
|
||||
|
||||
@@ -402,7 +402,7 @@ static int _stats_bound(const struct dm_stats *dms)
|
||||
if (dms->bind_major > 0 || dms->bind_name || dms->bind_uuid)
|
||||
return 1;
|
||||
/* %p format specifier expects a void pointer. */
|
||||
log_debug("Stats handle at %p is not bound.", (void *) dms);
|
||||
log_debug("Stats handle at %p is not bound.", dms);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -3294,7 +3294,7 @@ static void _sum_histogram_bins(const struct dm_stats *dms,
|
||||
struct dm_stats_region *region;
|
||||
struct dm_histogram_bin *bins;
|
||||
struct dm_histogram *dmh_cur;
|
||||
uint64_t bin;
|
||||
int bin;
|
||||
|
||||
region = &dms->regions[region_id];
|
||||
dmh_cur = region->counters[area_id].histogram;
|
||||
@@ -3857,9 +3857,9 @@ struct _extent {
|
||||
*/
|
||||
static int _extent_start_compare(const void *p1, const void *p2)
|
||||
{
|
||||
struct _extent *r1, *r2;
|
||||
r1 = (struct _extent *) p1;
|
||||
r2 = (struct _extent *) p2;
|
||||
const struct _extent *r1, *r2;
|
||||
r1 = (const struct _extent *) p1;
|
||||
r2 = (const struct _extent *) p2;
|
||||
|
||||
if (r1->start < r2->start)
|
||||
return -1;
|
||||
@@ -3868,37 +3868,6 @@ static int _extent_start_compare(const void *p1, const void *p2)
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Resize the group bitmap corresponding to group_id so that it can
|
||||
* contain at least num_regions members.
|
||||
*/
|
||||
static int _stats_resize_group(struct dm_stats_group *group, int num_regions)
|
||||
{
|
||||
int last_bit = dm_bit_get_last(group->regions);
|
||||
dm_bitset_t new, old;
|
||||
|
||||
if (last_bit >= num_regions) {
|
||||
log_error("Cannot resize group bitmap to %d with bit %d set.",
|
||||
num_regions, last_bit);
|
||||
return 0;
|
||||
}
|
||||
|
||||
log_very_verbose("Resizing group bitmap from %d to %d (last_bit: %d).",
|
||||
group->regions[0], num_regions, last_bit);
|
||||
|
||||
new = dm_bitset_create(NULL, num_regions);
|
||||
if (!new) {
|
||||
log_error("Could not allocate memory for new group bitmap.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
old = group->regions;
|
||||
dm_bit_copy(new, old);
|
||||
group->regions = new;
|
||||
dm_bitset_destroy(old);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int _stats_create_group(struct dm_stats *dms, dm_bitset_t regions,
|
||||
const char *alias, uint64_t *group_id)
|
||||
{
|
||||
@@ -4003,7 +3972,7 @@ merge:
|
||||
static void _stats_copy_histogram_bounds(struct dm_histogram *to,
|
||||
struct dm_histogram *from)
|
||||
{
|
||||
uint64_t i;
|
||||
int i;
|
||||
|
||||
to->nr_bins = from->nr_bins;
|
||||
|
||||
@@ -4019,7 +3988,7 @@ static void _stats_copy_histogram_bounds(struct dm_histogram *to,
|
||||
static int _stats_check_histogram_bounds(struct dm_histogram *h1,
|
||||
struct dm_histogram *h2)
|
||||
{
|
||||
uint64_t i;
|
||||
int i;
|
||||
|
||||
if (!h1 || !h2)
|
||||
return 0;
|
||||
@@ -4202,6 +4171,37 @@ int dm_stats_get_group_descriptor(const struct dm_stats *dms,
|
||||
}
|
||||
|
||||
#ifdef HAVE_LINUX_FIEMAP_H
|
||||
/*
|
||||
* Resize the group bitmap corresponding to group_id so that it can
|
||||
* contain at least num_regions members.
|
||||
*/
|
||||
static int _stats_resize_group(struct dm_stats_group *group, int num_regions)
|
||||
{
|
||||
int last_bit = dm_bit_get_last(group->regions);
|
||||
dm_bitset_t new, old;
|
||||
|
||||
if (last_bit >= num_regions) {
|
||||
log_error("Cannot resize group bitmap to %d with bit %d set.",
|
||||
num_regions, last_bit);
|
||||
return 0;
|
||||
}
|
||||
|
||||
log_very_verbose("Resizing group bitmap from %d to %d (last_bit: %d).",
|
||||
group->regions[0], num_regions, last_bit);
|
||||
|
||||
new = dm_bitset_create(NULL, num_regions);
|
||||
if (!new) {
|
||||
log_error("Could not allocate memory for new group bitmap.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
old = group->regions;
|
||||
dm_bit_copy(new, old);
|
||||
group->regions = new;
|
||||
dm_bitset_destroy(old);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Group a table of region_ids corresponding to the extents of a file.
|
||||
*/
|
||||
@@ -4557,7 +4557,7 @@ static int _stats_unmap_regions(struct dm_stats *dms, uint64_t group_id,
|
||||
log_error("Could not finalize region extent table.");
|
||||
goto out;
|
||||
}
|
||||
log_very_verbose("Kept %ld of %ld old extents",
|
||||
log_very_verbose("Kept " FMTi64 " of " FMTi64 " old extents",
|
||||
nr_kept, nr_old);
|
||||
log_very_verbose("Found " FMTu64 " new extents",
|
||||
*count - nr_kept);
|
||||
@@ -4584,7 +4584,7 @@ static uint64_t *_stats_map_file_regions(struct dm_stats *dms, int fd,
|
||||
int precise, uint64_t group_id,
|
||||
uint64_t *count, int *regroup)
|
||||
{
|
||||
struct _extent *extents = NULL, *old_extents;
|
||||
struct _extent *extents = NULL, *old_extents = NULL;
|
||||
uint64_t *regions = NULL, fail_region;
|
||||
struct dm_stats_group *group = NULL;
|
||||
struct dm_pool *extent_mem = NULL;
|
||||
@@ -4592,7 +4592,7 @@ static uint64_t *_stats_map_file_regions(struct dm_stats *dms, int fd,
|
||||
char *hist_arg = NULL;
|
||||
int update, num_bits;
|
||||
struct statfs fsbuf;
|
||||
int64_t nr_kept, i;
|
||||
int64_t nr_kept = 0, i;
|
||||
struct stat buf;
|
||||
|
||||
update = _stats_group_id_present(dms, group_id);
|
||||
@@ -4725,7 +4725,7 @@ static uint64_t *_stats_map_file_regions(struct dm_stats *dms, int fd,
|
||||
|
||||
dm_pool_free(extent_mem, extents);
|
||||
dm_pool_destroy(extent_mem);
|
||||
dm_free(hist_arg);
|
||||
|
||||
return regions;
|
||||
|
||||
out_remove:
|
||||
@@ -4842,7 +4842,7 @@ uint64_t *dm_stats_update_regions_from_fd(struct dm_stats *dms, int fd,
|
||||
if (!bounds) {
|
||||
log_error("Could not allocate memory for group "
|
||||
"histogram bounds.");
|
||||
return NULL;
|
||||
goto out;
|
||||
}
|
||||
_stats_copy_histogram_bounds(bounds,
|
||||
dms->regions[group_id].bounds);
|
||||
@@ -4869,6 +4869,8 @@ uint64_t *dm_stats_update_regions_from_fd(struct dm_stats *dms, int fd,
|
||||
bad:
|
||||
_stats_cleanup_region_ids(dms, regions, count);
|
||||
dm_free(bounds);
|
||||
dm_free(regions);
|
||||
out:
|
||||
dm_free((char *) alias);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -626,7 +626,7 @@ uint64_t dm_units_to_factor(const char *units, char *unit_type,
|
||||
uint64_t multiplier;
|
||||
|
||||
if (endptr)
|
||||
*endptr = (char *) units;
|
||||
*endptr = units;
|
||||
|
||||
if (isdigit(*units)) {
|
||||
custom_value = strtod(units, &ptr);
|
||||
@@ -709,7 +709,7 @@ uint64_t dm_units_to_factor(const char *units, char *unit_type,
|
||||
}
|
||||
|
||||
if (endptr)
|
||||
*endptr = (char *) units + 1;
|
||||
*endptr = units + 1;
|
||||
|
||||
if (_close_enough(custom_value, 0.))
|
||||
return v * multiplier; /* Use integer arithmetic */
|
||||
|
||||
@@ -89,6 +89,8 @@ static unsigned _count_fields(const char *p)
* <raid_type> <#devs> <health_str> <sync_ratio>
* Versions 1.5.0+ (6 fields):
* <raid_type> <#devs> <health_str> <sync_ratio> <sync_action> <mismatch_cnt>
* Versions 1.9.0+ (7 fields):
* <raid_type> <#devs> <health_str> <sync_ratio> <sync_action> <mismatch_cnt> <data_offset>
*/
int dm_get_status_raid(struct dm_pool *mem, const char *params,
struct dm_status_raid **status)
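A hedged usage sketch of this call: the params string below is a made-up 1.9.0-style status line, and only the dm_status_raid members referenced in these hunks (dev_health, sync_action, data_offset) are read.

#include <inttypes.h>
#include <stdio.h>
#include <libdevmapper.h>

/* Parse one dm-raid status params string, e.g. copied from 'dmsetup status'. */
static void print_raid_status(const char *params)
{
	struct dm_pool *mem;
	struct dm_status_raid *status;

	if (!(mem = dm_pool_create("raid_status", 1024)))
		return;

	/* Example: "raid1 2 AA 100/100 idle 0 0"
	 * (<raid_type> <#devs> <health_str> <sync_ratio> <sync_action> <mismatch_cnt> <data_offset>) */
	if (dm_get_status_raid(mem, params, &status))
		printf("health=%s action=%s data_offset=%" PRIu64 "\n",
		       status->dev_health, status->sync_action, status->data_offset);

	dm_pool_destroy(mem);	/* frees the status structure allocated from mem */
}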
@@ -147,6 +149,22 @@ int dm_get_status_raid(struct dm_pool *mem, const char *params,
|
||||
if (sscanf(p, "%s %" PRIu64, s->sync_action, &s->mismatch_count) != 2)
|
||||
goto_bad;
|
||||
|
||||
if (num_fields < 7)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* All pre-1.9.0 version parameters are read. Now we check
|
||||
* for additional 1.9.0+ parameters (i.e. nr_fields at least 7).
|
||||
*
|
||||
* Note that data_offset will be 0 if the
|
||||
* kernel returns a pre-1.9.0 status.
|
||||
*/
|
||||
msg_fields = "<data_offset>";
|
||||
if (!(p = _skip_fields(params, 6))) /* skip pre-1.9.0 params */
|
||||
goto bad;
|
||||
if (sscanf(p, "%" PRIu64, &s->data_offset) != 1)
|
||||
goto bad;
|
||||
|
||||
out:
|
||||
*status = s;
|
||||
|
||||
|
||||
@@ -40,6 +40,11 @@ SED = @SED@
|
||||
CFLOW_CMD = @CFLOW_CMD@
|
||||
AWK = @AWK@
|
||||
CHMOD = @CHMOD@
|
||||
EGREP = @EGREP@
|
||||
GREP = @GREP@
|
||||
SORT = @SORT@
|
||||
WC = @WC@
|
||||
|
||||
PYTHON2 = @PYTHON2@
|
||||
PYTHON3 = @PYTHON3@
|
||||
PYCOMPILE = $(top_srcdir)/autoconf/py-compile
|
||||
@@ -512,9 +517,9 @@ ifeq (,$(firstword $(EXPORTED_SYMBOLS)))
|
||||
) > $@
|
||||
else
|
||||
set -e;\
|
||||
R=$$(sort $^ | uniq -u);\
|
||||
R=$$($(SORT) $^ | uniq -u);\
|
||||
test -z "$$R" || { echo "Mismatch between symbols in shared library and lists in .exported_symbols.* files: $$R"; false; } ;\
|
||||
( for i in $$(echo $(EXPORTED_SYMBOLS) | tr ' ' '\n' | sort -rnt_ -k5 ); do\
|
||||
( for i in $$(echo $(EXPORTED_SYMBOLS) | tr ' ' '\n' | $(SORT) -rnt_ -k5 ); do\
|
||||
echo "$${i##*.} {"; echo " global:";\
|
||||
$(SED) "s/^/ /;s/$$/;/" $$i;\
|
||||
echo "};";\
|
||||
|
||||
@@ -31,18 +31,20 @@ LVMRAIDMAN = lvmraid.7
|
||||
|
||||
MAN5=lvm.conf.5
|
||||
MAN7=lvmsystemid.7 lvmreport.7
|
||||
MAN8=lvm-config.8 lvm-dumpconfig.8 lvm-fullreport.8 lvm-lvpoll.8 \
|
||||
lvchange.8 lvmconfig.8 lvconvert.8 lvcreate.8 lvdisplay.8 lvextend.8 \
|
||||
lvm.8 lvmchange.8 lvmconf.8 lvmdiskscan.8 lvmdump.8 lvmsadc.8 lvmsar.8 \
|
||||
MAN8=lvm.8 lvmconf.8 lvmdump.8
|
||||
MAN8DM=dmsetup.8 dmstats.8
|
||||
MAN8CLUSTER=
|
||||
MAN8SYSTEMD_GENERATORS=lvm2-activation-generator.8
|
||||
|
||||
MAN8GEN=lvm-config.8 lvm-dumpconfig.8 lvm-fullreport.8 lvm-lvpoll.8 \
|
||||
lvcreate.8 lvchange.8 lvmconfig.8 lvconvert.8 lvdisplay.8 lvextend.8 \
|
||||
lvreduce.8 lvremove.8 lvrename.8 lvresize.8 lvs.8 \
|
||||
lvscan.8 pvchange.8 pvck.8 pvcreate.8 pvdisplay.8 pvmove.8 pvremove.8 \
|
||||
pvresize.8 pvs.8 pvscan.8 vgcfgbackup.8 vgcfgrestore.8 vgchange.8 \
|
||||
vgck.8 vgcreate.8 vgconvert.8 vgdisplay.8 vgexport.8 vgextend.8 \
|
||||
vgimport.8 vgimportclone.8 vgmerge.8 vgmknodes.8 vgreduce.8 vgremove.8 \
|
||||
vgrename.8 vgs.8 vgscan.8 vgsplit.8
|
||||
MAN8DM=dmsetup.8 dmstats.8
|
||||
MAN8CLUSTER=
|
||||
MAN8SYSTEMD_GENERATORS=lvm2-activation-generator.8
|
||||
vgrename.8 vgs.8 vgscan.8 vgsplit.8 \
|
||||
lvmsar.8 lvmsadc.8 lvmdiskscan.8 lvmchange.8
|
||||
|
||||
ifeq ($(MAKECMDGOALS),all_man)
|
||||
MAN_ALL="yes"
|
||||
@@ -113,8 +115,8 @@ MAN8DIR=$(mandir)/man8
|
||||
|
||||
include $(top_builddir)/make.tmpl
|
||||
|
||||
CLEAN_TARGETS+=$(MAN5) $(MAN7) $(MAN8) $(MAN8CLUSTER) \
|
||||
$(MAN8SYSTEMD_GENERATORS) $(MAN8DM)
|
||||
CLEAN_TARGETS+=$(MAN5) $(MAN7) $(MAN8) $(MAN8GEN) $(MAN8CLUSTER) \
|
||||
$(MAN8SYSTEMD_GENERATORS) $(MAN8DM) *.gen man-generator
|
||||
DISTCLEAN_TARGETS+=$(FSADMMAN) $(BLKDEACTIVATEMAN) $(DMEVENTDMAN) \
|
||||
$(LVMETADMAN) $(LVMPOLLDMAN) $(LVMLOCKDMAN) $(CLVMDMAN) $(CMIRRORDMAN) \
|
||||
$(LVMCACHEMAN) $(LVMTHINMAN) $(LVMDBUSDMAN) $(LVMRAIDMAN)
|
||||
@@ -125,11 +127,11 @@ all: man device-mapper
|
||||
|
||||
device-mapper: $(MAN8DM)
|
||||
|
||||
man: $(MAN5) $(MAN7) $(MAN8) $(MAN8CLUSTER) $(MAN8SYSTEMD_GENERATORS)
|
||||
man: $(MAN5) $(MAN7) $(MAN8) $(MAN8GEN) $(MAN8CLUSTER) $(MAN8SYSTEMD_GENERATORS)
|
||||
|
||||
all_man: man
|
||||
|
||||
$(MAN5) $(MAN7) $(MAN8) $(MAN8DM) $(MAN8CLUSTER): Makefile
|
||||
$(MAN5) $(MAN7) $(MAN8) $(MAN8GEN) $(MAN8DM) $(MAN8CLUSTER): Makefile
|
||||
|
||||
Makefile: Makefile.in
|
||||
@:
|
||||
@@ -140,6 +142,18 @@ Makefile: Makefile.in
|
||||
*) echo "Creating $@" ; $(SED) -e "s+#VERSION#+$(LVM_VERSION)+;s+#DEFAULT_SYS_DIR#+$(DEFAULT_SYS_DIR)+;s+#DEFAULT_ARCHIVE_DIR#+$(DEFAULT_ARCHIVE_DIR)+;s+#DEFAULT_BACKUP_DIR#+$(DEFAULT_BACKUP_DIR)+;s+#DEFAULT_PROFILE_DIR#+$(DEFAULT_PROFILE_DIR)+;s+#DEFAULT_CACHE_DIR#+$(DEFAULT_CACHE_DIR)+;s+#DEFAULT_LOCK_DIR#+$(DEFAULT_LOCK_DIR)+;s+#CLVMD_PATH#+@CLVMD_PATH@+;s+#LVM_PATH#+@LVM_PATH@+;s+#DEFAULT_RUN_DIR#+@DEFAULT_RUN_DIR@+;s+#DEFAULT_PID_DIR#+@DEFAULT_PID_DIR@+;s+#SYSTEMD_GENERATOR_DIR#+$(SYSTEMD_GENERATOR_DIR)+;s+#DEFAULT_MANGLING#+$(DEFAULT_MANGLING)+;" $< > $@ ;; \
|
||||
esac
|
||||
|
||||
man-generator:
|
||||
$(CC) -DMAN_PAGE_GENERATOR -I$(top_builddir)/tools $(CFLAGS) $(top_srcdir)/tools/command.c -o $@
|
||||
- ./man-generator lvmconfig > test.gen
|
||||
if [ ! -s test.gen ] ; then cp genfiles/*.gen $(top_builddir)/man; fi;
|
||||
|
||||
$(MAN8GEN): man-generator
|
||||
echo "Generating $@" ;
|
||||
if [ ! -e $@.gen ]; then ./man-generator $(basename $@) $(top_srcdir)/man/$@.des > $@.gen; fi
|
||||
if [ -f $(top_srcdir)/man/$@.end ]; then cat $(top_srcdir)/man/$@.end >> $@.gen; fi;
|
||||
cat $(top_srcdir)/man/see_also.end >> $@.gen
|
||||
$(SED) -e "s+#VERSION#+$(LVM_VERSION)+;s+#DEFAULT_SYS_DIR#+$(DEFAULT_SYS_DIR)+;s+#DEFAULT_ARCHIVE_DIR#+$(DEFAULT_ARCHIVE_DIR)+;s+#DEFAULT_BACKUP_DIR#+$(DEFAULT_BACKUP_DIR)+;s+#DEFAULT_PROFILE_DIR#+$(DEFAULT_PROFILE_DIR)+;s+#DEFAULT_CACHE_DIR#+$(DEFAULT_CACHE_DIR)+;s+#DEFAULT_LOCK_DIR#+$(DEFAULT_LOCK_DIR)+;s+#CLVMD_PATH#+@CLVMD_PATH@+;s+#LVM_PATH#+@LVM_PATH@+;s+#DEFAULT_RUN_DIR#+@DEFAULT_RUN_DIR@+;s+#DEFAULT_PID_DIR#+@DEFAULT_PID_DIR@+;s+#SYSTEMD_GENERATOR_DIR#+$(SYSTEMD_GENERATOR_DIR)+;s+#DEFAULT_MANGLING#+$(DEFAULT_MANGLING)+;" $@.gen > $@
|
||||
|
||||
install_man5: $(MAN5)
|
||||
$(INSTALL) -d $(MAN5DIR)
|
||||
$(INSTALL_DATA) $(MAN5) $(MAN5DIR)/
|
||||
@@ -148,9 +162,10 @@ install_man7: $(MAN7)
|
||||
$(INSTALL) -d $(MAN7DIR)
|
||||
$(INSTALL_DATA) $(MAN7) $(MAN7DIR)/
|
||||
|
||||
install_man8: $(MAN8)
|
||||
install_man8: $(MAN8) $(MAN8GEN)
|
||||
$(INSTALL) -d $(MAN8DIR)
|
||||
$(INSTALL_DATA) $(MAN8) $(MAN8DIR)/
|
||||
$(INSTALL_DATA) $(MAN8GEN) $(MAN8DIR)/
|
||||
|
||||
install_lvm2: install_man5 install_man7 install_man8
|
||||
|
||||
|
||||
@@ -154,6 +154,32 @@ This timeout will be ignored if you start \fBclvmd\fP with the \fB\-d\fP.
|
||||
.br
|
||||
Display the version of the cluster LVM daemon.
|
||||
.
|
||||
.SH NOTES
|
||||
.
|
||||
.SS Activation
|
||||
.
|
||||
In a clustered VG, clvmd is used for activation, and the following values are
|
||||
possible with \fBlvchange/vgchange -a\fP:
|
||||
.IP \fBy\fP|\fBsy\fP
|
||||
clvmd activates the LV in shared mode (with a shared lock),
|
||||
allowing multiple nodes to activate the LV concurrently.
|
||||
If the LV type prohibits shared access, such as an LV with a snapshot,
|
||||
an exclusive lock is automatically used instead.
|
||||
clvmd attempts to activate the LV concurrently on all nodes.
|
||||
.IP \fBey\fP
|
||||
clvmd activates the LV in exclusive mode (with an exclusive lock),
|
||||
allowing a single node to activate the LV.
|
||||
clvmd attempts to activate the LV concurrently on all nodes, but only
|
||||
one will succeed.
|
||||
.IP \fBly\fP
|
||||
clvmd attempts to activate the LV only on the local node.
|
||||
If the LV type allows concurrent access, then shared mode is used,
|
||||
otherwise exclusive.
|
||||
.IP \fBn\fP
|
||||
clvmd deactivates the LV on all nodes.
|
||||
.IP \fBln\fP
|
||||
clvmd deactivates the LV on the local node.
|
||||
.
|
||||
.SH ENVIRONMENT VARIABLES
|
||||
.TP
|
||||
.B LVM_CLVMD_BINARY
|
||||
|
||||
@@ -23,40 +23,6 @@ dmeventd is the event monitoring daemon for device-mapper devices.
|
||||
Library plugins can register and carry out actions triggered when
|
||||
particular events occur.
|
||||
.
|
||||
.SH LVM PLUGINS
|
||||
.
|
||||
.HP
|
||||
.IR Mirror
|
||||
.br
|
||||
Attempts to handle device failure automatically. See
|
||||
.BR lvm.conf (5).
|
||||
.
|
||||
.HP
|
||||
.IR Raid
|
||||
.br
|
||||
Attempts to handle device failure automatically. See
|
||||
.BR lvm.conf (5).
|
||||
.
|
||||
.HP
|
||||
.IR Snapshot
|
||||
.br
|
||||
Monitors how full a snapshot is becoming and emits a warning to
|
||||
syslog when it exceeds 80% full.
|
||||
The warning is repeated when 85%, 90% and 95% of the snapshot is filled.
|
||||
See
|
||||
.BR lvm.conf (5).
|
||||
Snapshot which runs out of space gets invalid and when it is mounted,
|
||||
it gets umounted if possible.
|
||||
.
|
||||
.HP
|
||||
.IR Thin
|
||||
.br
|
||||
Monitors how full a thin pool data and metadata is becoming and emits
|
||||
a warning to syslog when it exceeds 80% full.
|
||||
The warning is repeated when 85%, 90% and 95% of the thin pool is filled.
|
||||
See
|
||||
.BR lvm.conf (5).
|
||||
If the thin-pool runs out of space, thin volumes are umounted if possible.
|
||||
.
|
||||
.SH OPTIONS
|
||||
.
|
||||
@@ -104,6 +70,80 @@ events to monitor from the currently running daemon.
|
||||
.br
|
||||
Show version of dmeventd.
|
||||
.
|
||||
.SH LVM PLUGINS
|
||||
.
|
||||
.HP
|
||||
.BR Mirror
|
||||
.br
|
||||
Attempts to handle device failure automatically. See
|
||||
.BR lvm.conf (5).
|
||||
.
|
||||
.HP
|
||||
.BR Raid
|
||||
.br
|
||||
Attempts to handle device failure automatically. See
|
||||
.BR lvm.conf (5).
|
||||
.
|
||||
.HP
|
||||
.BR Snapshot
|
||||
.br
|
||||
Monitors how full a snapshot is becoming and emits a warning to
|
||||
syslog when it exceeds 80% full.
|
||||
The warning is repeated when 85%, 90% and 95% of the snapshot is filled.
|
||||
See
|
||||
.BR lvm.conf (5).
|
||||
A snapshot that runs out of space becomes invalid and, when it is mounted,
it gets unmounted if possible.
|
||||
.
|
||||
.HP
|
||||
.BR Thin
|
||||
.br
|
||||
Monitors how full the thin pool data and metadata volumes are becoming and emits
|
||||
a warning to syslog when it exceeds 80% full.
|
||||
The warning is repeated when more than 85%, 90% and 95%
|
||||
of the thin pool is filled. See
|
||||
.BR lvm.conf (5).
|
||||
When a thin pool fills over 50% (data or metadata), the thin plugin calls the
|
||||
configured \fIdmeventd/thin_command\fP with every 5% increase.
|
||||
With default setting it calls internal
|
||||
\fBlvm lvextend --use-policies\fP to resize thin pool
|
||||
when it's been filled above configured threshold
|
||||
\fIactivation/thin_pool_autoextend_threshold\fP.
|
||||
If the command fails, dmeventd thin plugin will keep
|
||||
retrying execution with increasing time delay between
|
||||
retries up to 42 minutes.
|
||||
User may also configure external command to support more advanced
|
||||
maintenance operations of a thin pool.
|
||||
Such external command can e.g. remove some unneeded snapshots,
|
||||
use \fBfstrim\fP(8) to recover free space in a thin pool,
|
||||
but also can use \fBlvextend --use-policies\fP if other actions
|
||||
have not released enough space.
|
||||
The command is executed with the environment variable
|
||||
\fBLVM_RUN_BY_DMEVENTD=1\fP so any lvm2 command executed
|
||||
in this environment will not try to interact with dmeventd.
|
||||
To see the fullness of a thin pool command may check these
|
||||
two environment variables
|
||||
\fBDMEVENTD_THIN_POOL_DATA\fP and \fBDMEVENTD_THIN_POOL_METADATA\fP.
|
||||
Command can also read status with tools like \fBlvs\fP(8).
|
||||
.
|
||||
.SH ENVIRONMENT VARIABLES
|
||||
.
|
||||
.TP
|
||||
.B DMEVENTD_THIN_POOL_DATA
|
||||
Variable is set by the thin plugin and is available to the executed program. Value presents
|
||||
actual usage of thin pool data volume. Variable is not set when error event
|
||||
is processed.
|
||||
.TP
|
||||
.B DMEVENTD_THIN_POOL_METADATA
|
||||
Variable is set by the thin plugin and is available to the executed program. Value presents
|
||||
actual usage of thin pool metadata volume. Variable is not set when error event
|
||||
is processed.
|
||||
.TP
|
||||
.B LVM_RUN_BY_DMEVENTD
|
||||
Variable is set by the thin plugin to prohibit recursive interaction
|
||||
with dmeventd by any executed lvm2 command from
|
||||
a thin_command environment.
|
||||
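A minimal sketch of an external thin_command helper built around the variables documented above (in practice this would usually be a shell script; the 90% threshold and the assumption that the values express pool usage percentages are illustrative only):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* dmeventd exports the pool fullness to the executed command. */
	const char *data = getenv("DMEVENTD_THIN_POOL_DATA");
	const char *meta = getenv("DMEVENTD_THIN_POOL_METADATA");

	/* Unset variables mean an error event is being processed. */
	if (!data || !meta)
		return 0;

	if (atof(data) > 90 || atof(meta) > 90)
		fprintf(stderr, "thin pool nearly full: data=%s metadata=%s\n", data, meta);

	return 0;
}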
.
|
||||
.SH SEE ALSO
|
||||
.
|
||||
.BR lvm (8),
|
||||
|
||||
@@ -27,9 +27,9 @@ dmsetup \(em low level logical volume management
|
||||
. IR uuid ]
|
||||
. RB \%[ \-\-addnodeoncreate | \-\-addnodeonresume ]
|
||||
. RB \%[ \-n | \-\-notable | \-\-table
|
||||
. RI \%{ table | table_file }]
|
||||
. IR \%table | table_file ]
|
||||
. RB [ \-\-readahead
|
||||
. RB \%{[ + ] \fIsectors | auto | none }]
|
||||
. RB \%[ + ] \fIsectors | auto | none ]
|
||||
. ad b
|
||||
..
|
||||
.CMD_CREATE
|
||||
@@ -41,7 +41,7 @@ dmsetup \(em low level logical volume management
|
||||
. BR deps
|
||||
. RB [ \-o
|
||||
. IR options ]
|
||||
. RI [ device_name ]
|
||||
. RI [ device_name ...]
|
||||
. ad b
|
||||
..
|
||||
.CMD_DEPS
|
||||
@@ -58,7 +58,7 @@ dmsetup \(em low level logical volume management
|
||||
.B dmsetup
|
||||
.de CMD_INFO
|
||||
. BR info
|
||||
. RI [ device_name ]
|
||||
. RI [ device_name ...]
|
||||
..
|
||||
.CMD_INFO
|
||||
.
|
||||
@@ -92,7 +92,7 @@ dmsetup \(em low level logical volume management
|
||||
. BR load
|
||||
. IR device_name
|
||||
. RB [ \-\-table
|
||||
. RI { table | table_file }]
|
||||
. IR table | table_file ]
|
||||
. ad b
|
||||
..
|
||||
.CMD_LOAD
|
||||
@@ -117,7 +117,7 @@ dmsetup \(em low level logical volume management
|
||||
.B dmsetup
|
||||
.de CMD_MANGLE
|
||||
. BR mangle
|
||||
. RI [ device_name ]
|
||||
. RI [ device_name ...]
|
||||
..
|
||||
.CMD_MANGLE
|
||||
.
|
||||
@@ -135,7 +135,7 @@ dmsetup \(em low level logical volume management
|
||||
.B dmsetup
|
||||
.de CMD_MKNODES
|
||||
. BR mknodes
|
||||
. RI [ device_name ]
|
||||
. RI [ device_name ...]
|
||||
..
|
||||
.CMD_MKNODES
|
||||
.
|
||||
@@ -146,7 +146,7 @@ dmsetup \(em low level logical volume management
|
||||
. BR reload
|
||||
. IR device_name
|
||||
. RB [ \-\-table
|
||||
. RI { table | table_file }]
|
||||
. IR table | table_file ]
|
||||
. ad b
|
||||
..
|
||||
.CMD_RELOAD
|
||||
@@ -159,7 +159,7 @@ dmsetup \(em low level logical volume management
|
||||
. RB [ \-f | \-\-force ]
|
||||
. RB [ \-\-retry ]
|
||||
. RB [ \-\-deferred ]
|
||||
. IR device_name
|
||||
. IR device_name ...
|
||||
. ad b
|
||||
..
|
||||
.CMD_REMOVE
|
||||
@@ -197,12 +197,12 @@ dmsetup \(em low level logical volume management
|
||||
.de CMD_RESUME
|
||||
. ad l
|
||||
. BR resume
|
||||
. IR device_name
|
||||
. IR device_name ...
|
||||
. RB [ \-\-addnodeoncreate | \-\-addnodeonresume ]
|
||||
. RB [ \-\-noflush ]
|
||||
. RB [ \-\-nolockfs ]
|
||||
. RB \%[ \-\-readahead
|
||||
. RB \%{[ + ] \fIsectors | auto | none }]
|
||||
. RB \%[ + ] \fIsectors | auto | none ]
|
||||
. ad b
|
||||
..
|
||||
.CMD_RESUME
|
||||
@@ -247,7 +247,7 @@ dmsetup \(em low level logical volume management
|
||||
. RB [ \-\-target
|
||||
. IR target_type ]
|
||||
. RB [ \-\-noflush ]
|
||||
. RI [ device_name ]
|
||||
. RI [ device_name ...]
|
||||
. ad b
|
||||
..
|
||||
.CMD_STATUS
|
||||
@@ -259,7 +259,7 @@ dmsetup \(em low level logical volume management
|
||||
. BR suspend
|
||||
. RB [ \-\-nolockfs ]
|
||||
. RB [ \-\-noflush ]
|
||||
. IR device_name
|
||||
. IR device_name ...
|
||||
. ad b
|
||||
..
|
||||
.CMD_SUSPEND
|
||||
@@ -272,7 +272,7 @@ dmsetup \(em low level logical volume management
|
||||
. RB [ \-\-target
|
||||
. IR target_type ]
|
||||
. RB [ \-\-showkeys ]
|
||||
. RI [ device_name ]
|
||||
. RI [ device_name ...]
|
||||
. ad b
|
||||
..
|
||||
.CMD_TABLE
|
||||
@@ -354,7 +354,7 @@ dmsetup \(em low level logical volume management
|
||||
.de CMD_WIPE_TABLE
|
||||
. ad l
|
||||
. BR wipe_table
|
||||
. IR device_name
|
||||
. IR device_name ...
|
||||
. RB [ \-f | \-\-force ]
|
||||
. RB [ \-\-noflush ]
|
||||
. RB [ \-\-nolockfs ]
|
||||
@@ -447,7 +447,7 @@ The default interval is one second.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-manglename
|
||||
.RB { auto | hex | none }
|
||||
.BR auto | hex | none
|
||||
.br
|
||||
Mangle any character not on a whitelist using mangling_mode when
|
||||
processing device-mapper device names and UUIDs. The names and UUIDs
|
||||
@@ -529,7 +529,7 @@ Specify which fields to display.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-readahead
|
||||
.RB {[ + ] \fIsectors | auto | none }
|
||||
.RB [ + ] \fIsectors | auto | none
|
||||
.br
|
||||
Specify read ahead size in units of sectors.
|
||||
The default value is \fBauto\fP which allows the kernel to choose
|
||||
@@ -820,8 +820,10 @@ Outputs the current table for the device in a format that can be fed
|
||||
back in using the create or load commands.
|
||||
With \fB\-\-target\fP, only information relating to the specified target type
|
||||
is displayed.
|
||||
Encryption keys are suppressed in the table output for the crypt
|
||||
target unless the \fB\-\-showkeys\fP parameter is supplied.
|
||||
Real encryption keys are suppressed in the table output for the crypt
|
||||
target unless the \fB\-\-showkeys\fP parameter is supplied. Kernel key
|
||||
references prefixed with \fB:\fP are not affected by the parameter and are
always displayed.
|
||||
.
|
||||
.HP
|
||||
.CMD_TARGETS
|
||||
|
||||
@@ -44,7 +44,7 @@ dmstats \(em device-mapper statistics management
|
||||
.B dmsetup
|
||||
.B stats
|
||||
.I command
|
||||
.RB [ options ]
|
||||
[OPTIONS]
|
||||
.sp
|
||||
.
|
||||
.PD 0
|
||||
@@ -53,13 +53,13 @@ dmstats \(em device-mapper statistics management
|
||||
.de CMD_COMMAND
|
||||
. ad l
|
||||
. IR command
|
||||
. RI [ device_name |
|
||||
. RB [ \-u | \-\-uuid
|
||||
. IR uuid ]
|
||||
. RB | [ \-\-major
|
||||
. IR device_name " |"
|
||||
. BR \-\-major
|
||||
. IR major
|
||||
. BR \-\-minor
|
||||
. IR minor ]
|
||||
. IR minor " |"
|
||||
. BR \-u | \-\-uuid
|
||||
. IR uuid
|
||||
. RB \%[ \-v | \-\-verbose]
|
||||
. ad b
|
||||
..
|
||||
@@ -82,9 +82,7 @@ dmstats \(em device-mapper statistics management
|
||||
.de CMD_CREATE
|
||||
. ad l
|
||||
. BR create
|
||||
. RB [ device_name...
|
||||
. RB | file_path...
|
||||
. RB | [ \-\-alldevices ]]
|
||||
. IR device_name... | file_path... | \fB\-\-alldevices
|
||||
. RB [ \-\-areas
|
||||
. IR nr_areas | \fB\-\-areasize
|
||||
. IR area_size ]
|
||||
@@ -110,8 +108,7 @@ dmstats \(em device-mapper statistics management
|
||||
.de CMD_DELETE
|
||||
. ad l
|
||||
. BR delete
|
||||
. RI [ device_name ]
|
||||
. RB [ \-\-alldevices ]
|
||||
. IR device_name | \fB\-\-alldevices
|
||||
. OPT_PROGRAMS
|
||||
. OPT_REGIONS
|
||||
. ad b
|
||||
@@ -123,10 +120,9 @@ dmstats \(em device-mapper statistics management
|
||||
.de CMD_GROUP
|
||||
. ad l
|
||||
. BR group
|
||||
. RI [ device_name ]
|
||||
. RI [ device_name | \fB\-\-alldevices ]
|
||||
. RB [ \-\-alias
|
||||
. IR name ]
|
||||
. RB [ \-\-alldevices ]
|
||||
. RB [ \-\-regions
|
||||
. IR regions ]
|
||||
. ad b
|
||||
@@ -205,8 +201,7 @@ dmstats \(em device-mapper statistics management
|
||||
.de CMD_UNGROUP
|
||||
. ad l
|
||||
. BR ungroup
|
||||
. RI [ device_name ]
|
||||
. RB [ \-\-alldevices ]
|
||||
. RI [ device_name | \fB\-\-alldevices ]
|
||||
. RB [ \-\-groupid
|
||||
. IR id ]
|
||||
. ad b
|
||||
@@ -217,7 +212,7 @@ dmstats \(em device-mapper statistics management
|
||||
.de CMD_UPDATE_FILEMAP
|
||||
. ad l
|
||||
. BR update_filemap
|
||||
. RI file_path
|
||||
. IR file_path
|
||||
. RB [ \-\-groupid
|
||||
. IR id ]
|
||||
. ad b
|
||||
|
||||
2
man/lvchange.8.des
Normal file
@@ -0,0 +1,2 @@
|
||||
lvchange changes LV attributes in the VG, changes LV activation in the
|
||||
kernel, and includes other utilities for LV maintenance.
|
||||
6
man/lvchange.8.end
Normal file
@@ -0,0 +1,6 @@
|
||||
.SH EXAMPLES
|
||||
|
||||
Change LV permission to read-only:
|
||||
.sp
|
||||
.B lvchange \-pr vg00/lvol1
|
||||
|
||||
@@ -1,491 +0,0 @@
|
||||
.TH LVCHANGE 8 "LVM TOOLS #VERSION#" "Sistina Software UK" \" -*- nroff -*-
|
||||
|
||||
.de UNITS
|
||||
..
|
||||
|
||||
.
|
||||
.SH NAME
|
||||
.
|
||||
lvchange \(em change attributes of a logical volume
|
||||
.
|
||||
.SH SYNOPSIS
|
||||
.
|
||||
.ad l
|
||||
.B lvchange
|
||||
.RB [ \-a | \-\-activate
|
||||
.RB [ a ][ e | s | l ]{ y | n }]
|
||||
.RB [ \-\-activationmode
|
||||
.RB { complete | degraded | partial }]
|
||||
.RB [ \-\-addtag
|
||||
.IR Tag ]
|
||||
.RB [ \-K | \-\-ignoreactivationskip ]
|
||||
.RB [ \-k | \-\-setactivationskip
|
||||
.RB { y | n }]
|
||||
.RB [ \-\-alloc
|
||||
.IR AllocationPolicy ]
|
||||
.RB [ \-A | \-\-autobackup
|
||||
.RB { y | n }]
|
||||
.RB [ \-\-rebuild
|
||||
.IR PhysicalVolume ]
|
||||
.RB [ \-\-cachemode
|
||||
.RB { passthrough | writeback | writethrough }]
|
||||
.RB [ \-\-cachepolicy
|
||||
.IR Policy ]
|
||||
.RB [ \-\-cachesettings
|
||||
.IR Key \fB= Value ]
|
||||
.RB [ \-\-commandprofile
|
||||
.IR ProfileName ]
|
||||
.RB [ \-C | \-\-contiguous
|
||||
.RB { y | n }]
|
||||
.RB [ \-d | \-\-debug ]
|
||||
.RB [ \-\-deltag
|
||||
.IR Tag ]
|
||||
.RB [ \-\-detachprofile ]
|
||||
.RB [ \-\-discards
|
||||
.RB { ignore | nopassdown | passdown }]
|
||||
.RB [ \-\-errorwhenfull
|
||||
.RB { y | n }]
|
||||
.RB [ \-h | \-? | \-\-help ]
|
||||
.RB \%[ \-\-ignorelockingfailure ]
|
||||
.RB \%[ \-\-ignoremonitoring ]
|
||||
.RB \%[ \-\-ignoreskippedcluster ]
|
||||
.RB \%[ \-\-metadataprofile
|
||||
.IR ProfileName ]
|
||||
.RB [ \-\-monitor
|
||||
.RB { y | n }]
|
||||
.RB [ \-\-noudevsync ]
|
||||
.RB [ \-P | \-\-partial ]
|
||||
.RB [ \-p | \-\-permission
|
||||
.RB { r | rw }]
|
||||
.RB [ \-M | \-\-persistent
|
||||
.RB { y | n }
|
||||
.RB [ \-\-major
|
||||
.IR Major ]
|
||||
.RB [ \-\-minor
|
||||
.IR Minor ]]
|
||||
.RB [ \-\-poll
|
||||
.RB { y | n }]
|
||||
.RB [ \-\- [ raid ] maxrecoveryrate
|
||||
.IR Rate ]
|
||||
.RB [ \-\- [ raid ] minrecoveryrate
|
||||
.IR Rate ]
|
||||
.RB [ \-\- [ raid ] syncaction
|
||||
.RB { check | repair }]
|
||||
.RB [ \-\- [ raid ] writebehind
|
||||
.IR IOCount ]
|
||||
.RB [ \-\- [ raid ] writemostly
|
||||
.BR \fIPhysicalVolume [ : { y | n | t }]]
|
||||
.RB [ \-r | \-\-readahead
|
||||
.RB { \fIReadAheadSectors | auto | none }]
|
||||
.RB [ \-\-refresh ]
|
||||
.RB [ \-\-reportformat
|
||||
.RB { basic | json }]
|
||||
.RB [ \-\-resync ]
|
||||
.RB [ \-S | \-\-select
|
||||
.IR Selection ]
|
||||
.RB [ \-\-sysinit ]
|
||||
.RB [ \-t | \-\-test ]
|
||||
.RB [ \-v | \-\-verbose ]
|
||||
.RB [ \-Z | \-\-zero
|
||||
.RB { y | n }]
|
||||
.RI [ LogicalVolumePath ...]
|
||||
.ad b
|
||||
.
|
||||
.SH DESCRIPTION
|
||||
.
|
||||
lvchange allows you to change the attributes of a logical volume
|
||||
including making them known to the kernel ready for use.
|
||||
.
|
||||
.SH OPTIONS
|
||||
.
|
||||
See \fBlvm\fP(8) for common options.
|
||||
.
|
||||
.HP
|
||||
.BR \-a | \-\-activate
|
||||
.RB [ a ][ e | s | l ]{ y | n }
|
||||
.br
|
||||
Controls the availability of the logical volumes for use.
|
||||
Communicates with the kernel device-mapper driver via
|
||||
libdevmapper to activate (\fB\-ay\fP) or deactivate (\fB\-an\fP) the
|
||||
logical volumes.
|
||||
.br
|
||||
Activation of a logical volume creates a symbolic link
|
||||
\fI/dev/VolumeGroupName/LogicalVolumeName\fP pointing to the device node.
|
||||
This link is removed on deactivation.
|
||||
All software and scripts should access the device through
|
||||
this symbolic link and present this as the name of the device.
|
||||
The location and name of the underlying device node may depend on
|
||||
the distribution and configuration (e.g. udev) and might change
|
||||
from release to release.
|
||||
.br
|
||||
If autoactivation option is used (\fB\-aay\fP),
|
||||
the logical volume is activated only if it matches an item in
|
||||
the \fBactivation/auto_activation_volume_list\fP
|
||||
set in \fBlvm.conf\fP(5).
|
||||
If this list is not set, then all volumes are considered for
|
||||
activation. The \fB\-aay\fP option should be also used during system
|
||||
boot so it's possible to select which volumes to activate using
|
||||
the \fBactivation/auto_activation_volume_list\fP setting.
|
||||
.br
|
||||
In a clustered VG, clvmd is used for activation, and the
|
||||
following options are possible:
|
||||
|
||||
With \fB\-aey\fP, clvmd activates the LV in exclusive mode
|
||||
(with an exclusive lock), allowing a single node to activate the LV.
|
||||
|
||||
With \fB\-asy\fP, clvmd activates the LV in shared mode
|
||||
(with a shared lock), allowing multiple nodes to activate the LV concurrently.
|
||||
If the LV type prohibits shared access, such as an LV with a snapshot,
|
||||
the '\fBs\fP' option is ignored and an exclusive lock is used.
|
||||
|
||||
With \fB\-ay\fP (no mode specified), clvmd activates the LV in shared mode
|
||||
if the LV type allows concurrent access, such as a linear LV.
|
||||
Otherwise, clvmd activates the LV in exclusive mode.
|
||||
|
||||
With \fB\-aey\fP, \fB\-asy\fP, and \fB\-ay\fP, clvmd attempts to activate the LV
|
||||
on all nodes. If exclusive mode is used, then only one of the
|
||||
nodes will be successful.
|
||||
|
||||
With \fB\-an\fP, clvmd attempts to deactivate the LV on all nodes.
|
||||
|
||||
With \fB\-aly\fP, clvmd activates the LV only on the local node, and \fB\-aln\fP
|
||||
deactivates only on the local node. If the LV type allows concurrent
|
||||
access, then shared mode is used, otherwise exclusive.
|
||||
|
||||
LVs with snapshots are always activated exclusively because they can only
|
||||
be used on one node at once.
|
||||
|
||||
For local VGs \fB\-ay\fP, \fB\-aey\fP, and \fB\-asy\fP are all equivalent.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-activationmode
|
||||
.RB { complete | degraded | partial }
|
||||
.br
|
||||
The activation mode determines whether logical volumes are allowed to
|
||||
activate when there are physical volumes missing (e.g. due to a device
|
||||
failure). \fBcomplete\fP is the most restrictive; allowing only those
|
||||
logical volumes to be activated that are not affected by the missing
|
||||
PVs. \fBdegraded\fP allows RAID logical volumes to be activated even if
|
||||
they have PVs missing. (Note that the "\fImirror\fP" segment type is not
|
||||
considered a RAID logical volume. The "\fIraid1\fP" segment type should
|
||||
be used instead.) Finally, \fBpartial\fP allows any logical volume to
|
||||
be activated even if portions are missing due to a missing or failed
|
||||
PV. This last option should only be used when performing recovery or
|
||||
repair operations. \fBdegraded\fP is the default mode. To change it,
|
||||
modify \fBactivation_mode\fP in \fBlvm.conf\fP(5).
|
||||
.
|
||||
.HP
|
||||
.BR \-K | \-\-ignoreactivationskip
|
||||
.br
|
||||
Ignore the flag to skip Logical Volumes during activation.
|
||||
.
|
||||
.HP
|
||||
.BR \-k | \-\-setactivationskip
|
||||
.RB { y | n }
|
||||
.br
|
||||
Controls whether Logical Volumes are persistently flagged to be
|
||||
skipped during activation. By default, thin snapshot volumes are
|
||||
flagged for activation skip. To activate such volumes,
|
||||
an extra \fB\-\-ignoreactivationskip\fP option must be used.
|
||||
The flag is not applied during deactivation. To see whether
|
||||
the flag is attached, use \fBlvs\fP(8) command where the state
|
||||
of the flag is reported within \fBlv_attr\fP bits.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-cachemode
|
||||
.RB { passthrough | writeback | writethrough }
|
||||
.br
|
||||
Specifying a cache mode determines when the writes to a cache LV
|
||||
are considered complete. When \fBwriteback\fP is specified, a write is
|
||||
considered complete as soon as it is stored in the cache pool LV.
|
||||
If \fBwritethrough\fP is specified, a write is considered complete only
|
||||
when it has been stored in the cache pool LV and on the origin LV.
|
||||
While \fBwritethrough\fP may be slower for writes, it is more
|
||||
resilient if something should happen to a device associated with the
|
||||
cache pool LV. With \fBpassthrough\fP mode, all reads are served
|
||||
from origin LV (all reads miss the cache) and all writes are
|
||||
forwarded to the origin LV; additionally, write hits cause cache
|
||||
block invalidates. See \fBlvmcache(7)\fP for more details.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-cachepolicy
|
||||
.IR Policy ,
|
||||
.BR \-\-cachesettings
|
||||
.IR Key \fB= Value
|
||||
.br
|
||||
Only applicable to cached LVs; see also \fBlvmcache(7)\fP. Sets
|
||||
the cache policy and its associated tunable settings. In most use-cases,
|
||||
default values should be adequate.
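.br
For example (the LV name and the tunable shown are illustrative only), a
cached LV could be switched to the \fBsmq\fP policy with one setting adjusted:
.br
.B lvchange \-\-cachepolicy smq \-\-cachesettings migration_threshold=2048 vg00/lvol1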
|
||||
.
|
||||
.HP
|
||||
.BR \-C | \-\-contiguous
|
||||
.RB { y | n }
|
||||
.br
|
||||
Tries to set or reset the contiguous allocation policy for
|
||||
logical volumes. It's only possible to change a non-contiguous
|
||||
logical volume's allocation policy to contiguous, if all of the
|
||||
allocated physical extents are already contiguous.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-detachprofile
|
||||
.br
|
||||
Detach any metadata configuration profiles attached to given
|
||||
Logical Volumes. See \fBlvm.conf\fP(5) for more information
|
||||
about metadata profiles.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-discards
|
||||
.RB { ignore | nopassdown | passdown }
|
||||
.br
|
||||
Set this to \fBignore\fP to ignore any discards received by a
|
||||
thin pool Logical Volume. Set to \fBnopassdown\fP to process such
|
||||
discards within the thin pool itself and allow the no-longer-needed
|
||||
extents to be overwritten by new data. Set to \fBpassdown\fP (the
|
||||
default) to process them both within the thin pool itself and to
|
||||
pass them down the underlying device.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-errorwhenfull
|
||||
.RB { y | n }
|
||||
.br
|
||||
Sets thin pool behavior when data space is exhausted. See
|
||||
.BR lvcreate (8)
|
||||
for information.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-ignoremonitoring
|
||||
.br
|
||||
Make no attempt to interact with dmeventd unless \fB\-\-monitor\fP
|
||||
is specified.
|
||||
Do not use this if dmeventd is already monitoring a device.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-major
|
||||
.IR Major
|
||||
.br
|
||||
Sets the major number. This option is supported only on older systems
|
||||
(kernel version 2.4) and is ignored on modern Linux systems where major
|
||||
numbers are dynamically assigned.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-minor
|
||||
.IR Minor
|
||||
.br
|
||||
Set the minor number.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-metadataprofile
|
||||
.IR ProfileName
|
||||
.br
|
||||
Uses and attaches \fIProfileName\fP configuration profile to the logical
|
||||
volume metadata. Whenever the logical volume is processed next time,
|
||||
the profile is automatically applied. If the volume group has another
|
||||
profile attached, the logical volume profile is preferred.
|
||||
See \fBlvm.conf\fP(5) for more information about metadata profiles.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-monitor
|
||||
.RB { y | n }
|
||||
.br
|
||||
Start or stop monitoring a mirrored or snapshot logical volume with
|
||||
dmeventd, if it is installed.
|
||||
If a device used by a monitored mirror reports an I/O error,
|
||||
the failure is handled according to
|
||||
\%\fBmirror_image_fault_policy\fP and \fBmirror_log_fault_policy\fP
|
||||
set in \fBlvm.conf\fP(5).
|
||||
.
|
||||
.HP
|
||||
.BR \-\-noudevsync
|
||||
.br
|
||||
Disable udev synchronisation. The
|
||||
process will not wait for notification from udev.
|
||||
It will continue irrespective of any possible udev processing
|
||||
in the background. You should only use this if udev is not running
|
||||
or has rules that ignore the devices LVM2 creates.
|
||||
.
|
||||
.HP
|
||||
.BR \-p | \-\-permission
|
||||
.RB { r | rw }
|
||||
.br
|
||||
Change access permission to read-only or read/write.
|
||||
.
|
||||
.HP
|
||||
.BR \-M | \-\-persistent
|
||||
.RB { y | n }
|
||||
.br
|
||||
Set to \fBy\fP to make the minor number specified persistent.
|
||||
Change of persistent numbers is not supported for pool volumes.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-poll
|
||||
.RB { y | n }
|
||||
.br
|
||||
Without polling a logical volume's backgrounded transformation process
|
||||
will never complete. If there is an incomplete pvmove or lvconvert (for
|
||||
example, on rebooting after a crash), use \fB\-\-poll y\fP to restart the
|
||||
process from its last checkpoint. However, it may not be appropriate to
|
||||
immediately poll a logical volume when it is activated; use
\fB\-\-poll n\fP to defer and then \fB\-\-poll y\fP to restart the process.
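.br
For example (LV name illustrative), to restart an interrupted transformation
after boot:
.br
.B lvchange \-\-poll y vg00/lvol1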
|
||||
.
|
||||
.HP
|
||||
.BR \-\- [ raid ] rebuild
|
||||
.BR \fIPhysicalVolume
|
||||
.br
|
||||
This option can be repeated multiple times.
It selects PhysicalVolume(s) to be rebuilt in a RaidLV.
|
||||
Use this option instead of
|
||||
.BR \-\-resync
|
||||
or
|
||||
.BR \-\- [ raid ] syncaction
|
||||
\fBrepair\fP in case the PVs with corrupted data are known and their data
|
||||
should be reconstructed rather than reconstructing default (rotating) data.
|
||||
.br
|
||||
E.g. in a raid1 mirror, the master leg on /dev/sda may hold corrupt data due
|
||||
to a known transient disk error, thus
|
||||
.br
|
||||
\fBlvchange --rebuild /dev/sda LV\fP
|
||||
.br
|
||||
will request the master leg to be rebuilt rather than rebuilding
|
||||
all other legs from the master.
|
||||
On a raid5 with rotating data and parity
|
||||
.br
|
||||
\fBlvchange --rebuild /dev/sda LV\fP
|
||||
.br
|
||||
will rebuild all data and parity blocks in the stripe on /dev/sda.
|
||||
.HP
|
||||
.BR \-\- [ raid ] maxrecoveryrate
|
||||
.BR \fIRate [ b | B | s | S | k | K | m | M | g | G ]
|
||||
.br
|
||||
Sets the maximum recovery rate for a RAID logical volume. \fIRate\fP
|
||||
is specified as an amount per second for each device in the array.
|
||||
If no suffix is given, then KiB/sec/device is assumed. Setting the
|
||||
recovery rate to \fB0\fP means it will be unbounded.
|
||||
.
|
||||
.HP
|
||||
.BR \-\- [ raid ] minrecoveryrate
|
||||
.BR \fIRate [ b | B | s | S | k | K | m | M | g | G ]
|
||||
.br
|
||||
Sets the minimum recovery rate for a RAID logical volume. \fIRate\fP
|
||||
is specified as an amount per second for each device in the array.
|
||||
If no suffix is given, then KiB/sec/device is assumed. Setting the
|
||||
recovery rate to \fB0\fP means it will be unbounded.
|
||||
.
|
||||
.HP
|
||||
.BR \-\- [ raid ] syncaction
|
||||
.RB { check | repair }
|
||||
.br
|
||||
This argument is used to initiate various RAID synchronization operations.
|
||||
The \fBcheck\fP and \fBrepair\fP options provide a way to check the
|
||||
integrity of a RAID logical volume (often referred to as "scrubbing").
|
||||
These options cause the RAID logical volume to
|
||||
read all of the data and parity blocks in the array and check for any
|
||||
discrepancies (e.g. mismatches between mirrors or incorrect parity values).
|
||||
If \fBcheck\fP is used, the discrepancies will be counted but not repaired.
|
||||
If \fBrepair\fP is used, the discrepancies will be corrected as they are
|
||||
encountered. The \fBlvs\fP(8) command can be used to show the number of
|
||||
discrepancies found or repaired.
|
||||
.
|
||||
.HP
|
||||
.BR \-\- [ raid ] writebehind
|
||||
.IR IOCount
|
||||
.br
|
||||
Specify the maximum number of outstanding writes that are allowed to
|
||||
devices in a RAID1 logical volume that are marked as write-mostly.
|
||||
Once this value is exceeded, writes become synchronous (i.e. all writes
|
||||
to the constituent devices must complete before the array signals the
|
||||
write has completed). Setting the value to zero clears the preference
|
||||
and allows the system to choose the value arbitrarily.
|
||||
.
|
||||
.HP
|
||||
.BR \-\- [ raid ] writemostly
|
||||
.BR \fIPhysicalVolume [ : { y | n | t }]
|
||||
.br
|
||||
Mark a device in a RAID1 logical volume as write-mostly. All reads
|
||||
to these drives will be avoided unless absolutely necessary. This keeps
|
||||
the number of I/Os to the drive to a minimum. The default behavior is to
|
||||
set the write-mostly attribute for the specified physical volume in the
|
||||
logical volume. It is possible to also remove the write-mostly flag by
|
||||
appending a "\fB:n\fP" to the physical volume or to toggle the value by specifying
|
||||
"\fB:t\fP". The \fB\-\-writemostly\fP argument can be specified more than one time
|
||||
in a single command, making it possible to toggle the write-mostly attributes
|
||||
for all the physical volumes in a logical volume at once.
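.br
For example (device and LV names are illustrative), the following sets the
flag on one PV and toggles it on another in a single call:
.br
.B lvchange \-\-writemostly /dev/sdb1:y \-\-writemostly /dev/sdc1:t vg00/mylv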
|
||||
.
|
||||
.HP
|
||||
.BR \-r | \-\-readahead
|
||||
.RB { \fIReadAheadSectors | auto | none }
|
||||
.br
|
||||
Set read ahead sector count of this logical volume.
|
||||
For volume groups with metadata in lvm1 format, this must
|
||||
be a value between 2 and 120 sectors.
|
||||
The default value is "\fBauto\fP" which allows the kernel to choose
|
||||
a suitable value automatically.
|
||||
"\fBnone\fP" is equivalent to specifying zero.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-refresh
|
||||
.br
|
||||
If the logical volume is active, reload its metadata.
|
||||
This is not necessary in normal operation, but may be useful
|
||||
if something has gone wrong or if you're doing clustering
|
||||
manually without a clustered lock manager.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-resync
|
||||
.br
|
||||
Forces the complete resynchronization of a mirror. In normal
|
||||
circumstances you should not need this option because synchronization
|
||||
happens automatically. Data is read from the primary mirror device
|
||||
and copied to the others, so this can take a considerable amount of
|
||||
time - and during this time you are without a complete redundant copy
|
||||
of your data.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-sysinit
|
||||
.br
|
||||
Indicates that \fBlvchange\fP(8) is being invoked from early system
|
||||
initialisation scripts (e.g. rc.sysinit or an initrd),
|
||||
before writeable filesystems are available. As such,
|
||||
some functionality needs to be disabled and this option
|
||||
acts as a shortcut which selects an appropriate set of options. Currently
|
||||
this is equivalent to using \fB\-\-ignorelockingfailure\fP,
|
||||
\fB\-\-ignoremonitoring\fP, \fB\-\-poll n\fP and setting
|
||||
\fBLVM_SUPPRESS_LOCKING_FAILURE_MESSAGES\fP
|
||||
environment variable.
|
||||
|
||||
If \fB\-\-sysinit\fP is used in conjunction with
|
||||
\fBlvmetad\fP(8) enabled and running,
|
||||
autoactivation is preferred over manual activation via direct lvchange call.
|
||||
Logical volumes are autoactivated according to
|
||||
\fBauto_activation_volume_list\fP set in \fBlvm.conf\fP(5).
|
||||
.
|
||||
.HP
|
||||
.BR \-Z | \-\-zero
|
||||
.RB { y | n }
|
||||
.br
|
||||
Set zeroing mode for the thin pool. Note: blocks already provisioned from a pool
in non-zero mode are not cleared in their unwritten parts when zeroing is later
set to \fBy\fP.
|
||||
.
|
||||
.SH ENVIRONMENT VARIABLES
|
||||
.
|
||||
.TP
|
||||
.B LVM_SUPPRESS_LOCKING_FAILURE_MESSAGES
|
||||
Suppress locking failure messages.
|
||||
.
|
||||
.SH Examples
|
||||
.
|
||||
Changes the permission on volume lvol1 in volume group vg00 to be read-only:
|
||||
.sp
|
||||
.B lvchange \-pr vg00/lvol1
|
||||
.
|
||||
.SH SEE ALSO
|
||||
.
|
||||
.nh
|
||||
.BR lvm (8),
|
||||
.BR lvmetad (8),
|
||||
.BR lvs (8),
|
||||
.BR lvcreate (8),
|
||||
.BR vgchange (8),
|
||||
.BR lvmcache (7),
|
||||
.BR lvmthin (7),
|
||||
.BR lvm.conf (5)
|
||||
32
man/lvconvert.8.des
Normal file
@@ -0,0 +1,32 @@
|
||||
lvconvert changes the LV type and includes utilities for LV data
|
||||
maintenance. The LV type controls data layout and redundancy.
|
||||
The LV type is also called the segment type or segtype.
|
||||
|
||||
To display the current LV type, run the command:
|
||||
|
||||
.B lvs \-o name,segtype
|
||||
.I LV
|
||||
|
||||
The
|
||||
.B linear
|
||||
type is equivalent to the
|
||||
.B striped
|
||||
type when one stripe exists.
|
||||
In that case, the types can sometimes be used interchangeably.
|
||||
|
||||
In most cases, the
|
||||
.B mirror
|
||||
type is deprecated and the
|
||||
.B raid1
|
||||
type should be used. They are both implementations of mirroring.
|
||||
|
||||
In some cases, an LV is a single device mapper (dm) layer above physical
|
||||
devices. In other cases, hidden LVs (dm devices) are layered between the
|
||||
visible LV and physical devices. LVs in the middle layers are called sub LVs.
|
||||
A command run on a visible LV sometimes operates on a sub LV rather than
|
||||
the specified LV. In other cases, a sub LV must be specified directly on
|
||||
the command line.
|
||||
|
||||
Sub LVs can be displayed with the command
|
||||
.B lvs -a
|
||||
|
||||
116
man/lvconvert.8.end
Normal file
@@ -0,0 +1,116 @@
|
||||
.SH NOTES
|
||||
|
||||
This previous command syntax would perform two different operations:
|
||||
.br
|
||||
\fBlvconvert --thinpool\fP \fILV1\fP \fB--poolmetadata\fP \fILV2\fP
|
||||
.br
|
||||
If LV1 was not a thin pool, the command would convert LV1 to
|
||||
a thin pool, optionally using a specified LV for metadata.
|
||||
But, if LV1 was already a thin pool, the command would swap
|
||||
the current metadata LV with LV2 (for repair purposes.)
|
||||
|
||||
In the same way, this previous command syntax would perform two different
|
||||
operations:
|
||||
.br
|
||||
\fBlvconvert --cachepool\fP \fILV1\fP \fB--poolmetadata\fP \fILV2\fP
|
||||
.br
|
||||
If LV1 was not a cache pool, the command would convert LV1 to
|
||||
a cache pool, optionally using a specified LV for metadata.
|
||||
But, if LV1 was already a cache pool, the command would swap
|
||||
the current metadata LV with LV2 (for repair purposes.)
|
||||
|
||||
.SH EXAMPLES
|
||||
|
||||
Convert a linear LV to a two-way mirror LV.
|
||||
.br
|
||||
.B lvconvert \-\-type mirror \-\-mirrors 1 vg/lvol1
|
||||
|
||||
Convert a linear LV to a two-way RAID1 LV.
|
||||
.br
|
||||
.B lvconvert \-\-type raid1 \-\-mirrors 1 vg/lvol1
|
||||
|
||||
Convert a mirror LV to use an in\-memory log.
|
||||
.br
|
||||
.B lvconvert \-\-mirrorlog core vg/lvol1
|
||||
|
||||
Convert a mirror LV to use a disk log.
|
||||
.br
|
||||
.B lvconvert \-\-mirrorlog disk vg/lvol1
|
||||
|
||||
Convert a mirror or raid1 LV to a linear LV.
|
||||
.br
|
||||
.B lvconvert --type linear vg/lvol1
|
||||
|
||||
Convert a mirror LV to a raid1 LV with the same number of images.
|
||||
.br
|
||||
.B lvconvert \-\-type raid1 vg/lvol1
|
||||
|
||||
Convert a linear LV to a two-way mirror LV, allocating new extents from specific
|
||||
PV ranges.
|
||||
.br
|
||||
.B lvconvert \-\-mirrors 1 vg/lvol1 /dev/sda:0\-15 /dev/sdb:0\-15
|
||||
|
||||
Convert a mirror LV to a linear LV, freeing physical extents from a specific PV.
|
||||
.br
|
||||
.B lvconvert \-\-type linear vg/lvol1 /dev/sda
|
||||
|
||||
Split one image from a mirror or raid1 LV, making it a new LV.
|
||||
.br
|
||||
.B lvconvert \-\-splitmirrors 1 \-\-name lv_split vg/lvol1
|
||||
|
||||
Split one image from a raid1 LV, and track changes made to the raid1 LV
|
||||
while the split image remains detached.
|
||||
.br
|
||||
.B lvconvert \-\-splitmirrors 1 \-\-trackchanges vg/lvol1
|
||||
|
||||
Merge an image (that was previously created with \-\-splitmirrors and
|
||||
\-\-trackchanges) back into the original raid1 LV.
|
||||
.br
|
||||
.B lvconvert \-\-mergemirrors vg/lvol1_rimage_1
|
||||
|
||||
Replace PV /dev/sdb1 with PV /dev/sdf1 in a raid1/4/5/6/10 LV.
|
||||
.br
|
||||
.B lvconvert \-\-replace /dev/sdb1 vg/lvol1 /dev/sdf1
|
||||
|
||||
Replace 3 PVs /dev/sd[b-d]1 with PVs /dev/sd[f-h]1 in a raid1 LV.
|
||||
.br
|
||||
.B lvconvert \-\-replace /dev/sdb1 \-\-replace /dev/sdc1 \-\-replace /dev/sdd1
|
||||
.RS
|
||||
.B vg/lvol1 /dev/sd[fgh]1
|
||||
.RE
|
||||
|
||||
Replace the maximum of 2 PVs /dev/sd[bc]1 with PVs /dev/sd[gh]1 in a raid6 LV.
|
||||
.br
|
||||
.B lvconvert \-\-replace /dev/sdb1 \-\-replace /dev/sdc1 vg/lvol1 /dev/sd[gh]1
|
||||
|
||||
Convert an LV into a thin LV in the specified thin pool. The existing LV
|
||||
is used as an external read\-only origin for the new thin LV.
|
||||
.br
|
||||
.B lvconvert \-\-type thin \-\-thinpool vg/tpool1 vg/lvol1
|
||||
|
||||
Convert an LV into a thin LV in the specified thin pool. The existing LV
|
||||
is used as an external read\-only origin for the new thin LV, and is
|
||||
renamed "external".
|
||||
.br
|
||||
.B lvconvert \-\-type thin \-\-thinpool vg/tpool1
|
||||
.RS
|
||||
.B \-\-originname external vg/lvol1
|
||||
.RE
|
||||
|
||||
Convert an LV to a cache pool LV using another specified LV for cache pool
|
||||
metadata.
|
||||
.br
|
||||
.B lvconvert \-\-type cache-pool \-\-poolmetadata vg/poolmeta1 vg/lvol1
|
||||
|
||||
Convert an LV to a cache LV using the specified cache pool and chunk size.
|
||||
.br
|
||||
.B lvconvert \-\-type cache \-\-cachepool vg/cpool1 \-c 128 vg/lvol1
|
||||
|
||||
Detach and keep the cache pool from a cache LV.
|
||||
.br
|
||||
.B lvconvert \-\-splitcache vg/lvol1
|
||||
|
||||
Detach and remove the cache pool from a cache LV.
|
||||
.br
|
||||
.B lvconvert \-\-uncache vg/lvol1
|
||||
|
||||
1199
man/lvconvert.8.in
1199
man/lvconvert.8.in
File diff suppressed because it is too large
Load Diff
39
man/lvcreate.8.des
Normal file
@@ -0,0 +1,39 @@
|
||||
lvcreate creates a new LV in a VG. For standard LVs, this requires
|
||||
allocating logical extents from the VG's free physical extents. If there
|
||||
is not enough free space, then the VG can be extended (see
|
||||
\fBvgextend\fP(8)) with other PVs, or existing LVs can be reduced or
|
||||
removed (see \fBlvremove\fP, \fBlvreduce\fP.)
|
||||
|
||||
To control which PVs a new LV will use, specify one or more PVs as
|
||||
position args at the end of the command line. lvcreate will allocate
|
||||
physical extents only from the specified PVs.
|
||||
|
||||
lvcreate can also create snapshots of existing LVs, e.g. for backup
|
||||
purposes. The data in a new snapshot LV represents the content of the
|
||||
original LV from the time the snapshot was created.
|
||||
|
||||
RAID LVs can be created by specifying an LV type when creating the LV (see
|
||||
\fBlvmraid\fP(7)). Different RAID levels require different numbers of
|
||||
unique PVs be available in the VG for allocation.
|
||||
|
||||
Thin pools (for thin provisioning) and cache pools (for caching) are
|
||||
represented by special LVs with types thin-pool and cache-pool (see
|
||||
\fBlvmthin\fP(7) and \fBlvmcache\fP(7)). The pool LVs are not usable as
|
||||
standard block devices, but the LV names act as references to the pools.
|
||||
|
||||
Thin LVs are thinly provisioned from a thin pool, and are created with a
|
||||
virtual size rather than a physical size. A cache LV is the combination of
|
||||
a standard LV with a cache pool, used to cache active portions of the LV
|
||||
to improve performance.
|
||||
|
||||
.SS Usage notes
|
||||
|
||||
In the usage section below, \fB--size\fP \fINumber\fP can be replaced
|
||||
in each case with \fB--extents\fP \fINumberExtents\fP. Also see both
|
||||
descriptions in the options section.
|
||||
|
||||
In the usage section below, \fB--name\fP is omitted from the required
|
||||
options, even though it is typically used. When the name is not
|
||||
specified, a new LV name is generated with the "lvol" prefix and a unique
|
||||
numeric suffix. Also see the description in the options section.
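
For example (names illustrative), assuming a 4MiB extent size, these two
commands request the same 1GiB LV, with the second letting lvcreate generate
a name such as "lvol0":
.br
.B lvcreate \-L 1g \-n mylv vg00
.br
.B lvcreate \-l 256 vg00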
|
||||
|
||||
98
man/lvcreate.8.end
Normal file
@@ -0,0 +1,98 @@
|
||||
.SH EXAMPLES
|
||||
|
||||
Create a striped LV with 3 stripes, a stripe size of 8KiB and a size of 100MiB.
|
||||
The LV name is chosen by lvcreate.
|
||||
.br
|
||||
.B lvcreate \-i 3 \-I 8 \-L 100m vg00
|
||||
|
||||
Create a raid1 LV with two images, and a usable size of 500 MiB. This
|
||||
operation requires two devices, one for each mirror image. RAID metadata
|
||||
(superblock and bitmap) is also included on the two devices.
|
||||
.br
|
||||
.B lvcreate \-\-type raid1 \-m1 \-L 500m \-n mylv vg00
|
||||
|
||||
Create a mirror LV with two images, and a usable size of 500 MiB.
|
||||
This operation requires three devices: two for mirror images and
|
||||
one for a disk log.
|
||||
.br
|
||||
.B lvcreate \-\-type mirror \-m1 \-L 500m \-n mylv vg00
|
||||
|
||||
Create a mirror LV with 2 images, and a usable size of 500 MiB.
|
||||
This operation requires 2 devices because the log is in memory.
|
||||
.br
|
||||
.B lvcreate \-\-type mirror \-m1 \-\-mirrorlog core \-L 500m \-n mylv vg00
|
||||
|
||||
Create a copy\-on\-write snapshot of an LV:
|
||||
.br
|
||||
.B lvcreate \-\-snapshot \-\-size 100m \-\-name mysnap vg00/mylv
|
||||
|
||||
Create a copy\-on\-write snapshot with a size sufficient
|
||||
for overwriting 20% of the size of the original LV.
|
||||
.br
|
||||
.B lvcreate \-s \-l 20%ORIGIN \-n mysnap vg00/mylv
|
||||
|
||||
Create a sparse LV with 1TiB of virtual space, and actual space just under
|
||||
100MiB.
|
||||
.br
|
||||
.B lvcreate \-\-snapshot \-\-virtualsize 1t \-\-size 100m \-\-name mylv vg00
|
||||
|
||||
Create a linear LV with a usable size of 64MiB on specific physical extents.
|
||||
.br
|
||||
.B lvcreate \-L 64m \-n mylv vg00 /dev/sda:0\-7 /dev/sdb:0\-7
|
||||
|
||||
Create a RAID5 LV with a usable size of 5GiB, 3 stripes, a stripe size of
|
||||
64KiB, using a total of 4 devices (including one for parity).
|
||||
.br
|
||||
.B lvcreate \-\-type raid5 \-L 5G \-i 3 \-I 64 \-n mylv vg00
|
||||
|
||||
Create a RAID5 LV using all of the free space in the VG and spanning all the
|
||||
PVs in the VG (note that the command will fail if there are more than 8 PVs in
|
||||
the VG, in which case \fB\-i 7\fP must be used to get to the current maximum of
|
||||
8 devices including parity for RaidLVs).
|
||||
.br
|
||||
.B lvcreate \-\-config allocation/raid_stripe_all_devices=1
|
||||
.RS
|
||||
.B \-\-type raid5 \-l 100%FREE \-n mylv vg00
|
||||
.RE
|
||||
|
||||
Create RAID10 LV with a usable size of 5GiB, using 2 stripes, each on
|
||||
a two-image mirror. (Note that the \fB-i\fP and \fB-m\fP arguments behave
|
||||
differently:
|
||||
\fB-i\fP specifies the total number of stripes,
|
||||
but \fB-m\fP specifies the number of images in addition
|
||||
to the first image).
|
||||
.br
|
||||
.B lvcreate \-\-type raid10 \-L 5G \-i 2 \-m 1 \-n mylv vg00
|
||||
|
||||
Create a 1TiB thin LV, first creating a new thin pool for it, where
|
||||
the thin pool has 100MiB of space, uses 2 stripes, has a 64KiB stripe
|
||||
size, and 256KiB chunk size.
|
||||
.br
|
||||
.B lvcreate \-\-type thin \-\-name mylv \-\-thinpool mypool
|
||||
.RS
|
||||
.B \-V 1t \-L 100m \-i 2 \-I 64 \-c 256 vg00
|
||||
.RE
|
||||
|
||||
Create a thin snapshot of a thin LV (the size option must not be
|
||||
used, otherwise a copy-on-write snapshot would be created).
|
||||
.br
|
||||
.B lvcreate \-\-snapshot \-\-name mysnap vg00/thinvol
|
||||
|
||||
Create a thin snapshot of the read-only inactive LV named "origin"
|
||||
which becomes an external origin for the thin snapshot LV.
|
||||
.br
|
||||
.B lvcreate \-\-snapshot \-\-name mysnap \-\-thinpool mypool vg00/origin
|
||||
|
||||
Create a cache pool from a fast physical device. The cache pool can
|
||||
then be used to cache an LV.
|
||||
.br
|
||||
.B lvcreate \-\-type cache-pool \-L 1G \-n my_cpool vg00 /dev/fast1
|
||||
|
||||
Create a cache LV, first creating a new origin LV on a slow physical device,
|
||||
then combining the new origin LV with an existing cache pool.
|
||||
.br
|
||||
.B lvcreate \-\-type cache \-\-cachepool my_cpool
|
||||
.RS
|
||||
.B \-L 100G \-n mylv vg00 /dev/slow1
|
||||
.RE
|
||||
|
||||
@@ -1,914 +0,0 @@
|
||||
.TH LVCREATE 8 "LVM TOOLS #VERSION#" "Sistina Software UK" \" -*- nroff -*-
|
||||
.
|
||||
.\" Use 1st. parameter with \% to fix 'man2html' rendeing on same line!
|
||||
.de SIZE_G
|
||||
. IR \\$1 \c
|
||||
. RB [ b | B | s | S | k | K | m | M | g | G ]
|
||||
..
|
||||
.de SIZE_E
|
||||
. IR \\$1 \c
|
||||
. RB [ b | B | s | S | k | K | m | M | \c
|
||||
. BR g | G | t | T | p | P | e | E ]
|
||||
..
|
||||
.
|
||||
.SH NAME
|
||||
.
|
||||
lvcreate \- create a logical volume in an existing volume group
|
||||
.
|
||||
.SH SYNOPSIS
|
||||
.
|
||||
.ad l
|
||||
.B lvcreate
|
||||
.RB [ \-a | \-\-activate
|
||||
.RB [ a ][ e | l | s ]{ y | n }]
|
||||
.RB [ \-\-addtag
|
||||
.IR Tag ]
|
||||
.RB [ \-\-alloc
|
||||
.IR Allocation\%Policy ]
|
||||
.RB [ \-A | \-\-autobackup
|
||||
.RB { y | n }]
|
||||
.RB [ \-H | \-\-cache ]
|
||||
.RB [ \-\-cachemode
|
||||
.RB { passthrough | writeback | writethrough }]
|
||||
.RB [ \-\-cachepolicy
|
||||
.IR Policy ]
|
||||
.RB \%[ \-\-cachepool
|
||||
.IR CachePoolLogicalVolume ]
|
||||
.RB [ \-\-cachesettings
|
||||
.IR Key \fB= Value ]
|
||||
.RB [ \-c | \-\-chunksize
|
||||
.IR ChunkSize ]
|
||||
.RB [ \-\-commandprofile
|
||||
.IR ProfileName ]
|
||||
.RB \%[ \-C | \-\-contiguous
|
||||
.RB { y | n }]
|
||||
.RB [ \-d | \-\-debug ]
|
||||
.RB [ \-\-discards
|
||||
.RB \%{ ignore | nopassdown | passdown }]
|
||||
.RB [ \-\-errorwhenfull
|
||||
.RB { y | n }]
|
||||
.RB [{ \-l | \-\-extents
|
||||
.BR \fILogicalExtents\%Number [ % { FREE | PVS | VG }]
|
||||
.RB |
|
||||
.BR \-L | \-\-size
|
||||
.BR \fILogicalVolumeSize }
|
||||
.RB [ \-i | \-\-stripes
|
||||
.IR Stripes
|
||||
.RB [ \-I | \-\-stripesize
|
||||
.IR StripeSize ]]]
|
||||
.RB [ \-h | \-? | \-\-help ]
|
||||
.RB [ \-K | \-\-ignoreactivationskip ]
|
||||
.RB [ \-\-ignoremonitoring ]
|
||||
.RB [ \-\-minor
|
||||
.IR Minor
|
||||
.RB [ \-j | \-\-major
|
||||
.IR Major ]]
|
||||
.RB [ \-\-metadataprofile
|
||||
.IR Profile\%Name ]
|
||||
.RB [ \-m | \-\-mirrors
|
||||
.IR Mirrors
|
||||
.RB [ \-\-corelog | \-\-mirrorlog
|
||||
.RB { disk | core | mirrored }]
|
||||
.RB [ \-\-nosync ]
|
||||
.RB [ \-R | \-\-regionsize
|
||||
.BR \fIMirrorLogRegionSize ]]
|
||||
.RB [ \-\-monitor
|
||||
.RB { y | n }]
|
||||
.RB [ \-n | \-\-name
|
||||
.IR Logical\%Volume ]
|
||||
.RB [ \-\-noudevsync ]
|
||||
.RB [ \-p | \-\-permission
|
||||
.RB { r | rw }]
|
||||
.RB [ \-M | \-\-persistent
|
||||
.RB { y | n }]
|
||||
.\" .RB [ \-\-pooldatasize
|
||||
.\" .I DataVolumeSize
|
||||
.RB \%[ \-\-poolmetadatasize
|
||||
.IR MetadataVolumeSize ]
|
||||
.RB [ \-\-poolmetadataspare
|
||||
.RB { y | n }]
|
||||
.RB [ \-\- [ raid ] maxrecoveryrate
|
||||
.IR Rate ]
|
||||
.RB [ \-\- [ raid ] minrecoveryrate
|
||||
.IR Rate ]
|
||||
.RB [ \-r | \-\-readahead
|
||||
.RB { \fIReadAheadSectors | auto | none }]
|
||||
.RB [ \-\-reportformat
|
||||
.RB {basic | json}]
|
||||
.RB \%[ \-k | \-\-setactivationskip
|
||||
.RB { y | n }]
|
||||
.RB [ \-s | \-\-snapshot ]
|
||||
.RB [ \-V | \-\-virtualsize
|
||||
.IR VirtualSize ]
|
||||
.RB [ \-t | \-\-test ]
|
||||
.RB [ \-T | \-\-thin ]
|
||||
.RB [ \-\-thinpool
|
||||
.IR ThinPoolLogicalVolume ]
|
||||
.RB [ \-\-type
|
||||
.IR SegmentType ]
|
||||
.RB [ \-v | \-\-verbose ]
|
||||
.RB [ \-W | \-\-wipesignatures
|
||||
.RB { y | n }]
|
||||
.RB [ \-Z | \-\-zero
|
||||
.RB { y | n }]
|
||||
.RI [ VolumeGroup
|
||||
.RI |
|
||||
.RI \%{ ExternalOrigin | Origin | Pool } LogicalVolume
|
||||
.RI \%[ PhysicalVolumePath [ \fB: \fIPE \fR[ \fB\- PE ]]...]]
|
||||
.LP
|
||||
.B lvcreate
|
||||
.RB [ \-l | \-\-extents
|
||||
.BR \fILogicalExtentsNumber [ % { FREE | ORIGIN | PVS | VG }]
|
||||
|
|
||||
.BR \-L | \-\-size
|
||||
.\" | \-\-pooldatasize
|
||||
.IR LogicalVolumeSize ]
|
||||
.RB [ \-c | \-\-chunksize
|
||||
.IR ChunkSize ]
|
||||
.RB \%[ \-\-commandprofile
|
||||
.IR Profile\%Name ]
|
||||
.RB [ \-\-noudevsync ]
|
||||
.RB [ \-\-ignoremonitoring ]
|
||||
.RB [ \-\-metadataprofile
|
||||
.IR Profile\%Name ]
|
||||
.RB \%[ \-\-monitor
|
||||
.RB { y | n }]
|
||||
.RB [ \-n | \-\-name
|
||||
.IR SnapshotLogicalVolumeName ]
|
||||
.RB [ \-\-reportformat
|
||||
.RB {basic | json}]
|
||||
.BR \-s | \-\-snapshot | \-H | \-\-cache
|
||||
.RI \%{[ VolumeGroup \fB/\fP] OriginalLogicalVolume
|
||||
.RB \%[ \-V | \-\-virtualsize
|
||||
.IR VirtualSize ]}
|
||||
.ad b
|
||||
.
|
||||
.SH DESCRIPTION
|
||||
.
|
||||
lvcreate creates a new logical volume in a volume group (see
|
||||
.BR vgcreate "(8), " vgchange (8))
|
||||
by allocating logical extents from the free physical extent pool
|
||||
of that volume group. If there are not enough free physical extents then
|
||||
the volume group can be extended (see
|
||||
.BR vgextend (8))
|
||||
with other physical volumes or by reducing existing logical volumes
|
||||
of this volume group in size (see
|
||||
.BR lvreduce (8)).
|
||||
If you specify one or more PhysicalVolumes, allocation of physical
|
||||
extents will be restricted to these volumes.
|
||||
.br
|
||||
.br
|
||||
The second form supports the creation of snapshot logical volumes which
|
||||
keep the contents of the original logical volume for backup purposes.
|
||||
.
|
||||
.SH OPTIONS
|
||||
.
|
||||
See
|
||||
.BR lvm (8)
|
||||
for common options.
|
||||
.
|
||||
.HP
|
||||
.BR \-a | \-\-activate
|
||||
.RB [ a ][ l | e | s ]{ y | n }
|
||||
.br
|
||||
Controls the availability of the Logical Volumes for immediate use after
|
||||
the command finishes running.
|
||||
By default, new Logical Volumes are activated (\fB\-ay\fP).
|
||||
If it is possible technically, \fB\-an\fP will leave the new Logical
|
||||
Volume inactive. But for example, snapshots of active origin can only be
|
||||
created in the active state so \fB\-an\fP cannot be used with
|
||||
\fB-\-type snapshot\fP. This does not apply to thin volume snapshots,
|
||||
which are by default created with flag to skip their activation
|
||||
(\fB-ky\fP).
|
||||
Normally the \fB\-\-zero n\fP argument has to be supplied too because
|
||||
zeroing (the default behaviour) also requires activation.
|
||||
If autoactivation option is used (\fB\-aay\fP), the logical volume is
|
||||
activated only if it matches an item in the
|
||||
\fBactivation/auto_activation_volume_list\fP
|
||||
set in \fBlvm.conf\fP(5).
|
||||
For autoactivated logical volumes, \fB\-\-zero n\fP and
|
||||
\fB\-\-wipesignatures n\fP is always assumed and it can't
|
||||
be overridden. If the clustered locking is enabled,
|
||||
\fB\-aey\fP will activate exclusively on one node and
|
||||
.BR \-a { a | l } y
|
||||
will activate only on the local node.
|
||||
.
|
||||
.HP
|
||||
.BR \-H | \-\-cache
|
||||
.br
|
||||
Creates cache or cache pool logical volume.
|
||||
.\" or both.
|
||||
Specifying the optional argument \fB\-\-extents\fP or \fB\-\-size\fP
|
||||
will cause the creation of the cache logical volume.
|
||||
.\" Specifying the optional argument \fB\-\-pooldatasize\fP will cause
|
||||
.\" the creation of the cache pool logical volume.
|
||||
.\" Specifying both arguments will cause the creation of cache with its
|
||||
.\" cache pool volume.
|
||||
When the volume group name is specified together with an existing logical volume
name which is NOT a cache pool name, such a volume is treated
as the cache origin volume and a cache pool is created. In this case
\fB\-\-extents\fP or \fB\-\-size\fP is used to specify the size of the cache pool volume.
|
||||
See \fBlvmcache\fP(7) for more info about caching support.
|
||||
Note that the cache segment type requires a dm-cache kernel module version
|
||||
1.3.0 or greater.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-cachemode
|
||||
.RB { passthrough | writeback | writethrough }
|
||||
.br
|
||||
Specifying a cache mode determines when the writes to a cache LV
|
||||
are considered complete. When \fBwriteback\fP is specified, a write is
|
||||
considered complete as soon as it is stored in the cache pool LV.
|
||||
If \fBwritethrough\fP is specified, a write is considered complete only
|
||||
when it has been stored in the cache pool LV and on the origin LV.
|
||||
While \fBwritethrough\fP may be slower for writes, it is more
|
||||
resilient if something should happen to a device associated with the
|
||||
cache pool LV. With \fBpassthrough\fP mode, all reads are served
|
||||
from origin LV (all reads miss the cache) and all writes are
|
||||
forwarded to the origin LV; additionally, write hits cause cache
|
||||
block invalidates. See \fBlvmcache(7)\fP for more details.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-cachepolicy
|
||||
.IR Policy
|
||||
.br
|
||||
Only applicable to cached LVs; see also \fBlvmcache(7)\fP. Sets
|
||||
the cache policy. \fBmq\fP is the basic policy name. \fBsmq\fP is a more advanced
version available in newer kernels.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-cachepool
|
||||
.IR CachePoolLogicalVolume { Name | Path }
|
||||
.br
|
||||
Specifies the name of the cache pool volume. The other way to specify the pool
name is to append it to the volume group name argument.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-cachesettings
|
||||
.IB Key = Value
|
||||
.br
|
||||
Only applicable to cached LVs; see also \fBlvmcache(7)\fP. Sets
|
||||
the cache tunable settings. In most use-cases, default values should be adequate.
|
||||
Special string value \fBdefault\fP switches setting back to its default kernel value
|
||||
and removes it from the list of settings stored in lvm2 metadata.
|
||||
.
|
||||
.HP
|
||||
.BR \-c | \-\-chunksize
|
||||
.SIZE_G \%ChunkSize
|
||||
.br
|
||||
Gives the size of chunk for snapshot, cache pool and thin pool logical volumes.
|
||||
Default unit is in kilobytes.
|
||||
.br
|
||||
For snapshots the value must be power of 2 between 4KiB and 512KiB
|
||||
and the default value is 4KiB.
|
||||
.br
|
||||
For cache pools the value must be a multiple of 32KiB
|
||||
between 32KiB and 1GiB. The default is 64KiB.
|
||||
When the size is specified while caching a volume, it may not be smaller
than the chunk size used when the cache pool was created.
|
||||
.br
|
||||
For thin pools the value must be a multiple of 64KiB
|
||||
between 64KiB and 1GiB.
|
||||
Default value starts with 64KiB and grows up to
|
||||
fit the pool metadata size within 128MiB,
|
||||
if the pool metadata size is not specified.
|
||||
See
|
||||
.BR lvm.conf (5)
|
||||
setting \fBallocation/thin_pool_chunk_size_policy\fP
|
||||
to select different calculation policy.
|
||||
Thin pool target version <1.4 requires this value to be a power of 2.
|
||||
For target version <1.5 discard is not supported for non power of 2 values.
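.br
For example (names illustrative), a thin pool created with an explicit
256KiB chunk size:
.br
.B lvcreate \-\-type thin-pool \-L 10g \-c 256 \-n mypool vg00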
|
||||
.
|
||||
.HP
|
||||
.BR \-C | \-\-contiguous
|
||||
.RB { y | n }
|
||||
.br
|
||||
Sets or resets the contiguous allocation policy for
|
||||
logical volumes. Default is no contiguous allocation based
|
||||
on a next free principle.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-corelog
|
||||
.br
|
||||
This is shortcut for option \fB\-\-mirrorlog core\fP.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-discards
|
||||
.RB { ignore | nopassdown | passdown }
|
||||
.br
|
||||
Sets discards behavior for thin pool.
|
||||
Default is \fBpassdown\fP.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-errorwhenfull
|
||||
.RB { y | n }
|
||||
.br
|
||||
Configures thin pool behaviour when data space is exhausted.
|
||||
Default is \fBn\fPo.
|
||||
The device will queue I/O operations until the target timeout
(see the dm-thin-pool kernel module option \fIno_space_timeout\fP)
expires. A system configured this way thus has time to, for example, extend
the size of the thin pool data device.
When set to \fBy\fPes, the I/O operation is immediately errored.
|
||||
.
|
||||
.HP
|
||||
.BR \-K | \-\-ignoreactivationskip
|
||||
.br
|
||||
Ignore the flag to skip Logical Volumes during activation.
|
||||
Use \fB\-\-setactivationskip\fP option to set or reset
|
||||
activation skipping flag persistently for logical volume.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-ignoremonitoring
|
||||
.br
|
||||
Make no attempt to interact with dmeventd unless \fB\-\-monitor\fP
|
||||
is specified.
|
||||
.
|
||||
.HP
|
||||
.BR -l | \-\-extents
|
||||
.IR LogicalExtentsNumber \c
|
||||
.RB [ % { VG | PVS | FREE | ORIGIN }]
|
||||
.br
|
||||
Specifies the size of the new LV in logical extents. The number of
|
||||
physical extents allocated may be different, and depends on the LV type.
|
||||
Certain LV types require more physical extents for data redundancy or
|
||||
metadata. An alternate syntax allows the size to be determined indirectly
|
||||
as a percentage of the size of a related VG, LV, or set of PVs. The
|
||||
suffix \fB%VG\fP denotes the total size of the VG, the suffix \fB%FREE\fP
|
||||
the remaining free space in the VG, and the suffix \fB%PVS\fP the free
|
||||
space in the specified Physical Volumes. For a snapshot, the size
|
||||
can be expressed as a percentage of the total size of the Origin Logical
|
||||
Volume with the suffix \fB%ORIGIN\fP (\fB100%ORIGIN\fP provides space for
|
||||
the whole origin).
|
||||
When expressed as a percentage, the size defines an upper limit for the
|
||||
number of logical extents in the new LV. The precise number of logical
|
||||
extents in the new LV is not determined until the command has completed.
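.br
For example (names illustrative), a snapshot allowed to grow to at most half
the size of its origin:
.br
.B lvcreate \-s \-l 50%ORIGIN \-n mysnap vg00/mylv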
|
||||
.
|
||||
.HP
|
||||
.BR \-j | \-\-major
|
||||
.IR Major
|
||||
.br
|
||||
Sets the major number.
|
||||
Major numbers are not supported with pool volumes.
|
||||
This option is supported only on older systems
|
||||
(kernel version 2.4) and is ignored on modern Linux systems where major
|
||||
numbers are dynamically assigned.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-metadataprofile
|
||||
.IR ProfileName
|
||||
.br
|
||||
Uses and attaches the \fIProfileName\fP configuration profile to the logical
|
||||
volume metadata. Whenever the logical volume is processed next time,
|
||||
the profile is automatically applied. If the volume group has another
|
||||
profile attached, the logical volume profile is preferred.
|
||||
See \fBlvm.conf\fP(5) for more information about \fBmetadata profiles\fP.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-minor
|
||||
.IR Minor
|
||||
.br
|
||||
Sets the minor number.
|
||||
Minor numbers are not supported with pool volumes.
|
||||
.
|
||||
.HP
|
||||
.BR \-m | \-\-mirrors
|
||||
.IR mirrors
|
||||
.br
|
||||
Creates a mirrored logical volume with \fImirrors\fP copies.
|
||||
For example, specifying \fB\-m 1\fP
|
||||
would result in a mirror with two sides; that is,
|
||||
a linear volume plus one copy.
|
||||
|
||||
Specifying the optional argument \fB\-\-nosync\fP will cause the creation
|
||||
of the mirror LV to skip the initial resynchronization. Any data written
|
||||
afterwards will be mirrored, but the original contents will not be copied.
|
||||
|
||||
This is useful for skipping a potentially long and resource intensive initial
|
||||
sync of an empty mirrored RaidLV.
|
||||
|
||||
There are two implementations of mirroring which can be used and correspond
|
||||
to the "\fIraid1\fP" and "\fImirror\fP" segment types.
|
||||
The default is "\fIraid1\fP". See the
|
||||
\fB\-\-type\fP option for more information if you would like to use the
|
||||
legacy "\fImirror\fP" segment type. See
|
||||
.BR lvm.conf (5)
|
||||
settings \fBglobal/mirror_segtype_default\fP
|
||||
and \fBglobal/raid10_segtype_default\fP
|
||||
to configure default mirror segment type.
|
||||
The options
|
||||
\fB\-\-mirrorlog\fP and \fB\-\-corelog\fP apply
|
||||
to the legacy "\fImirror\fP" segment type only.
|
||||
|
||||
Note the current maxima for mirrors are 7 for "mirror" providing
|
||||
8 mirror legs and 9 for "raid1" providing 10 legs.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-mirrorlog
|
||||
.RB { disk | core | mirrored }
|
||||
.br
|
||||
Specifies the type of log to be used for logical volumes utilizing
|
||||
the legacy "\fImirror\fP" segment type.
|
||||
.br
|
||||
The default is \fBdisk\fP, which is persistent and requires
|
||||
a small amount of storage space, usually on a separate device from the
|
||||
data being mirrored.
|
||||
.br
|
||||
Using \fBcore\fP means the mirror is regenerated by copying the data
|
||||
from the first device each time the logical volume is activated,
|
||||
like after every reboot.
|
||||
.br
|
||||
Using \fBmirrored\fP will create a persistent log that is itself mirrored.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-monitor
|
||||
.RB { y | n }
|
||||
.br
|
||||
Starts or avoids monitoring a mirrored, snapshot or thin pool logical volume with
|
||||
dmeventd, if it is installed.
|
||||
If a device used by a monitored mirror reports an I/O error,
|
||||
the failure is handled according to
|
||||
\fBactivation/mirror_image_fault_policy\fP
|
||||
and \fBactivation/mirror_log_fault_policy\fP
|
||||
set in \fBlvm.conf\fP(5).
|
||||
.
|
||||
.HP
|
||||
.BR \-n | \-\-name
|
||||
.IR LogicalVolume { Name | Path }
|
||||
.br
|
||||
Sets the name for the new logical volume.
|
||||
.br
|
||||
Without this option a default name of "lvol#" will be generated where
|
||||
# is the LVM internal number of the logical volume.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-nosync
|
||||
.br
|
||||
Causes the creation of mirror, raid1, raid4, raid5 and raid10 to skip the
|
||||
initial resynchronization. In case of mirror, raid1 and raid10, any data
|
||||
written afterwards will be mirrored, but the original contents will not be
|
||||
copied. In case of raid4 and raid5, no parity blocks will be written,
|
||||
though any data written afterwards will cause parity blocks to be stored.
|
||||
.br
|
||||
This is useful for skipping a potentially long and resource intensive initial
|
||||
sync of an empty mirror/raid1/raid4/raid5 and raid10 LV.
|
||||
.br
|
||||
This option is not valid for raid6, because raid6 relies on proper parity
|
||||
(P and Q Syndromes) being created during initial synchronization in order
|
||||
to reconstruct proper user data in case of device failures.
|
||||
|
||||
raid0 and raid0_meta don't provide any data copies or parity support
|
||||
and thus don't support initial resynchronization.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-noudevsync
|
||||
.br
|
||||
Disables udev synchronisation. The
|
||||
process will not wait for notification from udev.
|
||||
It will continue irrespective of any possible udev processing
|
||||
in the background. You should only use this if udev is not running
|
||||
or has rules that ignore the devices LVM2 creates.
|
||||
.
|
||||
.HP
|
||||
.BR \-p | \-\-permission
|
||||
.RB { r | rw }
|
||||
.br
|
||||
Sets access permissions to read only (\fBr\fP) or read and write (\fBrw\fP).
|
||||
.br
|
||||
Default is read and write.
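.br
For example, a read-only logical volume might be created with a command of
this form (the names and sizes are illustrative):
.br
.B lvcreate \-L 1G \-p r \-n lv_ro vg00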
|
||||
.
|
||||
.HP
|
||||
.BR \-M | \-\-persistent
|
||||
.RB { y | n }
|
||||
.br
|
||||
Set to \fBy\fP to make the minor number specified persistent.
|
||||
Pool volumes cannot have persistent major and minor numbers.
|
||||
Defaults to \fBy\fPes only when major or minor number is specified.
|
||||
Otherwise it is \fBn\fPo.
|
||||
.\" .HP
|
||||
.\" .IR \fB\-\-pooldatasize " " PoolDataVolumeSize [ bBsSkKmMgGtTpPeE ]
|
||||
.\" Sets the size of pool's data logical volume.
|
||||
.\" For thin pools you may also specify the size
|
||||
.\" with the option \fB\-\-size\fP.
|
||||
.\"
|
||||
.
|
||||
.HP
|
||||
.BR \-\-poolmetadatasize
|
||||
.SIZE_G \%MetadataVolumeSize
|
||||
.br
|
||||
Sets the size of pool's metadata logical volume.
|
||||
Supported values are in the range from 2MiB to 16GiB for a thin pool,
and up to 16GiB for a cache pool. The minimum value is computed from the pool's
data size.
|
||||
Default value for thin pool is (Pool_LV_size / Pool_LV_chunk_size * 64b).
|
||||
To work with a thin pool, there should be at least 25% of free space
when the size of the metadata is smaller than 16MiB,
or at least 4MiB of free space otherwise.
|
||||
Default unit is megabytes.
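.br
For example, a thin pool with an explicitly sized metadata volume might be
requested as follows (the names and sizes are illustrative):
.br
.B lvcreate \-L 10G \-\-poolmetadatasize 64M \-T vg00/pool0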
|
||||
.
|
||||
.HP
|
||||
.BR \-\-poolmetadataspare
|
||||
.RB { y | n }
|
||||
.br
|
||||
Controls creation and maintenance of the pool metadata spare logical volume
|
||||
that will be used for automated pool recovery.
|
||||
Only one such volume is maintained within a volume group
|
||||
with the size of the biggest pool metadata volume.
|
||||
Default is \fBy\fPes.
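.br
For example, creation of the spare could be suppressed for a new thin pool
with a command of this form (the names and sizes are illustrative):
.br
.B lvcreate \-L 10G \-T vg00/pool0 \-\-poolmetadataspare n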
|
||||
.
|
||||
.HP
|
||||
.BR \-\- [ raid ] maxrecoveryrate
|
||||
.SIZE_G \%Rate
|
||||
.br
|
||||
Sets the maximum recovery rate for a RAID logical volume. \fIRate\fP
|
||||
is specified as an amount per second for each device in the array.
|
||||
If no suffix is given, then KiB/sec/device is assumed. Setting the
|
||||
recovery rate to 0 means it will be unbounded.
|
||||
.
|
||||
.HP
|
||||
.BR \-\- [ raid ] minrecoveryrate
|
||||
.SIZE_G \%Rate
|
||||
.br
|
||||
Sets the minimum recovery rate for a RAID logical volume. \fIRate\fP
|
||||
is specified as an amount per second for each device in the array.
|
||||
If no suffix is given, then KiB/sec/device is assumed. Setting the
|
||||
recovery rate to 0 means it will be unbounded.
|
||||
.
|
||||
.HP
|
||||
.BR \-r | \-\-readahead
|
||||
.RB { \fIReadAheadSectors | auto | none }
|
||||
.br
|
||||
Sets the read-ahead sector count of this logical volume.
|
||||
For volume groups with metadata in lvm1 format, this must
|
||||
be a value between 2 and 120.
|
||||
The default value is \fBauto\fP which allows the kernel to choose
|
||||
a suitable value automatically.
|
||||
\fBnone\fP is equivalent to specifying zero.
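.br
For example, a fixed read-ahead of 256 sectors might be set at creation time
(the names and sizes are illustrative):
.br
.B lvcreate \-L 1G \-r 256 \-n lv0 vg00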
|
||||
.
|
||||
.HP
|
||||
.BR \-R | \-\-regionsize
|
||||
.SIZE_G \%MirrorLogRegionSize
|
||||
.br
|
||||
A mirror is divided into regions of this size (in MiB), and the mirror log
|
||||
uses this granularity to track which regions are in sync.
|
||||
.
|
||||
.HP
|
||||
.BR \-k | \-\-setactivationskip
|
||||
.RB { y | n }
|
||||
.br
|
||||
Controls whether Logical Volumes are persistently flagged to be skipped during
|
||||
activation. By default, thin snapshot volumes are flagged for activation skip.
|
||||
See
|
||||
.BR lvm.conf (5)
|
||||
\fBactivation/auto_set_activation_skip\fP
|
||||
for how to change its default behaviour.
|
||||
To activate such volumes, an extra \fB\-\-ignoreactivationskip\fP
|
||||
option must be used. The flag is not applied during deactivation. Use the
|
||||
\fBlvchange \-\-setactivationskip\fP
|
||||
command to change the skip flag for existing volumes.
|
||||
To see whether the flag is attached, use the \fBlvs\fP command
|
||||
where the state of the flag is reported within \fBlv_attr\fP bits.
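.br
For example, a thin snapshot might be created without the skip flag, or the
flag might be cleared on an existing volume (the names are illustrative):
.br
.B lvcreate \-s \-kn \-\-name snap vg00/thinvol
.br
.B lvchange \-kn vg00/snap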
|
||||
.
|
||||
.HP
|
||||
.BR \-L | \-\-size
|
||||
.SIZE_E \%LogicalVolumeSize
|
||||
.br
|
||||
Gives the size to allocate for the new logical volume.
|
||||
A size suffix of \fBB\fP for bytes, \fBS\fP for sectors as 512 bytes,
|
||||
\fBK\fP for kilobytes, \fBM\fP for megabytes,
|
||||
\fBG\fP for gigabytes, \fBT\fP for terabytes, \fBP\fP for petabytes
|
||||
or \fBE\fP for exabytes is optional.
|
||||
.br
|
||||
Default unit is megabytes.
|
||||
.
|
||||
.HP
|
||||
.BR \-s | \fB\-\-snapshot
|
||||
.IR OriginalLogicalVolume { Name | Path }
|
||||
.br
|
||||
Creates a snapshot logical volume (or snapshot) for an existing, so-called
|
||||
original logical volume (or origin).
|
||||
Snapshots provide a 'frozen image' of the contents of the origin
|
||||
while the origin can still be updated. They enable consistent
|
||||
backups and online recovery of removed/overwritten data/files.
|
||||
.br
|
||||
A thin snapshot is created when the origin is a thin volume and
the size is NOT specified. A thin snapshot shares the same blocks within
|
||||
the thin pool volume.
|
||||
A non-thin snapshot with the specified size does not need
|
||||
the same amount of storage the origin has. In a typical scenario,
|
||||
15-20% might be enough. If the snapshot runs out of storage, use
|
||||
.BR lvextend (8)
|
||||
to grow it. Shrinking a snapshot is supported by
|
||||
.BR lvreduce (8)
|
||||
as well. Run
|
||||
.BR lvs (8)
|
||||
on the snapshot in order to check how much data is allocated to it.
|
||||
Note: a small amount of the space you allocate to the snapshot is
|
||||
used to track the locations of the chunks of data, so you should
|
||||
allocate slightly more space than you actually need and monitor
|
||||
(\fB\-\-monitor\fP) the rate at which the snapshot data is growing
|
||||
so you can \fBavoid\fP running out of space.
|
||||
If \fB\-\-thinpool\fP is specified, a thin volume is created that will
use the given original logical volume as an external origin that
|
||||
serves unprovisioned blocks.
|
||||
Only read-only volumes can be used as external origins.
|
||||
To make the volume an external origin, lvm expects the volume to be inactive.
|
||||
An external origin volume can be used/shared by many thin volumes
|
||||
even from different thin pools. See
|
||||
.BR lvconvert (8)
|
||||
for online conversion to thin volumes with external origin.
|
||||
.
|
||||
.HP
|
||||
.BR \-i | \-\-stripes
|
||||
.IR Stripes
|
||||
.br
|
||||
Gives the number of stripes.
|
||||
This is equal to the number of physical volumes over which to scatter
|
||||
the logical volume data. When creating a RAID 4/5/6 logical volume,
|
||||
the extra devices which are necessary for parity are
|
||||
internally accounted for. Specifying \fB\-i 3\fP
|
||||
would use 3 devices for striped and RAID 0 logical volumes,
|
||||
4 devices for RAID 4/5, 5 devices for RAID 6 and 6 devices for RAID 10.
|
||||
Alternatively, RAID 0 will stripe across 2 devices,
|
||||
RAID 4/5 across 3 PVs, RAID 6 across 5 PVs and RAID 10 across
|
||||
4 PVs in the volume group if the \fB\-i\fP argument is omitted.
|
||||
In order to stripe across all PVs of the VG if the \fB\-i\fP argument is
|
||||
omitted, set raid_stripe_all_devices=1 in the allocation
|
||||
section of \fBlvm.conf\fP(5) or add
|
||||
.br
|
||||
\fB\-\-config allocation/raid_stripe_all_devices=1\fP
|
||||
.br
|
||||
to the command.
|
||||
|
||||
Note the current maxima for stripes depend on the created RAID type.
|
||||
For raid10, the maximum number of stripes is 32,
|
||||
for raid0, it is 64,
|
||||
for raid4/5, it is 63
|
||||
and for raid6 it is 62.
|
||||
|
||||
See the \fB\-\-nosync\fP option to optionally skip the initial synchronization of RaidLVs.
|
||||
|
||||
Two implementations of basic striping are available in the kernel.
|
||||
The original device-mapper implementation is the default and should
|
||||
normally be used. The alternative implementation using MD, available
|
||||
since version 1.7 of the RAID device-mapper kernel target (kernel
|
||||
version 4.2), is provided to facilitate the development of new RAID
|
||||
features. It may be accessed with \fB\-\-type raid0[_meta]\fP, but is best
|
||||
avoided at present because of assorted restrictions on resizing and converting
|
||||
such devices.
|
||||
.HP
|
||||
.BR \-I | \-\-stripesize
|
||||
.IR StripeSize
|
||||
.br
|
||||
Gives the number of kilobytes for the granularity of the stripes.
|
||||
.br
|
||||
StripeSize must be 2^n (n = 2 to 9) for metadata in LVM1 format.
|
||||
For metadata in LVM2 format, the stripe size may be a larger
|
||||
power of 2 but must not exceed the physical extent size.
|
||||
.
|
||||
.HP
|
||||
.BR \-T | \-\-thin
|
||||
.br
|
||||
Creates thin pool or thin logical volume or both.
|
||||
Specifying the optional argument \fB\-\-size\fP or \fB\-\-extents\fP
|
||||
will cause the creation of the thin pool logical volume.
|
||||
Specifying the optional argument \fB\-\-virtualsize\fP will cause
|
||||
the creation of the thin logical volume from given thin pool volume.
|
||||
Specifying both arguments will cause the creation of both
|
||||
thin pool and thin volume using this pool.
|
||||
See \fBlvmthin\fP(7) for more info about thin provisioning support.
|
||||
Thin provisioning requires device mapper kernel driver
|
||||
from kernel 3.2 or greater.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-thinpool
|
||||
.IR ThinPoolLogicalVolume { Name | Path }
|
||||
.br
|
||||
Specifies the name of the thin pool volume. Alternatively, the pool name
can be appended to the volume group name argument.
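.br
For example, both of the following illustrative commands create a thin volume
in the existing pool "pool0" of volume group "vg00":
.br
.B lvcreate \-V 2G \-\-thinpool pool0 \-n thin_lv vg00
.br
.B lvcreate \-V 2G \-T vg00/pool0 \-n thin_lv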
|
||||
.
|
||||
.HP
|
||||
.BR \-\-type
|
||||
.IR SegmentType
|
||||
.br
|
||||
Creates a logical volume with the specified segment type.
|
||||
Supported types are:
|
||||
.BR cache ,
|
||||
.BR cache-pool ,
|
||||
.BR error ,
|
||||
.BR linear ,
|
||||
.BR mirror ,
|
||||
.BR raid0 ,
|
||||
.BR raid1 ,
|
||||
.BR raid4 ,
|
||||
.BR raid5_la ,
|
||||
.BR raid5_ls
|
||||
.RB (=
|
||||
.BR raid5 ),
|
||||
.BR raid5_ra ,
|
||||
.BR raid5_rs ,
|
||||
.BR raid6_nc ,
|
||||
.BR raid6_nr ,
|
||||
.BR raid6_zr
|
||||
.RB (=
|
||||
.BR raid6 ),
|
||||
.BR raid10 ,
|
||||
.BR snapshot ,
|
||||
.BR striped ,
|
||||
.BR thin ,
|
||||
.BR thin-pool
|
||||
or
|
||||
.BR zero .
|
||||
A segment type may have a command-line switch alias that
enables its use.
|
||||
When the type is not explicitly specified, an implicit type
is selected from the combination of options:
|
||||
.BR \-H | \-\-cache | \-\-cachepool
|
||||
(cache or cachepool),
|
||||
.BR \-T | \-\-thin | \-\-thinpool
|
||||
(thin or thinpool),
|
||||
.BR \-m | \-\-mirrors
|
||||
(raid1 or mirror),
|
||||
.BR \-s | \-\-snapshot | \-V | \-\-virtualsize
|
||||
(snapshot or thin),
|
||||
.BR \-i | \-\-stripes
|
||||
(striped).
|
||||
The default segment type is \fBlinear\fP.
|
||||
.
|
||||
.HP
|
||||
.BR \-V | \-\-virtualsize
|
||||
.SIZE_E \%VirtualSize
|
||||
.br
|
||||
Creates a thinly provisioned device or a sparse device of the given size (in MiB by default).
|
||||
See
|
||||
.BR lvm.conf (5)
|
||||
settings \fBglobal/sparse_segtype_default\fP
|
||||
to configure default sparse segment type.
|
||||
See \fBlvmthin\fP(7) for more info about thin provisioning support.
|
||||
Anything written to a sparse snapshot will be returned when reading from it.
|
||||
Reading from other areas of the device will return blocks of zeros.
|
||||
A virtual snapshot (sparse snapshot) is implemented by creating
|
||||
a hidden virtual device of the requested size using the zero target.
|
||||
A suffix of _vorigin is used for this device.
|
||||
Note: using sparse snapshots is not efficient for larger
|
||||
device sizes (GiB); thin provisioning should be used in this case.
|
||||
.
|
||||
.HP
|
||||
.BR \-W | \-\-wipesignatures
|
||||
.RB { y | n }
|
||||
.br
|
||||
Controls detection and subsequent wiping of signatures on a newly created
Logical Volume. There's a prompt for each signature detected to confirm
its wiping (unless \fB\-\-yes\fP is used, in which case LVM automatically
assumes a 'yes' answer for each prompt). If this option is not specified, then by
|
||||
default \fB\-W\fP | \fB\-\-wipesignatures y\fP is assumed each time the
|
||||
zeroing is done (\fB\-Z\fP | \fB\-\-zero y\fP). This default behaviour
|
||||
can be controlled by \fB\%allocation/wipe_signatures_when_zeroing_new_lvs\fP
|
||||
setting found in
|
||||
.BR lvm.conf (5).
|
||||
.br
|
||||
If blkid wiping is used (\fBallocation/use_blkid_wiping\fP setting in
|
||||
.BR lvm.conf (5))
|
||||
and LVM2 is compiled with blkid wiping support, then \fBblkid\fP(8) library is used
|
||||
to detect the signatures (use \fBblkid \-k\fP command to list the signatures that are recognized).
|
||||
Otherwise, native LVM2 code is used to detect signatures (MD RAID, swap and LUKS
|
||||
signatures are detected only in this case).
|
||||
.br
|
||||
The logical volume is not wiped if the read-only flag is set.
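.br
For example, signature wiping without any prompting might be requested with a
command of this form (the names and sizes are illustrative):
.br
.B lvcreate \-\-wipesignatures y \-\-yes \-L 1G \-n lv0 vg00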
|
||||
.
|
||||
.HP
|
||||
.BR \-Z | \-\-zero
|
||||
.RB { y | n }
|
||||
.br
|
||||
Controls zeroing of the first 4KiB of data in the new logical volume.
|
||||
Default is \fBy\fPes.
|
||||
Snapshot COW volumes are always zeroed.
|
||||
The logical volume is not zeroed if the read-only flag is set.
|
||||
.br
|
||||
Warning: trying to mount an unzeroed logical volume can cause the system to
|
||||
hang.
|
||||
.
|
||||
.SH Examples
|
||||
.
|
||||
Creates a striped logical volume with 3 stripes, a stripe size of 8KiB
|
||||
and a size of 100MiB in the volume group named vg00.
|
||||
The logical volume name will be chosen by lvcreate:
|
||||
.sp
|
||||
.B lvcreate \-i 3 \-I 8 \-L 100M vg00
|
||||
|
||||
Creates a mirror logical volume with 2 sides and a usable size of 500 MiB.
|
||||
This operation would require 3 devices (or option
|
||||
\fB\-\-alloc \%anywhere\fP) - two for the mirror
|
||||
devices and one for the disk log:
|
||||
.sp
|
||||
.B lvcreate \-m1 \-L 500M vg00
|
||||
|
||||
Creates a mirror logical volume with 2 sides and a usable size of 500 MiB.
|
||||
This operation would require 2 devices - the log is "in-memory":
|
||||
.sp
|
||||
.B lvcreate \-m1 \-\-mirrorlog core \-L 500M vg00
|
||||
|
||||
Creates a snapshot logical volume named "vg00/snap" which has access to the
|
||||
contents of the original logical volume named "vg00/lvol1"
|
||||
at snapshot logical volume creation time. If the original logical volume
|
||||
contains a file system, you can mount the snapshot logical volume on an
|
||||
arbitrary directory in order to access the contents of the filesystem to run
|
||||
a backup while the original filesystem continues to get updated:
|
||||
.sp
|
||||
.B lvcreate \-\-size 100m \-\-snapshot \-\-name snap /dev/vg00/lvol1
|
||||
|
||||
Creates a snapshot logical volume named "vg00/snap" with enough space
to overwrite 20% of the original logical volume named "vg00/lvol1":
|
||||
.sp
|
||||
.B lvcreate \-s \-l 20%ORIGIN \-\-name snap vg00/lvol1
|
||||
|
||||
Creates a sparse device named /dev/vg1/sparse of size 1TiB with space for just
|
||||
under 100MiB of actual data on it:
|
||||
.sp
|
||||
.B lvcreate \-\-virtualsize 1T \-\-size 100M \-\-snapshot \-\-name sparse vg1
|
||||
|
||||
Creates a linear logical volume "vg00/lvol1" using physical extents
|
||||
/dev/sda:0\-7 and /dev/sdb:0\-7 for allocation of extents:
|
||||
.sp
|
||||
.B lvcreate \-L 64M \-n lvol1 vg00 /dev/sda:0\-7 /dev/sdb:0\-7
|
||||
|
||||
Creates a 5GiB RAID5 logical volume "vg00/my_lv", with 3 stripes (plus
|
||||
a parity drive for a total of 4 devices) and a stripesize of 64KiB:
|
||||
.sp
|
||||
.B lvcreate \-\-type raid5 \-L 5G \-i 3 \-I 64 \-n my_lv vg00
|
||||
|
||||
Creates a RAID5 logical volume "vg00/my_lv", using all of the free
|
||||
space in the VG and spanning all the PVs in the VG (note that the command
|
||||
will fail if there are more than 8 PVs in the VG, in which case \fB\-i 7\fP
|
||||
has to be used to get to the currently possible maximum of
|
||||
8 devices including parity for RaidLVs):
|
||||
.sp
|
||||
.B lvcreate \-\-config allocation/raid_stripe_all_devices=1 \-\-type raid5 \-l 100%FREE \-n my_lv vg00
|
||||
|
||||
Creates a 5GiB RAID10 logical volume "vg00/my_lv", striped across
two 2-way mirrors. Note that the \fB\-i\fP and \fB\-m\fP arguments behave
|
||||
differently.
|
||||
The \fB\-i\fP specifies the number of stripes.
The \fB\-m\fP specifies the number of
|
||||
.B additional
|
||||
copies:
|
||||
.sp
|
||||
.B lvcreate \-\-type raid10 \-L 5G \-i 2 \-m 1 \-n my_lv vg00
|
||||
|
||||
Creates a 100MiB thin pool logical volume
built with 2 stripes of 64KiB and a chunk size of 256KiB, together with
a 1TiB thinly provisioned logical volume "vg00/thin_lv":
|
||||
.sp
|
||||
.B lvcreate \-i 2 \-I 64 \-c 256 \-L100M \-T vg00/pool \-V 1T \-\-name thin_lv
|
||||
|
||||
Creates a thin snapshot volume "thinsnap" of thin volume "thinvol" that
|
||||
will share the same blocks within the thin pool.
|
||||
Note: the size MUST NOT be specified, otherwise the non-thin snapshot
|
||||
is created instead:
|
||||
.sp
|
||||
.B lvcreate \-s vg00/thinvol \-\-name thinsnap
|
||||
|
||||
Creates a thin snapshot volume of read-only inactive volume "origin"
|
||||
which then becomes the thin external origin for the thin snapshot volume
|
||||
in vg00 that will use an existing thin pool "vg00/pool":
|
||||
.sp
|
||||
.B lvcreate \-s \-\-thinpool vg00/pool origin
|
||||
|
||||
Create a cache pool LV that can later be used to cache one
|
||||
logical volume:
|
||||
.sp
|
||||
.B lvcreate \-\-type cache-pool \-L 1G \-n my_lv_cachepool vg /dev/fast1
|
||||
|
||||
If there is an existing cache pool LV, create the large slow
|
||||
device (i.e. the origin LV) and link it to the supplied cache pool LV,
|
||||
creating a cache LV:
|
||||
.sp
|
||||
.B lvcreate \-\-cache \-L 100G \-n my_lv vg/my_lv_cachepool /dev/slow1
|
||||
|
||||
If there is an existing logical volume, create the small and fast
|
||||
cache pool LV and link it to the supplied existing logical
|
||||
volume (i.e. the origin LV), creating a cache LV:
|
||||
.sp
|
||||
.B lvcreate \-\-type cache \-L 1G \-n my_lv_cachepool vg/my_lv /dev/fast1
|
||||
|
||||
.\" Create a 1G cached LV "lvol1" with 10M cache pool "vg00/pool".
|
||||
.\" .sp
|
||||
.\" .B lvcreate \-\-cache \-L 1G \-n lv \-\-pooldatasize 10M vg00/pool
|
||||
.
|
||||
.SH SEE ALSO
|
||||
.
|
||||
.nh
|
||||
.BR lvm (8),
|
||||
.BR lvm.conf (5),
|
||||
.BR lvmcache (7),
|
||||
.BR lvmthin (7),
|
||||
.BR lvconvert (8),
|
||||
.BR lvchange (8),
|
||||
.BR lvextend (8),
|
||||
.BR lvreduce (8),
|
||||
.BR lvremove (8),
|
||||
.BR lvrename (8),
|
||||
.BR lvs (8),
|
||||
.BR lvscan (8),
|
||||
.BR vgcreate (8),
|
||||
.BR blkid (8)
|
||||
5
man/lvdisplay.8.des
Normal file
@@ -0,0 +1,5 @@
|
||||
lvdisplay shows the attributes of LVs, like size, read/write status,
|
||||
snapshot information, etc.
|
||||
|
||||
\fBlvs\fP(8) is a preferred alternative that shows the same information
|
||||
and more, using a more compact and configurable output format.
|
||||
@@ -1,134 +0,0 @@
|
||||
.TH LVDISPLAY 8 "LVM TOOLS #VERSION#" "Sistina Software UK" \" -*- nroff -*-
|
||||
.SH NAME
|
||||
lvdisplay \(em display attributes of a logical volume
|
||||
.SH SYNOPSIS
|
||||
.B lvdisplay
|
||||
.RB [ \-a | \-\-all ]
|
||||
.RB [ \-c | \-\-colon ]
|
||||
.RB [ \-\-commandprofile
|
||||
.IR ProfileName ]
|
||||
.RB [ \-d | \-\-debug ]
|
||||
.RB [ \-h | \-? | \-\-help ]
|
||||
.RB [ \-\-ignorelockingfailure ]
|
||||
.RB [ \-\-ignoreskippedcluster ]
|
||||
.RB [ \-\-maps ]
|
||||
.RB [ \-\-nosuffix ]
|
||||
.RB [ \-P | \-\-partial ]
|
||||
.RB [ \-\-reportformat
|
||||
.RB { basic | json }]
|
||||
.RB [ \-S | \-\-select
|
||||
.IR Selection ]
|
||||
.RB [ \-\-units
|
||||
.IR hHbBsSkKmMgGtTpPeE ]
|
||||
.RB [ \-v | \-\-verbose ]
|
||||
.RB [ \-\-version ]
|
||||
.RI [ VolumeGroupName | LogicalVolume { Name | Path }\ ...]
|
||||
.br
|
||||
|
||||
.B lvdisplay
|
||||
.BR \-C | \-\-columns
|
||||
.RB [ \-\-aligned ]
|
||||
.RB [ \-\-binary ]
|
||||
.RB [ \-a | \-\-all ]
|
||||
.RB [ \-\-commandprofile
|
||||
.IR ProfileName ]
|
||||
.RB [[ \-\-configreport
|
||||
.IR ReportName ]
|
||||
.RB [ \-o | \-\-options
|
||||
.RI [ + | \- | # ] Field1 [, Field2 ...]
|
||||
.RB [ \-O | \-\-sort
|
||||
.RI [ + | \- ] Key1 [, Key2 ...]]
|
||||
.RB [ \-S | \-\-select
|
||||
.IR Selection ]
|
||||
.RB ...]
|
||||
.RB [ \-d | \-\-debug ]
|
||||
.RB [ \-h | \-? | \-\-help ]
|
||||
.RB [ \-\-ignorelockingfailure ]
|
||||
.RB [ \-\-ignoreskippedcluster ]
|
||||
.RB [ \-\-logonly ]
|
||||
.RB [ \-\-noheadings ]
|
||||
.RB [ \-\-nosuffix ]
|
||||
.RB [ \-P | \-\-partial ]
|
||||
.RB [ \-\-reportformat
|
||||
.RB { basic | json }]
|
||||
.RB [ \-\-segments ]
|
||||
.RB [ \-\-separator
|
||||
.IR Separator ]
|
||||
.RB [ \-\-unbuffered ]
|
||||
.RB [ \-\-units
|
||||
.IR hHbBsSkKmMgGtTpPeE ]
|
||||
.RB [ \-v | \-\-verbose ]
|
||||
.RB [ \-\-version ]
|
||||
.RI [ VolumeGroupName | LogicalVolume { Name | Path }\ ...]
|
||||
.SH DESCRIPTION
|
||||
lvdisplay allows you to see the attributes of a logical volume
|
||||
like size, read/write status, snapshot information etc.
|
||||
.P
|
||||
\fBlvs\fP(8) is an alternative that provides the same information
|
||||
in the style of \fBps\fP(1).
|
||||
\fBlvs\fP(8) is recommended over \fBlvdisplay\fP.
|
||||
|
||||
.SH OPTIONS
|
||||
See \fBlvm\fP(8) for common options and \fBlvs\fP for options given with
|
||||
\fB\-\-columns\fP.
|
||||
.TP
|
||||
.B \-\-all
|
||||
Include information in the output about internal Logical Volumes that
|
||||
are components of normally-accessible Logical Volumes, such as mirrors,
|
||||
but which are not independently accessible (e.g. not mountable).
|
||||
For example, after creating a mirror using
|
||||
\fBlvcreate \-m1 \-\-mirrorlog disk\fP,
|
||||
this option will reveal three internal Logical Volumes, with suffixes
|
||||
mimage_0, mimage_1, and mlog.
|
||||
.TP
|
||||
.BR \-C ", " \-\-columns
|
||||
Display output in columns, the equivalent of \fBlvs\fP(8).
|
||||
Options listed are the same as options given in \fBlvs\fP(8).
|
||||
.TP
|
||||
.BR \-c ", " \-\-colon
|
||||
Generate colon separated output for easier parsing in scripts or programs.
|
||||
N.B. \fBlvs\fP(8) provides considerably more control over the output.
|
||||
.nf
|
||||
|
||||
The values are:
|
||||
|
||||
\(bu logical volume name
|
||||
\(bu volume group name
|
||||
\(bu logical volume access
|
||||
\(bu logical volume status
|
||||
\(bu internal logical volume number
|
||||
\(bu open count of logical volume
|
||||
\(bu logical volume size in sectors
|
||||
\(bu current logical extents associated to logical volume
|
||||
\(bu allocated logical extents of logical volume
|
||||
\(bu allocation policy of logical volume
|
||||
\(bu read ahead sectors of logical volume
|
||||
\(bu major device number of logical volume
|
||||
\(bu minor device number of logical volume
|
||||
|
||||
.fi
|
||||
.TP
|
||||
.BR \-m ", " \-\-maps
|
||||
Display the mapping of logical extents to physical volumes and
|
||||
physical extents. To map physical extents
|
||||
to logical extents use:
|
||||
.B pvs \-\-segments \-o+lv_name,seg_start_pe,segtype
|
||||
.SH Examples
|
||||
Shows attributes of that logical volume. If snapshot
|
||||
logical volumes have been created for this original logical volume,
|
||||
this command shows a list of all snapshot logical volumes and their
|
||||
status (active or inactive) as well:
|
||||
.sp
|
||||
.B lvdisplay \-v vg00/lvol2
|
||||
|
||||
Shows the attributes of this snapshot logical volume and also which
|
||||
original logical volume it is associated with:
|
||||
.sp
|
||||
.B lvdisplay vg00/snapshot
|
||||
|
||||
.SH SEE ALSO
|
||||
.BR lvm (8),
|
||||
.BR lvcreate (8),
|
||||
.BR lvs (8),
|
||||
.BR lvscan (8),
|
||||
.BR pvs (8)
|
||||
5
man/lvextend.8.des
Normal file
@@ -0,0 +1,5 @@
|
||||
lvextend extends the size of an LV. This requires allocating logical
|
||||
extents from the VG's free physical extents. A copy\-on\-write snapshot LV
|
||||
can also be extended to provide more space to hold COW blocks. Use
|
||||
\fBlvconvert\fP(8) to change the number of data images in a RAID or
|
||||
mirrored LV.
|
||||
16
man/lvextend.8.end
Normal file
@@ -0,0 +1,16 @@
|
||||
.SH EXAMPLES
|
||||
|
||||
Extend the size of an LV by 54MiB, using a specific PV.
|
||||
.br
|
||||
.B lvextend \-L +54 vg01/lvol10 /dev/sdk3
|
||||
|
||||
Extend the size of an LV by the amount of free
|
||||
space on PV /dev/sdk3. This is equivalent to specifying
|
||||
"\-l +100%PVS" on the command line.
|
||||
.br
|
||||
.B lvextend vg01/lvol01 /dev/sdk3
|
||||
|
||||
Extend an LV by 16MiB using specific physical extents.
|
||||
.br
|
||||
.B lvextend \-L+16m vg01/lvol01 /dev/sda:8\-9 /dev/sdb:8\-9
|
||||
|
||||
@@ -1,134 +0,0 @@
|
||||
.TH LVEXTEND 8 "LVM TOOLS #VERSION#" "Sistina Software UK" \" -*- nroff -*-
|
||||
.SH NAME
|
||||
lvextend \(em extend the size of a logical volume
|
||||
.SH SYNOPSIS
|
||||
.B lvextend
|
||||
.RB [ \-\-alloc
|
||||
.IR AllocationPolicy ]
|
||||
.RB [ \-A | \-\-autobackup
|
||||
.RI { y | n }]
|
||||
.RB [ \-\-commandprofile
|
||||
.IR ProfileName ]
|
||||
.RB [ \-d | \-\-debug ]
|
||||
.RB [ \-h | \-? | \-\-help ]
|
||||
.RB [ \-f | \-\-force ]
|
||||
.RB [ \-i | \-\-stripes
|
||||
.I Stripes
|
||||
.RB [ \-I | \-\-stripesize
|
||||
.IR StripeSize ]]
|
||||
.RB { \-l | \-\-extents
|
||||
.RI [ + ] LogicalExtentsNumber [ % { VG | LV | PVS | FREE | ORIGIN }]
|
||||
|
|
||||
.BR \-L | \-\-size
|
||||
.RI [ + ] LogicalVolumeSize [ bBsSkKmMgGtTpPeE ]}
|
||||
.RB [ \-n | \-\-nofsck ]
|
||||
.RB [ \-\-noudevsync]
|
||||
.RB [ \-r | \-\-resizefs ]
|
||||
.RB [ \-\-reportformat
|
||||
.RB { basic | json }]
|
||||
.RB [ \-\-use\-policies ]
|
||||
.RB [ \-t | \-\-test ]
|
||||
.RB [ \-v | \-\-verbose ]
|
||||
.I LogicalVolumePath
|
||||
.RI [ PhysicalVolumePath [ :PE [ \-PE ]]...]
|
||||
.SH DESCRIPTION
|
||||
lvextend allows you to extend the size of a logical volume.
|
||||
Extension of snapshot logical volumes (see
|
||||
.BR lvcreate (8)
|
||||
for information to create snapshots) is supported as well.
|
||||
But to change the number of copies in a mirrored logical
|
||||
volume use
|
||||
.BR lvconvert (8).
|
||||
.SH OPTIONS
|
||||
See \fBlvm\fP(8) for common options.
|
||||
.TP
|
||||
.BR \-f ", " \-\-force
|
||||
Proceed with size extension without prompting.
|
||||
.TP
|
||||
.IR \fB\-l ", " \fB\-\-extents " [" + ] LogicalExtentsNumber [ % { VG | LV | PVS | FREE | ORIGIN }]
|
||||
Extend or set the logical volume size in units of logical extents.
|
||||
With the '\fI+\fP' sign the value is added to the actual size
|
||||
of the logical volume and without it, the value is taken as an absolute one.
|
||||
The total number of physical extents allocated will be
|
||||
greater than this, for example, if the volume is mirrored.
|
||||
The number can also be expressed as a percentage of the total space
|
||||
in the Volume Group with the suffix \fI%VG\fP, relative to the existing
|
||||
size of the Logical Volume with the suffix \fI%LV\fP, of the remaining
|
||||
free space for the specified PhysicalVolume(s) with the suffix \fI%PVS\fP,
|
||||
as a percentage of the remaining free space in the Volume Group
|
||||
with the suffix \fI%FREE\fP, or (for a snapshot) as a percentage of the total
|
||||
space in the Origin Logical Volume with the suffix \fI%ORIGIN\fP.
|
||||
The resulting value is rounded upward.
|
||||
N.B. In a future release, when expressed as a percentage with PVS, VG or FREE,
|
||||
the number will be treated as an approximate upper limit for the total number
|
||||
of physical extents to be allocated (including extents used by any mirrors, for
|
||||
example). The code may currently allocate more space than you might otherwise
|
||||
expect.
|
||||
.TP
|
||||
.IR \fB\-L ", " \fB\-\-size " [" + ] LogicalVolumeSize [ bBsSkKmMgGtTpPeE ]
|
||||
Extend or set the logical volume size in units of megabytes.
|
||||
A size suffix of M for megabytes,
|
||||
G for gigabytes, T for terabytes, P for petabytes
|
||||
or E for exabytes is optional.
|
||||
With the + sign the value is added to the actual size
|
||||
of the logical volume and without it, the value is taken as an absolute one.
|
||||
.TP
|
||||
.BR \-i ", " \-\-stripes " " \fIStripes
|
||||
Gives the number of stripes for the extension.
|
||||
Not applicable to LVs using the original metadata LVM format, which must
|
||||
use a single value throughout.
|
||||
.TP
|
||||
.BR \-I ", " \-\-stripesize " " \fIStripeSize
|
||||
Gives the number of kilobytes for the granularity of the stripes.
|
||||
Not applicable to LVs using the original metadata LVM format, which must
|
||||
use a single value throughout.
|
||||
.br
|
||||
StripeSize must be 2^n (n = 2 to 9)
|
||||
.TP
|
||||
.BR \-n ", " \-\-nofsck
|
||||
Do not perform fsck before extending filesystem when filesystem
|
||||
requires it. You may need to use \fB\-\-force\fR to proceed with
|
||||
this option.
|
||||
.TP
|
||||
.B \-\-noudevsync
|
||||
Disable udev synchronisation. The
|
||||
process will not wait for notification from udev.
|
||||
It will continue irrespective of any possible udev processing
|
||||
in the background. You should only use this if udev is not running
|
||||
or has rules that ignore the devices LVM2 creates.
|
||||
.TP
|
||||
.BR \-r ", " \-\-resizefs
|
||||
Resize underlying filesystem together with the logical volume using
|
||||
\fBfsadm\fR(8).
|
||||
.TP
|
||||
.B \-\-use\-policies
|
||||
Resizes the logical volume according to configured policy. See
|
||||
\fBlvm.conf\fR(5) for some details.
|
||||
|
||||
.SH Examples
|
||||
Extends the size of the logical volume "vg01/lvol10" by 54MiB on physical
|
||||
volume /dev/sdk3. This is only possible if /dev/sdk3 is a member of
|
||||
volume group vg01 and there are enough free physical extents in it:
|
||||
.sp
|
||||
.B lvextend \-L +54 /dev/vg01/lvol10 /dev/sdk3
|
||||
|
||||
Extends the size of logical volume "vg01/lvol01" by the amount of free
|
||||
space on physical volume /dev/sdk3. This is equivalent to specifying
|
||||
"\-l +100%PVS" on the command line:
|
||||
.sp
|
||||
.B lvextend /dev/vg01/lvol01 /dev/sdk3
|
||||
|
||||
Extends a logical volume "vg01/lvol01" by 16MiB using physical extents
|
||||
/dev/sda:8\-9 and /dev/sdb:8\-9 for allocation of extents:
|
||||
.sp
|
||||
.B lvextend -L+16M vg01/lvol01 /dev/sda:8\-9 /dev/sdb:8\-9
|
||||
|
||||
.SH SEE ALSO
|
||||
.BR fsadm (8),
|
||||
.BR lvm (8),
|
||||
.BR lvm.conf (5),
|
||||
.BR lvcreate (8),
|
||||
.BR lvconvert (8),
|
||||
.BR lvreduce (8),
|
||||
.BR lvresize (8),
|
||||
.BR lvchange (8)
|
||||
5
man/lvm-config.8.des
Normal file
@@ -0,0 +1,5 @@
|
||||
This command is the same as \fBlvmconfig\fP(8).
|
||||
|
||||
lvm config produces formatted output from the LVM configuration tree. The
|
||||
sources of the configuration data include \fBlvm.conf\fP(5) and command
|
||||
line settings from \-\-config.
|
||||
@@ -1 +0,0 @@
|
||||
.so man8/lvmconfig.8
|
||||
5
man/lvm-dumpconfig.8.des
Normal file
@@ -0,0 +1,5 @@
|
||||
This command is the same as \fBlvmconfig\fP(8).
|
||||
|
||||
lvm dumpconfig produces formatted output from the LVM configuration tree. The
|
||||
sources of the configuration data include \fBlvm.conf\fP(5) and command
|
||||
line settings from \-\-config.
|
||||
@@ -1 +0,0 @@
|
||||
.so man8/lvmconfig.8
|
||||
6
man/lvm-fullreport.8.des
Normal file
@@ -0,0 +1,6 @@
|
||||
lvm fullreport produces formatted output about PVs, PV segments, VGs, LVs
|
||||
and LV segments. The information is all gathered together for each VG
|
||||
(under a per-VG lock) so it is consistent. Information gathered from
|
||||
separate calls to \fBvgs\fP, \fBpvs\fP, and \fBlvs\fP can be inconsistent
|
||||
if information changes between commands.
|
||||
|
||||
@@ -1,145 +0,0 @@
|
||||
.TH LVM-FULLREPORT 8 "LVM TOOLS #VERSION#" "Red Hat, Inc" \" -*- nroff -*-
|
||||
.SH NAME
|
||||
lvm fullreport \(em Report information about PVs, PV segments, VGs, LVs and LV segments, all at once for each VG.
|
||||
.SH SYNOPSIS
|
||||
.B lvm fullreport
|
||||
.RB [ \-a | \-\-all ]
|
||||
.RB [ \-\-aligned ]
|
||||
.RB [ \-\-binary ]
|
||||
.RB [ \-\-commandprofile
|
||||
.IR ProfileName ]
|
||||
.RB [[ \-\-configreport
|
||||
.IR ReportName ]
|
||||
.RB [ \-o | \-\-options
|
||||
.RI [ + | \- | # ] Field1 [, Field2 ...]
|
||||
.RB [ \-O | \-\-sort
|
||||
.RI [ + | \- ] Key1 [, Key2 ...]]
|
||||
.RB [ \-S | \-\-select
|
||||
.IR Selection ]
|
||||
.RB ...]
|
||||
.RB [ \-d | \-\-debug ]
|
||||
.RB [ \-h | \-? | \-\-help ]
|
||||
.RB [ \-\-ignorelockingfailure ]
|
||||
.RB [ \-\-ignoreskippedcluster ]
|
||||
.RB [ \-\-logonly ]
|
||||
.RB [ \-\-nameprefixes ]
|
||||
.RB [ \-\-noheadings ]
|
||||
.RB [ \-\-nosuffix ]
|
||||
.RB [ \-P | \-\-partial ]
|
||||
.RB [ \-\-reportformat
|
||||
.RB { basic | json }]
|
||||
.RB [ \-\-rows ]
|
||||
.RB [ \-\-separator
|
||||
.IR Separator ]
|
||||
.RB [ \-\-unbuffered ]
|
||||
.RB [ \-\-units
|
||||
.IR hHbBsSkKmMgGtTpPeE ]
|
||||
.RB [ \-\-unquoted ]
|
||||
.RB [ \-v | \-\-verbose ]
|
||||
.RB [ \-\-version ]
|
||||
.RI [ VolumeGroupName
|
||||
.RI [ VolumeGroupName ...]]
|
||||
.SH DESCRIPTION
|
||||
lvm fullreport produces formatted output about PVs, PV segments, VGs, LVs
|
||||
and LV segments, all at once for each VG and guarded by per-VG lock
|
||||
for consistency.
|
||||
.SH OPTIONS
|
||||
See \fBlvm\fP(8) for common options.
|
||||
.TP
|
||||
.B \-\-all
|
||||
Include information in the output about internal Logical Volumes that
|
||||
are components of normally-accessible Logical Volumes, such as mirrors,
|
||||
but which are not independently accessible (e.g. not mountable).
|
||||
The names of such Logical Volumes are enclosed within square brackets
|
||||
in the output. For example, after creating a mirror using
|
||||
.B lvcreate -m1 \-\-mirrorlog disk
|
||||
, this option will reveal three internal Logical
|
||||
Volumes, with suffixes mimage_0, mimage_1, and mlog.
|
||||
.TP
|
||||
.B \-\-aligned
|
||||
Use with \fB\-\-separator\fP to align the output columns.
|
||||
.TP
|
||||
.B \-\-binary
|
||||
Use binary values "0" or "1" instead of descriptive literal values
|
||||
for columns that have exactly two valid values to report (not counting
|
||||
the "unknown" value which denotes that the value could not be determined).
|
||||
.TP
|
||||
.B \-\-configreport \fI ReportName
|
||||
Make any subsequent \fB\-o, \-\-options\fP, \fB\-O, \-\-sort\fP or
|
||||
\fB\-S, \-\-select\fP to apply for \fIReportName\fP where \fIReportName\fP
|
||||
is 'pv' for PV subreport, 'pvseg' for PV segment subreport, 'vg' for
|
||||
VG subreport, 'lv' for LV subreport, 'seg' for LV segment subreport or 'log'
|
||||
for log report. If \fB\-\-configreport\fP option is not used to identify a
|
||||
report, then all command's subreports are assumed except log report. The log
|
||||
report is available only if enabled by \fBlog/report_command_log\fP
|
||||
\fBlvm.conf\fP(5) setting or if \fB\-\-logonly\fP option is used.
|
||||
.TP
|
||||
.B \-\-logonly
|
||||
Suppress the main report itself and display only log report on output.
|
||||
.TP
|
||||
.B \-\-nameprefixes
|
||||
Add an "LVM2_" prefix plus the field name to the output. Useful
|
||||
with \fB\-\-noheadings\fP to produce a list of field=value pairs that can
|
||||
be used to set environment variables (for example, in \fBudev\fP(7) rules).
|
||||
.TP
|
||||
.B \-\-noheadings
|
||||
Suppress the headings line that is normally the first line of output.
|
||||
Useful if grepping the output.
|
||||
.TP
|
||||
.B \-\-nosuffix
|
||||
Suppress the suffix on output sizes. Use with \fB\-\-units\fP
|
||||
(except h and H) if processing the output.
|
||||
.TP
|
||||
.BR \-o ", " \-\-options
|
||||
Comma-separated ordered list of columns.
|
||||
.IP
|
||||
Precede the list with '\fI+\fP' to append to the current list
|
||||
of columns, '\fI-\fP' to remove from the current list of columns
|
||||
or '\fI#\fP' to compact given columns. The \fI\-o\fP option can
|
||||
be repeated, providing several lists. These lists are evaluated
|
||||
from left to right.
|
||||
.IP
|
||||
For the list of columns, see \fBpvs\fP(8), \fBvgs\fP(8),
|
||||
\fBlvs\fP(8) man page or check \fBpvs\fP, \fBvgs\fP, \fBlvs -o help\fP
|
||||
output.
|
||||
.TP
|
||||
.BR \-O ", " \-\-sort
|
||||
Comma-separated ordered list of columns to sort by. Replaces the default
|
||||
selection. Precede any column with '\fI\-\fP' for a reverse sort on that
|
||||
column.
|
||||
.TP
|
||||
.B \-\-rows
|
||||
Output columns as rows.
|
||||
.TP
|
||||
.BR \-S ", " \-\-select " " \fISelection
|
||||
Display only rows that match Selection criteria. All rows are displayed with
|
||||
the additional "selected" column (\fB-o selected\fP) showing 1 if the row
|
||||
matches the Selection and 0 otherwise. The Selection criteria are defined
|
||||
by specifying column names and their valid values (that can include reserved
|
||||
values) while making use of supported comparison operators. See \fBlvm\fP(8)
|
||||
and \fB\-S\fP, \fB\-\-select\fP description for more detailed information
|
||||
about constructing the Selection criteria. As a quick help and to see full
|
||||
list of column names that can be used in Selection including the list of
|
||||
reserved values and the set of supported selection operators, check the
|
||||
output of \fBpvs\fP, \fBvgs\fP, \fBlvs -S help\fP command.
|
||||
.TP
|
||||
.B \-\-separator \fISeparator
|
||||
String to use to separate each column. Useful if grepping the output.
|
||||
.TP
|
||||
.B \-\-unbuffered
|
||||
Produce output immediately without sorting or aligning the columns properly.
|
||||
.TP
|
||||
.B \-\-units \fIhHbBsSkKmMgGtTpPeE
|
||||
All sizes are output in these units: (h)uman-readable, (b)ytes, (s)ectors,
|
||||
(k)ilobytes, (m)egabytes, (g)igabytes, (t)erabytes, (p)etabytes, (e)xabytes.
|
||||
Capitalise to use multiples of 1000 (S.I.) instead of 1024. Can also specify
|
||||
custom units e.g. \-\-units 3M
|
||||
.TP
|
||||
.B \-\-unquoted
|
||||
When used with \fB\-\-nameprefixes\fP, output values in the field=value
|
||||
pairs are not quoted.
|
||||
.SH SEE ALSO
|
||||
.BR lvm (8),
|
||||
.BR pvs (8),
|
||||
.BR vgs (8),
|
||||
.BR lvs (8)
|
||||
4
man/lvm-lvpoll.8.des
Normal file
@@ -0,0 +1,4 @@
|
||||
lvm lvpoll is an internal command used by \fBlvmpolld\fP(8) to monitor and
|
||||
complete \fBlvconvert\fP(8) and \fBpvmove\fP(8) operations. lvpoll itself
|
||||
does not initiate these operations and should not normally need to be run
|
||||
directly.
|
||||
33
man/lvm-lvpoll.8.end
Normal file
@@ -0,0 +1,33 @@
|
||||
.SH NOTES
|
||||
|
||||
To find the name of the pvmove LV that was created by an original
|
||||
\fBpvmove /dev/name\fP command, use the command:
|
||||
.br
|
||||
\fBlvs -a -S move_pv=/dev/name\fP.
|
||||
|
||||
.SH EXAMPLES
|
||||
|
||||
Continue polling a pvmove operation.
|
||||
.br
|
||||
.B lvm lvpoll --polloperation pvmove vg00/pvmove0
|
||||
|
||||
Abort a pvmove operation.
|
||||
.br
|
||||
.B lvm lvpoll --polloperation pvmove --abort vg00/pvmove0
|
||||
|
||||
Continue polling a mirror conversion.
|
||||
.br
|
||||
.B lvm lvpoll --polloperation convert vg00/lvmirror
|
||||
|
||||
Continue mirror repair.
|
||||
.br
|
||||
.B lvm lvpoll --polloperation convert vg/damaged_mirror --handlemissingpvs
|
||||
|
||||
Continue snapshot merge.
|
||||
.br
|
||||
.B lvm lvpoll --polloperation merge vg/snapshot_old
|
||||
|
||||
Continue thin snapshot merge.
|
||||
.br
|
||||
.B lvm lvpoll --polloperation merge_thin vg/thin_snapshot
|
||||
|
||||
@@ -1,89 +0,0 @@
|
||||
.TH "LVPOLL" "8" "LVM TOOLS #VERSION#" "Red Hat, Inc" \" -*- nroff -*-
|
||||
.SH NAME
|
||||
lvpoll \(em Internal command used by lvmpolld to complete some Logical Volume operations.
|
||||
|
||||
.SH SYNOPSIS
|
||||
.B lvm lvpoll
|
||||
.BR \-\-polloperation
|
||||
.RI { pvmove | convert | merge | merge_thin }
|
||||
.RB [ \-\-abort ]
|
||||
.RB [ \-A | \-\-autobackup
|
||||
.RI { y | n }]
|
||||
.RB [ \-\-commandprofile
|
||||
.IR ProfileName ]
|
||||
.RB [ \-d | \-\-debug ]
|
||||
.RB [ \-h | \-? | \-\-help ]
|
||||
.RB [ \-\-handlemissingpvs ]
|
||||
.RB [ \-i | \-\-interval
|
||||
.IR Seconds ]
|
||||
.RB [ \-t | \-\-test ]
|
||||
.RB [ \-v | \-\-verbose ]
|
||||
.RB [ \-\-version ]
|
||||
.IR LogicalVolume [ Path ]
|
||||
.SH DESCRIPTION
|
||||
\fBlvpoll\fP is an internal command used by \fBlvmpolld\fP(8) to monitor and
|
||||
complete \fBlvconvert\fP(8) and \fBpvmove\fP(8) operations.
|
||||
\fBlvpoll\fP itself does not initiate these operations and
|
||||
you should never normally need to invoke it directly.
|
||||
|
||||
.I LogicalVolume
|
||||
The Logical Volume undergoing conversion or, in the case of pvmove, the name of
|
||||
the internal pvmove Logical Volume (see \fBEXAMPLES\fP).
|
||||
.SH OPTIONS
|
||||
See \fBlvm\fP(8) for common options.
|
||||
.TP
|
||||
.BR \-\-polloperation " {" \fIconvert | \fImerge | \fImerge_thin | \fIpvmove }
|
||||
Mandatory option.
|
||||
\fIpvmove\fP refers to a pvmove operation that is moving data.
|
||||
\fIconvert\fP refers to an operation that is increasing the number of redundant copies of data maintained by a mirror.
|
||||
\fImerge\fP indicates a merge operation that doesn't involve thin volumes.
|
||||
\fImerge_thin\fP indicates a merge operation involving thin snapshots.
|
||||
\fBpvmove\fP(8) and \fBlvconvert\fP(8) describe how to initiate these operations.
|
||||
.TP
|
||||
.B \-\-abort
|
||||
Abort pvmove in progress. See \fBpvmove\fP(8).
|
||||
.TP
|
||||
.B \-\-handlemissingpvs
|
||||
Used when the polling operation needs to handle missing PVs to be able to
|
||||
continue. This can happen when \fBlvconvert\fP(8) is repairing a mirror
|
||||
with one or more faulty devices.
|
||||
.TP
|
||||
.BR \-i ", " \-\-interval " "\fISeconds
|
||||
Report progress at regular intervals
|
||||
|
||||
.SH EXAMPLES
|
||||
Resume polling of a pvmove operation identified by the Logical Volume vg00/pvmove0:
|
||||
.sp
|
||||
.B lvm lvpoll --polloperation pvmove vg00/pvmove0
|
||||
.P
|
||||
Abort the same pvmove operation:
|
||||
.sp
|
||||
.B lvm lvpoll --polloperation pvmove --abort vg00/pvmove0
|
||||
.P
|
||||
To find out the name of the pvmove Logical Volume resulting from an original
|
||||
\fBpvmove /dev/sda1\fP command you may use the following \fBlvs\fP command.
|
||||
(Remove the parentheses from the LV name.)
|
||||
.sp
|
||||
.B lvs -a -S move_pv=/dev/sda1
|
||||
.P
|
||||
Resume polling of mirror conversion vg00/lvmirror:
|
||||
.sp
|
||||
.B lvm lvpoll --polloperation convert vg00/lvmirror
|
||||
.P
|
||||
Complete mirror repair:
|
||||
.sp
|
||||
.B lvm lvpoll --polloperation convert vg/damaged_mirror --handlemissingpvs
|
||||
.P
|
||||
Process snapshot merge:
|
||||
.sp
|
||||
.B lvm lvpoll --polloperation merge vg/snapshot_old
|
||||
.P
|
||||
Finish thin snapshot merge:
|
||||
.sp
|
||||
.B lvm lvpoll --polloperation merge_thin vg/thin_snapshot
|
||||
.SH SEE ALSO
|
||||
.BR lvconvert (8),
|
||||
.BR lvm (8),
|
||||
.BR lvmpolld (8),
|
||||
.BR lvs (8),
|
||||
.BR pvmove (8)
|
||||
258
man/lvm.8.in
@@ -45,6 +45,9 @@ A file containing a simple script with one command per line
|
||||
can also be given on the command line. The script can also be
|
||||
executed directly if the first line is #! followed by the absolute
|
||||
path of \fBlvm\fP.
|
||||
.P
|
||||
Additional hyphens within option names are ignored. For example,
|
||||
\fB\-\-readonly\fP and \fB\-\-read\-only\fP are both accepted.
|
||||
.
|
||||
.SH BUILT-IN COMMANDS
|
||||
.
|
||||
@@ -238,261 +241,6 @@ The following commands are not implemented in LVM2 but might be
|
||||
in the future:
|
||||
.BR lvmsadc ", " lvmsar ", " pvdata .
|
||||
.
|
||||
.SH OPTIONS
|
||||
.
|
||||
The following options are available for many of the commands.
|
||||
They are implemented generically and documented here rather
|
||||
than repeated on individual manual pages.
|
||||
.P
|
||||
Additional hyphens within option names are ignored. For example,
|
||||
\fB\-\-readonly\fP and \fB\-\-read\-only\fP are both accepted.
|
||||
.
|
||||
.HP
|
||||
.BR \-h | \-? | \-\-help
|
||||
.br
|
||||
Display the help text.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-version
|
||||
.br
|
||||
Display version information.
|
||||
.
|
||||
.HP
|
||||
.BR \-v | \-\-verbose
|
||||
.br
|
||||
Set verbose level. Repeat from 1 to 3 times to increase the detail
|
||||
of messages sent to stdout and stderr. Overrides config file setting.
|
||||
.
|
||||
.HP
|
||||
.BR \-d | \-\-debug
|
||||
.br
|
||||
Set debug level. Repeat from 1 to 6 times to increase the detail of
|
||||
messages sent to the log file and/or syslog (if configured).
|
||||
Overrides config file setting.
|
||||
.
|
||||
.HP
|
||||
.BR \-q | \-\-quiet
|
||||
.br
|
||||
Suppress output and log messages.
|
||||
Overrides \fB\-d\fP and \fB\-v\fP.
|
||||
Repeat once to also suppress any prompts with answer 'no'.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-yes
|
||||
.br
|
||||
Don't prompt for confirmation interactively but instead always assume the
|
||||
answer is 'yes'. Take great care if you use this!
|
||||
.
|
||||
.HP
|
||||
.BR \-t | \-\-test
|
||||
.br
|
||||
Run in test mode. Commands will not update metadata.
|
||||
This is implemented by disabling all metadata writing but nevertheless
|
||||
returning success to the calling function. This may lead to unusual
|
||||
error messages in multi-stage operations if a tool relies on reading
|
||||
back metadata it believes has changed but hasn't.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-driverloaded
|
||||
.RB { y | n }
|
||||
.br
|
||||
Whether or not the device-mapper kernel driver is loaded.
|
||||
If you set this to \fBn\fP, no attempt will be made to contact the driver.
|
||||
.
|
||||
.HP
|
||||
.BR \-A | \-\-autobackup
|
||||
.RB { y | n }
|
||||
.br
|
||||
Whether or not to metadata should be backed up automatically after a change.
|
||||
You are strongly advised not to disable this!
|
||||
See \fBvgcfgbackup\fP(8).
|
||||
.
|
||||
.HP
|
||||
.BR \-P | \-\-partial
|
||||
.br
|
||||
When set, the tools will do their best to provide access to Volume Groups
|
||||
that are only partially available (one or more Physical Volumes belonging
|
||||
to the Volume Group are missing from the system). Where part of a logical
|
||||
volume is missing, \fI\%/dev/ioerror\fP will be substituted, and you could use
|
||||
\fBdmsetup\fP(8) to set this up to return I/O errors when accessed,
|
||||
or create it as a large block device of nulls. Metadata may not be
|
||||
changed with this option. To insert a replacement Physical Volume
|
||||
of the same or large size use \fBpvcreate \-u\fP to set the uuid to
|
||||
match the original followed by \fBvgcfgrestore\fP(8).
|
||||
.
|
||||
.HP
|
||||
.BR \-S | \-\-select
|
||||
.IR Selection
|
||||
.br
|
||||
For reporting commands, display only rows that match \fISelection\fP criteria.
|
||||
All rows are displayed with the additional "selected" column (\fB-o selected\fP)
|
||||
showing 1 if the row matches the \fISelection\fP and 0 otherwise. For non-reporting
|
||||
commands which process LVM entities, the selection can be used to match items
|
||||
to process. See \fBSelection\fP section in \fBlvmreport\fP(7) man page for more
|
||||
information about the way the selection criteria are constructed.
|
||||
.
|
||||
.HP
|
||||
.BR \-M | \-\-metadatatype
|
||||
.IR Type
|
||||
.br
|
||||
Specifies which \fItype\fP of on-disk metadata to use, such as \fBlvm1\fP
|
||||
or \fBlvm2\fP, which can be abbreviated to \fB1\fP or \fB2\fP respectively.
|
||||
The default (\fBlvm2\fP) can be changed by setting \fBformat\fP
|
||||
in the \fBglobal\fP section of the config file \fBlvm.conf\fP(5).
|
||||
.
|
||||
.HP
|
||||
.BR \-\-ignorelockingfailure
|
||||
.br
|
||||
This lets you proceed with read-only metadata operations such as
|
||||
\fBlvchange \-ay\fP and \fBvgchange \-ay\fP even if the locking module fails.
|
||||
One use for this is in a system init script if the lock directory
|
||||
is mounted read-only when the script runs.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-ignoreskippedcluster
|
||||
.br
|
||||
Use to avoid exiting with an non-zero status code if the command is run
|
||||
without clustered locking and some clustered Volume Groups have to be
|
||||
skipped over.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-readonly
|
||||
.br
|
||||
Run the command in a special read-only mode which will read on-disk
|
||||
metadata without needing to take any locks. This can be used to peek
|
||||
inside metadata used by a virtual machine image while the virtual
|
||||
machine is running.
|
||||
It can also be used to peek inside the metadata of clustered Volume
|
||||
Groups when clustered locking is not configured or running. No attempt
|
||||
will be made to communicate with the device-mapper kernel driver, so
|
||||
this option is unable to report whether or not Logical Volumes are
|
||||
actually in use.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-foreign
|
||||
.br
|
||||
Cause the command to access foreign VGs, that would otherwise be skipped.
|
||||
It can be used to report or display a VG that is owned by another host.
|
||||
This option can cause a command to perform poorly because lvmetad caching
|
||||
is not used and metadata is read from disks.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-shared
|
||||
.br
|
||||
Cause the command to access shared VGs, that would otherwise be skipped
|
||||
when lvmlockd is not being used. It can be used to report or display a
|
||||
lockd VG without locking. Applicable only if LVM is compiled with lockd
|
||||
support.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-addtag
|
||||
.IR Tag
|
||||
.br
|
||||
Add the tag \fITag\fP to a PV, VG or LV.
|
||||
Supply this argument multiple times to add more than one tag at once.
|
||||
A tag is a word that can be used to group LVM2 objects of the same type
|
||||
together.
|
||||
Tags can be given on the command line in place of PV, VG or LV
|
||||
arguments. Tags should be prefixed with @ to avoid ambiguity.
|
||||
Each tag is expanded by replacing it with all objects possessing
|
||||
that tag which are of the type expected by its position on the command line.
|
||||
PVs can only possess tags while they are part of a Volume Group:
|
||||
PV tags are discarded if the PV is removed from the VG.
|
||||
As an example, you could tag some LVs as \fBdatabase\fP and others
|
||||
as \fBuserdata\fP and then activate the database ones
|
||||
with \fBlvchange \-ay @database\fP.
|
||||
Objects can possess multiple tags simultaneously.
|
||||
Only the new LVM2 metadata format supports tagging: objects using the
|
||||
LVM1 metadata format cannot be tagged because the on-disk format does not
|
||||
support it.
|
||||
Characters allowed in tags are:
|
||||
.BR A - Z
|
||||
.BR a - z
|
||||
.BR 0 - 9
|
||||
.BR "_ + . -"
|
||||
and as of version 2.02.78 the following characters are also accepted:
|
||||
.BR "/ = ! : # &" .
|
||||
.
|
||||
.HP
|
||||
.BR \-\-deltag
|
||||
.IR Tag
|
||||
.br
|
||||
Delete the tag \fITag\fP from a PV, VG or LV, if it's present.
|
||||
Supply this argument multiple times to remove more than one tag at once.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-alloc
|
||||
.RB { anywhere | contiguous | cling | inherit | normal }
|
||||
.br
|
||||
Selects the allocation policy when a command needs to allocate
|
||||
Physical Extents from the Volume Group.
|
||||
Each Volume Group and Logical Volume has an allocation policy defined.
|
||||
The default for a Volume Group is \fBnormal\fP which applies
|
||||
common-sense rules such as not placing parallel stripes on the same
|
||||
Physical Volume. The default for a Logical Volume is \fBinherit\fP
|
||||
which applies the same policy as for the Volume Group. These policies can
|
||||
be changed using \fBlvchange\fP(8) and \fBvgchange\fP(8) or overridden
|
||||
on the command line of any command that performs allocation.
|
||||
The \fBcontiguous\fP policy requires that new Physical Extents be placed adjacent
|
||||
to existing Physical Extents.
|
||||
The \fBcling\fP policy places new Physical Extents on the same Physical
|
||||
Volume as existing Physical Extents in the same stripe of the Logical Volume.
|
||||
If there are sufficient free Physical Extents to satisfy
|
||||
an allocation request but \fBnormal\fP doesn't use them,
|
||||
\fBanywhere\fP will - even if that reduces performance by
|
||||
placing two stripes on the same Physical Volume.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-commandprofile
|
||||
.IR ProfileName
|
||||
.br
|
||||
Selects the command configuration profile to use when processing an LVM command.
|
||||
See also \fBlvm.conf\fP(5) for more information about \fBcommand profile config\fP and
|
||||
the way it fits with other LVM configuration methods. Using \fB\-\-commandprofile\fP
|
||||
option overrides any command profile specified via \fBLVM_COMMAND_PROFILE\fP
|
||||
environment variable.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-metadataprofile
|
||||
.IR ProfileName
|
||||
.br
|
||||
Selects the metadata configuration profile to use when processing an LVM command.
|
||||
When using metadata profile during Volume Group or Logical Volume creation,
|
||||
the metadata profile name is saved in metadata. When such Volume Group or Logical
|
||||
Volume is processed next time, the metadata profile is automatically applied
|
||||
and the use of \fB\-\-metadataprofile\fP option is not necessary. See also
|
||||
\fBlvm.conf\fP(5) for more information about \fBmetadata profile config\fP and the
|
||||
way it fits with other LVM configuration methods.
|
||||
.
|
||||
.HP
|
||||
.BR \-\-profile
|
||||
.IR ProfileName
|
||||
.br
|
||||
A short form of \fB\-\-metadataprofile\fP for \fBvgcreate\fP, \fBlvcreate\fP,
|
||||
\fBvgchange\fP and \fBlvchange\fP command and a short form of \fB\-\-commandprofile\fP
|
||||
for any other command (with the exception of \fBlvmconfig\fP command where the
|
||||
\fB\-\-profile\fP has special meaning, see \fBlvmconfig\fP(8) for more information).
|
||||
.
|
||||
.HP
|
||||
.BR \-\-reportformat
|
||||
.IR {basic|json}
|
||||
.br
|
||||
Overrides current output format for reports which is defined globally by
|
||||
\fBreport/output_format\fP configuration setting in \fBlvm.conf\fP(5).
|
||||
The \fBbasic\fP format is the original format with columns and rows and
|
||||
if there is more than one report per command, each report is prefixed
|
||||
with report's name for identification. The \fBjson\fP stands for report
|
||||
output in JSON format.
|
||||
.HP
|
||||
.BR \-\-config
|
||||
.IR ConfigurationString
|
||||
.br
|
||||
Uses the ConfigurationString as direct string representation of the configuration
|
||||
to override the existing configuration. The ConfigurationString is of exactly
|
||||
the same format as used in any LVM configuration file. See \fBlvm.conf\fP(5)
|
||||
for more information about \fBdirect config override on command line\fP and the
|
||||
way it fits with other LVM configuration methods.
|
||||
.
|
||||
.SH VALID NAMES
|
||||
.
|
||||
The valid characters for VG and LV names are:
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
.TH LVMCHANGE 8 "LVM TOOLS #VERSION#" "Sistina Software UK" \" -*- nroff -*-
|
||||
.SH NAME
|
||||
lvmchange \(em change attributes of the logical volume manager
|
||||
.SH SYNOPSIS
|
||||
.B lvmchange
|
||||
.SH DESCRIPTION
|
||||
lvmchange is not currently supported under LVM2, although
|
||||
\fBdmsetup\fP(8) has a \fBremove_all\fP command.
|
||||
.SH SEE ALSO
|
||||
.BR dmsetup (8)
|
||||
3
man/lvmconfig.8.des
Normal file
@@ -0,0 +1,3 @@
|
||||
lvmconfig produces formatted output from the LVM configuration tree. The
|
||||
sources of the configuration data include \fBlvm.conf\fP(5) and command
|
||||
line settings from \-\-config.
|
||||
@@ -1,225 +0,0 @@
|
||||
.TH "LVMCONFIG" "8" "LVM TOOLS #VERSION#" "Red Hat, Inc" "\""
.SH "NAME"
lvmconfig, lvm dumpconfig, lvm config \(em Display LVM configuration
.SH SYNOPSIS
.
.ad l
.B lvmconfig
.RB [ \-f | \-\-file
.IR Filename ]
.RB [ \-\-type
.RB { current | default | diff | full |\: list | missing | new \c
.RB | profilable | profilable-command | profilable-metadata }]
.RB [ \-\-atversion
.IR Version ]
.RB [ \-\-sinceversion
.IR Version ]
.RB [ \-\-ignoreadvanced ]
.RB [ \-\-ignoreunsupported ]
.RB [ \-\-ignorelocal ]
.RB [ \-l | \-\-list ]
.RB [ \-\-config
.IR ConfigurationString ]
.RB [ \-\-commandprofile
.IR ProfileName ]
.RB [ \-\-profile
.IR ProfileName ]
.RB [ \-\-metadataprofile
.IR ProfileName ]
.RB [ \-\-mergedconfig ]
.RB [ \-\-showdeprecated ]
.RB [ \-\-showunsupported ]
.RB [ \-\-validate ]
.RB [ \-\-withsummary ]
.RB [ \-\-withcomments ]
.RB [ \-\-withspaces ]
.RB [ \-\-withversions ]
.RB [ ConfigurationNode... ]
.ad b
.
.SH DESCRIPTION
lvmconfig produces formatted output from the LVM configuration tree.
The command was added in release 2.02.119 and has an identical longer form
\fBlvm dumpconfig\fP.

.SH OPTIONS
.TP
.BR \-f ", " \-\-file " \fIFilename"
Send output to a file named \fIFilename\fP.

.TP
.BR \-l ", " \-\-list
List configuration settings with a summarizing comment. This is the same as using
\fBlvmconfig \-\-type list \-\-withsummary\fP.

.TP
.BR \-\-type " {" current | default | diff | full | list | missing | new | profilable |\: profilable-command | profilable-metadata }
Select the type of configuration to display. The configuration settings
displayed have either default values or currently-used values assigned, based on
the type selected. If no type is selected, \fB\-\-type current\fP is used
by default. Whenever a configuration setting with a default value is
commented out, it means the setting does not have any concrete default
value defined. Output can be saved and used as a proper \fBlvm.conf\fP(5)
file.
.RS
.IP \fBcurrent\fP 3
Display the current \fBlvm.conf\fP configuration merged with any \fBtag
config\fP if used. See also \fBlvm.conf\fP(5) for more info about LVM
configuration methods.
.IP \fBdefault\fP 3
Display all possible configuration settings with default values assigned.
.IP \fBdiff\fP 3
Display all configuration settings for which the values used differ from defaults.
The value assigned for each configuration setting is the value currently used.
Using this type also implies the use of the \fB\-\-mergedconfig\fP option.
This is effectively the minimal LVM configuration that preserves the
currently configured behaviour.
.IP \fBfull\fP 3
Display the full configuration tree - a combination of the current configuration
tree (\fB\-\-type current\fP) and the tree of settings for which default values are
used (\fB\-\-type missing\fP). This is exactly the configuration tree that
LVM2 uses during command execution. Using this type also implies
the use of the \fB\-\-mergedconfig\fP option. If comments are displayed
(see the \fB\-\-withcomments\fP and \fB\-\-withsummary\fP options), then
for each setting found in the existing configuration and for which defaults
are not used, an extra comment line is printed to denote this.
.IP \fBlist\fP 3
Display a plain list of configuration settings.
.IP \fBmissing\fP 3
Display all configuration settings with default values assigned which are
missing in the configuration currently used and for which LVM automatically
falls back to these default values.
.IP \fBnew\fP 3
Display all new configuration settings introduced in the current LVM version
or in a specific version as defined by the \fB\-\-atversion\fP option.
.IP \fBprofilable\fP 3
Display all profilable configuration settings with default values assigned.
See \fBlvm.conf\fP(5) for more info about the \fBprofile config\fP method.
.IP \fBprofilable-command\fP 3
Display all profilable configuration settings with default values assigned
that can be used in a command profile. This is a subset of the settings displayed
by \fB\-\-type profilable\fP.
.IP \fBprofilable-metadata\fP 3
Display all profilable configuration settings with default values assigned
that can be used in a metadata profile. This is a subset of the settings displayed
by \fB\-\-type profilable\fP.
.RE
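For example, typical invocations look like this (an optional ConfigurationNode
argument such as \fBdevices/filter\fP restricts the output to that subtree):
.br
# lvmconfig \-\-type current devices/filter
.br
# lvmconfig \-\-type diff
.br
# lvmconfig \-\-type full \-\-withsummary global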
.TP
.BI \-\-atversion " Version"
Specify an LVM version in x.y.z format where x is the major version,
y is the minor version and z is the patchlevel (e.g. 2.2.106).
When the configuration is displayed, only the configuration settings recognized
at this LVM version are considered. This can be used
to display a configuration that a certain LVM version understands and
which does not contain any newer settings for which LVM would
issue a warning message when checking the configuration.

.TP
.BI \-\-sinceversion " Version"
Specify an LVM version in x.y.z format where x is the major version,
y is the minor version and z is the patchlevel (e.g. 2.2.106).
This option is currently applicable only with \fB\-\-type new\fP
to display all configuration settings introduced since the given version.
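Illustrative invocations (the version numbers are examples only):
.br
# lvmconfig \-\-type default \-\-atversion 2.2.112
.br
# lvmconfig \-\-type new \-\-sinceversion 2.2.120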
.TP
.B \-\-ignoreadvanced
Exclude advanced configuration settings from the output.

.TP
.B \-\-ignoreunsupported
Exclude unsupported configuration settings from the output. These settings are
either used for debugging and development purposes only or their support is not
yet complete and they are not meant to be used in production. The \fBcurrent\fP
and \fBdiff\fP types include unsupported settings in their output by default;
all the other types ignore unsupported settings.

.TP
.B \-\-ignorelocal
Ignore the local section.

.TP
.BI \-\-config " ConfigurationString"
Use \fIConfigurationString\fP to override the existing configuration.
This configuration is then applied for the lvmconfig command itself.
See also \fBlvm.conf\fP(5) for more info about the \fBconfig cascade\fP.

.TP
.BI \-\-commandprofile " ProfileName"
Use \fIProfileName\fP to override the existing configuration.
This configuration is then applied for the lvmconfig command itself.
See also the \fB\-\-mergedconfig\fP option and \fBlvm.conf\fP(5) for
more info about the \fBconfig cascade\fP.

.TP
.BI \-\-profile " ProfileName"
The same as using \fB\-\-commandprofile\fP, but the configuration is not
applied for the lvmconfig command itself.

.TP
.BI \-\-metadataprofile " ProfileName"
Use \fIProfileName\fP to override the existing configuration.
The configuration defined in a metadata profile has no effect on
the lvmconfig command itself; lvmconfig only displays the configuration.
See also the \fB\-\-mergedconfig\fP option and \fBlvm.conf\fP(5) for more
info about the \fBconfig cascade\fP.

.TP
.B \-\-mergedconfig
When the lvmconfig command is run with the \fB\-\-config\fP,
\fB\-\-commandprofile\fP (or the \fBLVM_COMMAND_PROFILE\fP
environment variable), \fB\-\-profile\fP or \fB\-\-metadataprofile\fP
option, merge all the contents of the \fBconfig cascade\fP before displaying it.
Without the \fB\-\-mergedconfig\fP option, only the configuration at
the front of the cascade is displayed. See also \fBlvm.conf\fP(5) for more
info about the \fBconfig cascade\fP.
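For example, to see an override merged with the current configuration
(the override shown is illustrative):
.br
# lvmconfig \-\-config 'activation { udev_sync = 0 }' \-\-mergedconfig \-\-type current activation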
.TP
.B \-\-showdeprecated
Include deprecated configuration settings in the output. Each of these settings
was deprecated in a particular version. If a specific version is given
with the \fB\-\-atversion\fP option, deprecated settings are automatically included
if that version is lower than the version in which the settings were
deprecated. The \fBcurrent\fP and \fBdiff\fP types include deprecated settings
in their output by default; all the other types ignore deprecated settings.

.TP
.B \-\-showunsupported
Include unsupported configuration settings in the output. These settings
are either used for debugging or development purposes only or their support
is not yet complete and they are not meant to be used in production. The
\fBcurrent\fP and \fBdiff\fP types include unsupported settings in their
output by default; all the other types ignore unsupported settings.

.TP
.B \-\-validate
Validate the configuration currently used and exit with an appropriate
return code. The validation is done only for the configuration
at the front of the \fBconfig cascade\fP. To validate the whole
merged configuration tree, also use the \fB\-\-mergedconfig\fP option.
The validation is done even if the \fBconfig/checks\fP option in
\fBlvm.conf\fP(5) is disabled.
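A quick validation check might look like this; a zero exit status indicates
a valid configuration:
.br
# lvmconfig \-\-validate \-\-mergedconfig
.br
# echo $?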
.TP
.B \-\-withsummary
Display a one-line comment for each configuration node.

.TP
.B \-\-withcomments
Display a full comment for each configuration node. For deprecated
settings, comments about the deprecation are also displayed.

.TP
.B \-\-withspaces
Where appropriate, add more spaces in the output for better readability.

.TP
.B \-\-withversions
Also display a comment containing the version of introduction for
each configuration node. If the setting is deprecated, also display
the version since which it has been deprecated.

.SH SEE ALSO
.BR lvm (8)
.BR lvmconf (8)
.BR lvm.conf (5)
7
man/lvmdiskscan.8.des
Normal file
@@ -0,0 +1,7 @@
lvmdiskscan scans all SCSI and (E)IDE disks, multiple devices (MD) and
other block devices in the system, looking for LVM PVs. The size reported
is the real device size. Define a filter in \fBlvm.conf\fP(5) to restrict
the scan, for example to avoid a CD-ROM.

This command is deprecated; use \fBpvs\fP(8) instead.
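The equivalent information is available from pvs, for example:
.br
# pvs \-o pv_name,dev_size,pv_size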
@@ -1,27 +0,0 @@
.TH LVMDISKSCAN 8 "LVM TOOLS #VERSION#" "Sistina Software UK" \" -*- nroff -*-
.SH NAME
lvmdiskscan \(em scan for all devices visible to LVM2
.SH SYNOPSIS
.B lvmdiskscan
.RB [ \-\-commandprofile
.IR ProfileName ]
.RB [ \-d | \-\-debug ]
.RB [ \-h | \-? | \-\-help ]
.RB [ \-l | \-\-lvmpartition ]
.RB [ \-v | \-\-verbose ]
.SH DESCRIPTION
lvmdiskscan scans all SCSI, (E)IDE disks, multiple devices and a bunch
of other block devices in the system looking for LVM physical volumes.
The size reported is the real device size.
Define a filter in \fBlvm.conf\fP(5) to restrict
the scan to avoid a CD ROM, for example.
.SH OPTIONS
See \fBlvm\fP(8) for common options.
.TP
.BR \-l ", " \-\-lvmpartition
Only reports Physical Volumes.
.SH SEE ALSO
.BR lvm (8),
.BR lvm.conf (5),
.BR pvscan (8),
.BR vgscan (8)
@@ -573,25 +573,37 @@ To place the lvmlock LV on a specific device, create the VG with only that
device, then use vgextend to add other devices.

.SS shared LVs
.SS LV activation

When an LV is used concurrently from multiple hosts (e.g. by a
multi\-host/cluster application or file system), the LV can be activated
on multiple hosts concurrently using a shared lock.
In a shared VG, activation changes involve locking through lvmlockd, and
the following values are possible with lvchange/vgchange -a:

To activate the LV with a shared lock: lvchange \-asy vg/lv.
.IP \fBy\fP|\fBey\fP
The command activates the LV in exclusive mode, allowing a single host
to activate the LV. Before activating the LV, the command uses lvmlockd
to acquire an exclusive lock on the LV. If the lock cannot be acquired,
the LV is not activated and an error is reported. This would happen if
the LV is active on another host.

With lvmlockd, an unspecified activation mode is always exclusive, i.e.
\-ay defaults to \-aey.

If the LV type does not allow the LV to be used concurrently from multiple
hosts, then a shared activation lock is not allowed and the lvchange
command will report an error. LV types that cannot be used concurrently
.IP \fBsy\fP
The command activates the LV in shared mode, allowing multiple hosts to
activate the LV concurrently. Before activating the LV, the
command uses lvmlockd to acquire a shared lock on the LV. If the lock
cannot be acquired, the LV is not activated and an error is reported.
This would happen if the LV is active exclusively on another host. If the
LV type prohibits shared access, such as a snapshot, the command will
report an error and fail.
The shared mode is intended for a multi\-host/cluster application or
file system.
LV types that cannot be used concurrently
from multiple hosts include thin, cache, raid, mirror, and snapshot.

lvextend on an LV with a shared lock is not yet allowed. The LV must be
deactivated, or activated exclusively, to run lvextend.

.IP \fBn\fP
The command deactivates the LV. After deactivating the LV, the command
uses lvmlockd to release the current lock on the LV.
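As an illustration (the VG and LV names are hypothetical), the activation
modes described above correspond to:
.br
# lvchange \-aey vg01/lv1   (exclusive activation on a single host)
.br
# lvchange \-asy vg01/lv1   (shared activation, allowed on multiple hosts)
.br
# lvchange \-an vg01/lv1    (deactivate and release the lock)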
.SS recover from lost PV holding sanlock locks

3
man/lvmsadc.8.des
Normal file
@@ -0,0 +1,3 @@
lvmsadc is not currently supported in LVM. The device-mapper statistics
facility provides similar performance metrics using the \fBdmstats\fP(8)
command.
@@ -1,13 +0,0 @@
.TH "LVMSADC" "8" "LVM TOOLS #VERSION#" "Red Hat, Inc" "\""

.SH "NAME"
lvmsadc \(em LVM system activity data collector

.SH "SYNOPSIS"
.B lvmsadc

.SH "DESCRIPTION"
lvmsadc is not currently supported under LVM2.

.SH "SEE ALSO"
.BR lvm (8)
3
man/lvmsar.8.des
Normal file
@@ -0,0 +1,3 @@
lvmsar is not currently supported in LVM. The device-mapper statistics
facility provides similar performance metrics using the \fBdmstats\fP(8)
command.
@@ -1,13 +0,0 @@
.TH "LVMSAR" "8" "LVM TOOLS #VERSION#" "Red Hat, Inc" "\""

.SH "NAME"
lvmsar \(em LVM system activity reporter

.SH "SYNOPSIS"
.B lvmsar

.SH "DESCRIPTION"
lvmsar is not currently supported under LVM2.

.SH "SEE ALSO"
.BR lvm (8)
@@ -157,17 +157,17 @@ The --thinpool argument specifies which thin pool will
contain the ThinLV.
.fi

.B lvcreate \-n ThinLV \-V VirtualSize \-\-thinpool VG/ThinPoolLV
.B lvcreate \-n ThinLV \-V VirtualSize \-\-thinpool ThinPoolLV VG

.I Example
.br
Create a thin LV in a thin pool:
.br
# lvcreate \-n thin1 \-V 1T \-\-thinpool vg/pool0
# lvcreate \-n thin1 \-V 1T \-\-thinpool pool0 vg

Create another thin LV in the same thin pool:
.br
# lvcreate \-n thin2 \-V 1T \-\-thinpool vg/pool0
# lvcreate \-n thin2 \-V 1T \-\-thinpool pool0 vg

# lvs vg/thin1 vg/thin2
LV VG Attr LSize Pool Origin Data%
@@ -184,9 +184,9 @@ when creating a thin snapshot.
.br
A size argument will cause an old COW snapshot to be created.

.B lvcreate \-n SnapLV \-s VG/ThinLV
.B lvcreate \-n SnapLV \-\-snapshot VG/ThinLV
.br
.B lvcreate \-n SnapLV \-s VG/PrevSnapLV
.B lvcreate \-n SnapLV \-\-snapshot VG/PrevSnapLV

.I Example
.br
@@ -278,25 +278,6 @@ or vgchange to activate thin snapshots with the "k" attribute.

\&
.SS Alternate syntax for specifying type thin\-pool

\&

The fully specified syntax for creating a thin pool LV shown above is:

.B lvconvert \-\-type thin-pool \-\-poolmetadata VG/ThinMetaLV VG/ThinDataLV

An existing LV is converted to a thin pool by changing its type to
thin-pool. An alternate syntax may be used for the same operation:

.B lvconvert \-\-thinpool VG/ThinDataLV \-\-poolmetadata VG/ThinMetaLV

The thin-pool type is inferred by lvm; the --thinpool option is not an
alias for --type thin-pool. The use of the --thinpool option here is
different from the use of the --thinpool option when creating a thin LV,
where it specifies the pool in which the thin LV is created.
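With concrete (hypothetical) names, the two equivalent forms above would be:
.br
# lvconvert \-\-type thin-pool \-\-poolmetadata vg00/meta0 vg00/data0
.br
# lvconvert \-\-thinpool vg00/data0 \-\-poolmetadata vg00/meta0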
.SS Automatic pool metadata LV

\&
@@ -1234,7 +1215,7 @@ and creates a thin LV in the new pool.
.br
\-V VirtualSize specifies the virtual size of the thin LV.

.B lvcreate \-V VirtualSize \-L LargeSize
.B lvcreate \-\-type thin \-V VirtualSize \-L LargeSize
.RS
.B \-n ThinLV \-\-thinpool VG/ThinPoolLV
.RE
14
man/lvreduce.8.des
Normal file
@@ -0,0 +1,14 @@
lvreduce reduces the size of an LV. The freed logical extents are returned
to the VG to be used by other LVs. A copy\-on\-write snapshot LV can also
be reduced if less space is needed to hold COW blocks. Use
\fBlvconvert\fP(8) to change the number of data images in a RAID or
mirrored LV.

Be careful when reducing an LV's size, because data in the reduced area is
lost. Ensure that any file system on the LV is resized \fBbefore\fP
running lvreduce so that the removed extents are not in use by the file
system.

Sizes will be rounded if necessary. For example, the LV size must be an
exact number of extents, and the size of a striped segment must be a
multiple of the number of stripes.
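A minimal sketch of the safe workflow (names and sizes are hypothetical);
\fB\-\-resizefs\fP shrinks the file system via \fBfsadm\fP(8) before reducing the LV:
.br
# lvreduce \-\-resizefs \-L 500M vg00/lvol1
.br
Alternatively, for an ext4 file system, resize the file system first and then
reduce the LV:
.br
# resize2fs /dev/vg00/lvol1 500M
.br
# lvreduce \-L 500M vg00/lvol1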
5
man/lvreduce.8.end
Normal file
@@ -0,0 +1,5 @@
.SH EXAMPLES

Reduce the size of an LV by 3 logical extents:
.br
.B lvreduce \-l \-3 vg00/lvol1
@@ -1,110 +0,0 @@
.TH LVREDUCE 8 "LVM TOOLS #VERSION#" "Sistina Software UK" \" -*- nroff -*-
.SH NAME
lvreduce \(em reduce the size of a logical volume
.SH SYNOPSIS
.B lvreduce
.RB [ \-A | \-\-autobackup
.RI { y | n }]
.RB [ \-\-commandprofile
.IR ProfileName ]
.RB [ \-d | \-\-debug ]
.RB [ \-h | \-\-help ]
.RB [ \-t | \-\-test ]
.RB [ \-v | \-\-verbose ]
.RB [ \-\-version ]
.RB [ \-f | \-\-force ]
.RB [ \-\-noudevsync ]
.RB { \-l | \-\-extents
.RI [ \- ] LogicalExtentsNumber [ % { VG | LV | FREE | ORIGIN }]
.RB |
.BR \-L | \-\-size
.RI [ \- ] LogicalVolumeSize [ bBsSkKmMgGtTpPeE ]}
.RB [ \-n | \-\-nofsck ]
.RB [ \-\-reportformat
.RB { basic | json }]
.RB [ \-r | \-\-resizefs ]
.IR LogicalVolume { Name | Path }
.SH DESCRIPTION
lvreduce allows you to reduce the size of a logical volume.
Be careful when reducing a logical volume's size, because data in the
reduced part is lost!!!
.br
You should therefore ensure that any filesystem on the volume is
resized
.I before
running lvreduce so that the extents that are to be removed are not in use.
.br
Shrinking snapshot logical volumes (see
.BR lvcreate (8)
for information to create snapshots) is supported as well.
But to change the number of copies in a mirrored logical
volume use
.BR lvconvert (8).
.br
Sizes will be rounded if necessary - for example, the volume size must
be an exact number of extents and the size of a striped segment must
be a multiple of the number of stripes.
.br
.SH OPTIONS
See \fBlvm\fP(8) for common options.
.TP
.BR \-f ", " \-\-force
Force size reduction without prompting even when it may cause data loss.
.TP
.IR \fB\-l ", " \fB\-\-extents " [" \- ] LogicalExtentsNumber [ % { VG | LV | FREE | ORIGIN }]
Reduce or set the logical volume size in units of logical extents.
With the \fI-\fP sign the value will be subtracted from
the logical volume's actual size and without it the value will be taken
as an absolute size.
The total number of physical extents freed will be greater than this logical
value if, for example, the volume is mirrored.
The number can also be expressed as a percentage of the total space
in the Volume Group with the suffix \fI%VG\fP, relative to the existing
size of the Logical Volume with the suffix \fI%LV\fP, as a percentage of the
remaining free space in the Volume Group with the suffix \fI%FREE\fP, or (for
a snapshot) as a percentage of the total space in the Origin Logical
Volume with the suffix \fI%ORIGIN\fP.
The resulting value for the subtraction is rounded downward, for the absolute
size it is rounded upward.
N.B. In a future release, when expressed as a percentage with VG or FREE, the
number will be treated as an approximate total number of physical extents to be
freed (including extents used by any mirrors, for example). The code may
currently release more space than you might otherwise expect.
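As an illustration of these suffixes (the names are hypothetical):
.br
# lvreduce \-l \-3 vg00/lvol1    (remove 3 logical extents)
.br
# lvreduce \-l 75%LV vg00/lvol1  (set the size to 75% of the current LV size)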
.TP
.IR \fB\-L ", " \fB\-\-size " [" \- ] LogicalVolumeSize [ bBsSkKmMgGtTpPeE ]
Reduce or set the logical volume size in units of megabytes.
A size suffix of \fIk\fP for kilobytes, \fIm\fP for megabytes,
\fIg\fP for gigabytes, \fIt\fP for terabytes, \fIp\fP for petabytes
or \fIe\fP for exabytes is optional.
With the \fI\-\fP sign the value will be subtracted from
the logical volume's actual size and without it, it will be taken as
an absolute size.
.TP
.BR \-n ", " \-\-nofsck
Do not perform fsck before resizing the filesystem when the filesystem
requires it. You may need to use \fB\-\-force\fR to proceed with
this option.
.TP
.BR \-\-noudevsync
Disable udev synchronisation. The
process will not wait for notification from udev.
It will continue irrespective of any possible udev processing
in the background. You should only use this if udev is not running
or has rules that ignore the devices LVM2 creates.
.TP
.BR \-r ", " \-\-resizefs
Resize the underlying filesystem together with the logical volume using
.BR fsadm (8).
.SH Examples
Reduce the size of logical volume lvol1 in volume group vg00 by 3 logical extents:
.sp
.B lvreduce \-l \-3 vg00/lvol1
.SH SEE ALSO
.BR fsadm (8),
.BR lvchange (8),
.BR lvconvert (8),
.BR lvcreate (8),
.BR lvextend (8),
.BR lvm (8),
.BR lvresize (8),
.BR vgreduce (8)
27
man/lvremove.8.des
Normal file
@@ -0,0 +1,27 @@
lvremove removes one or more LVs. For standard LVs, this returns the
logical extents that were used by the LV to the VG for use by other LVs.

Confirmation will be requested before deactivating any active LV prior to
removal. LVs cannot be deactivated or removed while they are open (e.g.
if they contain a mounted filesystem). Removing an origin LV will also
remove all dependent snapshots.

When a single force option is used, LVs are removed without confirmation,
and the command will try to deactivate unused LVs.

To remove damaged LVs, two force options may be required (\fB-ff\fP).

\fBHistorical LVs\fP

If the configuration setting \fBmetadata/record_lvs_history\fP is enabled
and the LV being removed forms part of the history of at least one LV that
is still present, then a simplified representation of the LV will be
retained. This includes the time of removal (\fBlv_time_removed\fP
reporting field), creation time (\fBlv_time\fP), name (\fBlv_name\fP), LV
uuid (\fBlv_uuid\fP) and VG name (\fBvg_name\fP). This allows later
reporting to see the ancestry chain of thin snapshot volumes, even after
some intermediate LVs have been removed. The names of such historical LVs
acquire a hyphen as a prefix (e.g. '-lvol1') and cannot be reactivated.
Use lvremove a second time, with the hyphen, to remove the record of the
former LV completely.
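A brief sketch of working with historical LVs (the names are hypothetical;
\fBlvs \-H\fP includes historical LVs in the report):
.br
# lvremove vg00/lvol1
.br
# lvs \-H \-o name,lv_time_removed vg00
.br
# lvremove vg00/-lvol1    (also remove the historical record)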
Some files were not shown because too many files have changed in this diff.