1
0
mirror of git://sourceware.org/git/lvm2.git synced 2025-08-30 05:49:28 +03:00

Compare commits

...

63 Commits

Author SHA1 Message Date
05716c2d8a clvmd: Fix stack overflow on 64 bit ARM
Seems the amount of allocated data on stack is dependent on page size.
As the page size on aarch64 is 64kiB writing to memory allocated by
alloca results in stack overflow as at the time of allocation there are
already 2 pages allocated. Clearly 128kiB is not sufficient and at least
3 pages are needed.
2014-09-16 17:34:32 +02:00
4a853361b0 vgchange: disable cluster convert for active LVs
While we could probably reacquire some type of lock when
going from non-clustered to clustered vg, we don't have any
single road back to drop the lock and keep LV active.

For now keep it safe and prohibit conversion when LV
is active in the VG.
2014-09-16 11:42:41 +02:00
1ce21c19d5 va_list: properly pass va_list through functions
Code should not just pass va_list arg through the function
as args could be passed in many strange ways.
Use va_copy().

For details look in i.e.:

http://julipedia.meroh.net/2011/09/using-vacopy-to-safely-pass-ap.html
2014-09-16 11:42:40 +02:00
b9c16b7506 devices: Detect rotational devices.
Add dev_is_rotational() for future use by allocation code.
2014-09-16 00:44:25 +01:00
979be63f25 mirrors: Fix checks for mirror/raid/pvmove LVs.
Try to enforce consistent macro usage along these lines:

lv_is_mirror - mirror that uses the original dm-raid1 implementation
               (segment type "mirror")
lv_is_mirror_type - also includes internal mirror image and log LVs

lv_is_raid - raid volume that uses the new dm-raid implementation
             (segment type "raid")
lv_is_raid_type - also includes internal raid image / log / metadata LVs

lv_is_mirrored - LV is mirrored using either kernel implementation
                 (excludes non-mirror modes like raid5 etc.)

lv_is_pvmove - internal pvmove volume
2014-09-16 00:13:46 +01:00
829e5a4037 cmirror: fix endian issues on s390
Cmirrord has endian bugs, which cause failure to lvcreate a mirrored lv
on s390.
- data_size is uint32, should not use xlate64 to convert, which will
  cause data_size 0 after xlate.
- request_type and data_size still used by local(v5_data_switch),
  should convert later.  If request_type xlate too early, it will
  cause request_type judge error; if data_size xlate too early, it
  will cause coredump in case DM_ULOG_CLEAR_REGION.
- when receiving a packet in clog_request_from_network, vp[0] will always
  be little endian.  We could use xlate64(vp[0]) == vp[0] to decide if
  the local node is little endian or not.

Signed-off-by: Lidong Zhong<lzhong@suse.com> & Liuhua Wang <lwang@suse.com>
Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
2014-09-15 16:08:35 -05:00
e9216eedfe cleanup: fix last commit 2014-09-15 22:04:14 +01:00
2360ce3551 cleanup: Use lv_is_ macros.
Use lv_is_* macros throughout the code base, introducing
lv_is_pvmove, lv_is_locked, lv_is_converting and lv_is_merging.

lv_is_mirror_type no longer includes pvmove.
2014-09-15 21:33:53 +01:00
10a448eb2f tests: update lv_no_exists
On successful exit path remove debug.log file.
2014-09-15 13:51:19 +02:00
f435bef957 test: test there is no leak of LV on error path 2014-09-15 13:51:19 +02:00
75a5de1462 thin: check for active lv
Before calling deactivate, check the lv is actually active,
as we may reach this 'bad' error path with pool_lv inactive.
2014-09-15 13:51:19 +02:00
ef6508e9a4 WHATS_NEW for filter-related changes 2014-09-13 17:34:13 +02:00
30e0c0d863 libdm: finish the comment 2014-09-12 15:35:57 +02:00
5895657b59 libdm: fix dm_is_dm_major to not issue error about missing /proc lines for dm module.
This is probably better approach than 3880ca5eca.

If dm module is not loaded during dm_is_dm_major call, there are no
lines for dm in /proc/devices, of course. Normally, dm_is_dm_major
is called to check existing devices, hence if module is not loaded,
we can expect there's no DM device present at the same time so we
can directly return 0 here (meaning the major number being inspected
is not dm device's one).

See also https://bugzilla.redhat.com/show_bug.cgi?id=1059711.
2014-09-12 15:28:51 +02:00
25ae9383bb revert: commit 3880ca5eca
There's a better solution to this...
2014-09-12 15:28:51 +02:00
ae08a3a294 cleanup: skip unused assign
Reset of tmp_names is only needed in else{} path.
2014-09-12 13:51:31 +02:00
07b3e6cd74 cleanup: avoid strlen() we know max size
Just use max NAME_LEN size buffer and copy the name.
2014-09-12 13:51:31 +02:00
ab7977de7b cleanup: simplify _extract_image_components
Reorder test - first check for writable flag and then allocate.
2014-09-12 13:51:31 +02:00
6898131091 cleanup: missing error message 2014-09-12 13:51:31 +02:00
3e57143abd cleanup: better error messages 2014-09-12 13:51:30 +02:00
08914ed7c1 raid: destroy allocation handle on error path
Don't leak ah memory pool on error path.
2014-09-12 13:51:30 +02:00
76c3c94bd2 cleanup: update _alloc_image_component function
Return allocated volume directly instead of 1/0.
2014-09-12 13:51:30 +02:00
126463ad1f cleanup: plain code reindent
Just simple reindent and brace changes.
2014-09-12 13:51:30 +02:00
ad376e9e00 debug: add missing stack trace on error path 2014-09-12 13:51:29 +02:00
c10c16cc35 raid: use _generate_raid_name
Use new function to get implicit name validation
(so we do not exit with internal error on metadata validation).
2014-09-12 13:51:29 +02:00
2db0312455 raid: add function for name creation
Add name for construction and validation of raid subvolume
name with a given suffix.

TODO: check if reusable for mirrors as well.
2014-09-12 13:51:29 +02:00
40b7b107b1 raid: check result of get_segtype_from_string
Error here is rather highly unexpected for these types, but
stay consistent with rest of the code and don't use unchecked value.
2014-09-12 13:45:50 +02:00
08bde75093 raid: add missing archive call
Before starting to update raid metadata, archive existing unmodified one.
2014-09-12 13:45:49 +02:00
569184a3bb raid: add missing vg_revert
After failing vg_write() and suspend_lv() there was missing vg_revert() call.
2014-09-12 13:45:14 +02:00
dd1fa0e808 raid: add missing backups
Add backup() calls that were missing after successful update
of metadata.
2014-09-12 13:42:57 +02:00
15ba2afdc2 allocation: use vg memory pool
Looks like forgotten memory allocation related to VG used cmd mem pool.
2014-09-12 13:39:58 +02:00
3880ca5eca libdm: use dm-mod autoloading during dm_is_dm_major call if needed
For dm_is_dm_major to determine whether the major number given as
an argument belongs to a DM device, libdm code needs to know what
the actual DM major is to do the comparison.

It may happen that the dm-mod module is not loaded during this
call and so for the completeness let's try our best before we start
giving various errors - we can still make use of dm-mod autoloading,
though only since kernels 2.6.36 where this feature was introduced.
2014-09-12 12:49:37 +02:00
f0cafc9281 conf: add allocation/physical_extent_size config option for default PE size of VGs.
Removes a need to use "vgcreate -s <desired PE size>" all the
time just to override hardcoded default which is 4096KiB.
2014-09-12 10:09:21 +02:00
80ac8f37d6 filters: fix incorrect filter indexing in composite filter array
Caused by recent changes - a7be3b12df.
If global filter was not defined, then part of the code
creating composite filter (the cmd->lvmetad_filter) incorrectly
increased index value even if this global filter was not created
as part of the composite filter. This caused a gap with "NULL"
value in the composite filter array which ended up with the rest
of the filters after the gap to be ignored and also it caused a mem
leak when destroying the composite filter.
2014-09-11 09:30:03 +02:00
4748f4a9e4 tests: test for rename of snapshot 2014-09-10 22:59:13 +01:00
671d0ea1b1 lvmetad: Differentiate between filtered and truly missing devices.
We used to print an error message whenever we tried to deal with devices that
lvmetad knew about but were rejected by a client-side filter. Instead, we now
check whether the device is actually absent or only filtered out and only print
a warning in the latter case.
2014-09-10 22:58:22 +01:00
5f9b30d178 test: Add a test for MD component detection in pvscan --cache. 2014-09-10 22:58:12 +01:00
a7be3b12df lvmetad: Re-organise filters to properly avoid scans of component devices.
If a PV label is exposed both through a composite device (MD for example) and
through its component devices, we always want the PV that lvmetad sees to be the
composite, since this is what all LVM commands (including activation) will then
use. If pvscan --cache is triggered for multiple clones of the same PV, the last
to finish wins. This patch basically re-arranges the filters so that
component-device filters are part of the global_filter chain, not of the
client-side filter chain. This has a subtle effect on filter evaluation order,
but should not alter visible semantics in the non-lvmetad case.
2014-09-10 22:58:02 +01:00
1f0c4c811d dev-cache: Filter wipe does not guarantee a full /dev scan.
The code in dev_iter_create assumes that if a filter can be wiped, doing so will
always trigger a call to _full_scan. This is not true for composite filters
though, since they can always be wiped in principle, but there is no way to know
that a component filter inside will exist that actually triggers the scan.
2014-09-10 22:57:49 +01:00
47ff145e08 debug: turn message into debug
This message should be printed only for activation commands,
however since the handling of this flag is not correct
(rhbz 1140029) and will require further changes,
do now just a minor change and switch message into log_debug
(so it's not printed i.e. with every  'lvs -v')
2014-09-10 10:10:13 +02:00
55aa3cc813 tests: test for rename of snapshot 2014-09-09 20:17:47 +02:00
a86d9a3b30 lv_rename: actual fix for snapshot
By my rebasing mistake it's been eliminated from previous patch set.
2014-09-09 20:15:51 +02:00
c710f02e01 lv_update_and_reload: replace code sequence
Use lv_update_and_reload() and lv_update_and_reload_origin()
to handle write/suspend/commit/resume sequence.

In few places this properly handle vg_revert() after suspend failure,
and also ensures there is metadata backup after successful vg_commit().
2014-09-09 19:20:09 +02:00
e4e50863a2 lvconvert: use lv_update_and_reload
Use lib function.
2014-09-09 19:15:26 +02:00
aee8611af5 lv_manip: remove vg_revert
vg_commit is supposed to have implicit revert handling.
(however as of now it needs fixes).
2014-09-09 19:15:26 +02:00
413fc9d3e6 lv_rename: fix snapshot rename
Fix rename operation for snapshot (cow) LV.
Only the snapshot's origin has the lock and by mistake suspend
and resume has been called for the snapshot LV.
This further made volumes unusable in cluster.

So instead of suspend and resuming list of LVs,
we need to just suspend and resume origin.

As the sequence write/suspend/commit/resume
is widely used in lvm2 code base - move it to
new lv_update_and_reload function.
2014-09-09 19:15:24 +02:00
319f67b1ab cleanup: add stacktrace for error path 2014-09-08 22:36:42 +02:00
c774d9a3f3 so: make sure shared libs are built with RELRO option
In addition to using RELRO for daemons, use this option for shared
libraries. See also commit a65ab773b4.
2014-09-04 10:52:41 +02:00
b25e0086b6 post-release 2014-09-01 01:53:44 +01:00
fcb433abec pre-release 2014-09-01 01:51:47 +01:00
fa1a0d170a cleanup: drop extra ()
Pure  '==' test doesn't need extra ().
2014-08-29 13:11:35 +02:00
2a0ec5d396 cleanup: drop duplicate const
No need to specify 'const' twice in these cases.
2014-08-29 13:11:34 +02:00
19375e4fca cleanup: assignment into ()
Put is_float=1 into () - so the intention is obvious.
Remove unneeded extra check for *s != 0,
since it's already checked for either digit or '.'.
2014-08-29 13:11:34 +02:00
db77041d93 makefiles: include path missing
For deps calcs path for blkid.h needs to be known.
2014-08-29 13:10:20 +02:00
ca32920b16 WHATS_NEW 2014-08-29 13:10:20 +02:00
3c8fa2aa01 clvmd: use correctly sized buffers for sscanf
sscanf needs extra 1 char for '\0'
2014-08-29 13:10:20 +02:00
91a453de05 WHATS_NEW_DM 2014-08-29 13:10:19 +02:00
93e9b3a1d1 libdm: revert incorrect path length size for sscanf
Commit 94786a3bbf introduced
another bug - since sscanf needs extra 1 byte for \0.

Since there is no easy way to do a macro evaluation for (PATH_MAX-1)
and string concatenation of this number to get resulting (%4095s) - let's
go with easiest path and restore extra byte for 0.

Other option would be to prepare sscanf parsing string in runtime.

But lets resolve it when we look at PATH_MAX handling later...
2014-08-29 13:10:18 +02:00
2faf416e0e lvextend: Reinstate --nosync logic for mirrors.
Reinstate the logic for syncing extensions of mirrors created with
--nosync.  (Inadvertently disabled by the approximate allocation
changes.)
2014-08-28 00:40:09 +01:00
3003a9a7be WHATS_NEW 2014-08-27 16:52:32 +02:00
22bfac5dc2 cache: fix allocation size
Commit 0b3d0e79f6 caused regression
in allocation of cache pool. This patch is restoring correct size
for allocation.
2014-08-27 16:47:14 +02:00
8b9eb95ea9 cache: Clean-up error message.
It is not an internal error message to report to the user that they
cannot create a cache LV on top of a cache LV.  It is simply not
supported yet.
2014-08-24 19:44:37 -05:00
dd9700f192 post-release 2014-08-26 16:41:18 +01:00
61 changed files with 965 additions and 934 deletions

View File

@ -1 +1 @@
2.02.110(2)-git (2014-08-26)
2.02.112(2)-git (2014-09-01)

View File

@ -1 +1 @@
1.02.89-git (2014-08-26)
1.02.91-git (2014-09-01)

View File

@ -1,3 +1,32 @@
Version 2.02.112 -
=====================================
Disable vgchange of clustered attribute with any active LV in VG.
Use va_copy to properly pass va_list through functions.
Add function to detect rotational devices.
Review internal checks for mirror/raid/pvmove volumes.
Track mirror segment type with separate MIRROR flag.
Fix cmirror endian conversions.
Introduce lv_is_pvmove/locked/converting/merging macros.
Avoid leaving linear logical volume when thin pool creation fails.
Demote an error to a warning when devices known to lvmetad are filtered out.
Re-order filter evaluation, making component filters global.
Don't leak alloc_handle on raid target error path.
Properly validate raid leg names.
Archive metadata before starting their modification in raid target.
Add missing vg_revert in suspend_lv() error path in raid target.
Add missing backup of lvm2 metadata after some raid modifications.
Use vg memory pool for extent allocation.
Add allocation/physical_extent_size config option for default PE size of VGs.
Introduce common code to modify metadata and reload updated LV.
Fix rename of active snapshot volume in cluster.
Make sure shared libraries are built with RELRO option.
Version 2.02.111 - 1st September 2014
=====================================
Pass properly sized char buffers for sscanf when initializing clvmd.
Reinstate nosync logic when extending mirror. (2.02.110)
Fix total area extent calculation when allocating cache pool. (2.02.110)
Version 2.02.110 - 26th August 2014
===================================
Fix manipulation with thin-pools which are excluded via volume_list.

View File

@ -1,3 +1,11 @@
Version 1.02.91 -
====================================
Fix dm_is_dm_major to not issue error about missing /proc lines for dm module.
Version 1.02.90 - 1st September 2014
====================================
Restore proper buffer size for parsing mountinfo line (1.02.89)
Version 1.02.89 - 26th August 2014
==================================
Improve libdevmapper-event select() error handling.

View File

@ -370,6 +370,9 @@ allocation {
# first use.
# N.B. zeroing larger thin pool chunk size degrades performance.
# thin_pool_zero = 1
# Default physical extent size to use for newly created VGs (in KB).
# physical_extent_size = 4096
}
# This section that allows you to configure the nature of the

View File

@ -24,6 +24,7 @@
#include "clvmd.h"
#include "lvm-functions.h"
#include "lvm-version.h"
#include "lvm-wrappers.h"
#include "refresh_clvmd.h"
#ifdef HAVE_COROSYNC_CONFDB_H
@ -88,7 +89,7 @@ static debug_t debug = DEBUG_OFF;
static int foreground_mode = 0;
static pthread_t lvm_thread;
/* Stack size 128KiB for thread, must be bigger then DEFAULT_RESERVED_STACK */
static const size_t STACK_SIZE = 128 * 1024;
static const size_t MIN_STACK_SIZE = 128 * 1024;
static pthread_attr_t stack_attr;
static int lvm_thread_exit = 0;
static pthread_mutex_t lvm_thread_mutex;
@ -358,6 +359,7 @@ int main(int argc, char *argv[])
int clusterwide_opt = 0;
mode_t old_mask;
int ret = 1;
size_t stack_size;
struct option longopts[] = {
{ "help", 0, 0, 'h' },
@ -514,8 +516,10 @@ int main(int argc, char *argv[])
/* Initialise the LVM thread variables */
dm_list_init(&lvm_cmd_head);
stack_size = 3 * lvm_getpagesize();
stack_size = stack_size < MIN_STACK_SIZE ? MIN_STACK_SIZE : stack_size;
if (pthread_attr_init(&stack_attr) ||
pthread_attr_setstacksize(&stack_attr, STACK_SIZE)) {
pthread_attr_setstacksize(&stack_attr, stack_size)) {
log_sys_error("pthread_attr_init", "");
exit(1);
}

View File

@ -725,7 +725,7 @@ void do_lock_vg(unsigned char command, unsigned char lock_flags, char *resource)
static int get_initial_state(struct dm_hash_table *excl_uuid)
{
int lock_mode;
char lv[64], vg[64], flags[25], vg_flags[25];
char lv[65], vg[65], flags[26], vg_flags[26]; /* with space for '\0' */
char uuid[65];
char line[255];
char *lvs_cmd;

View File

@ -126,13 +126,14 @@ static int v5_endian_to_network(struct clog_request *rq)
u_rq->error = xlate32(u_rq->error);
u_rq->seq = xlate32(u_rq->seq);
u_rq->request_type = xlate32(u_rq->request_type);
u_rq->data_size = xlate64(u_rq->data_size);
rq->originator = xlate32(rq->originator);
v5_data_endian_switch(rq, 1);
u_rq->request_type = xlate32(u_rq->request_type);
u_rq->data_size = xlate32(u_rq->data_size);
return size;
}
@ -167,7 +168,7 @@ static int v5_endian_from_network(struct clog_request *rq)
u_rq->error = xlate32(u_rq->error);
u_rq->seq = xlate32(u_rq->seq);
u_rq->request_type = xlate32(u_rq->request_type);
u_rq->data_size = xlate64(u_rq->data_size);
u_rq->data_size = xlate32(u_rq->data_size);
rq->originator = xlate32(rq->originator);
@ -187,7 +188,7 @@ int clog_request_from_network(void *data, size_t data_len)
switch (version) {
case 5: /* Upstream */
if (version == unconverted_version)
if (version == vp[0])
return 0;
break;
case 4: /* RHEL 5.[45] */

View File

@ -1273,7 +1273,7 @@ static int _lv_activate_lv(struct logical_volume *lv, struct lv_activate_opts *l
int r;
struct dev_manager *dm;
if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, !lv_is_pvmove(lv))))
return_0;
if (!(r = dev_manager_activate(dm, lv, laopts)))
@ -1290,7 +1290,7 @@ static int _lv_preload(struct logical_volume *lv, struct lv_activate_opts *laopt
struct dev_manager *dm;
int old_readonly = laopts->read_only;
if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, !lv_is_pvmove(lv))))
goto_out;
laopts->read_only = _passes_readonly_filter(lv->vg->cmd, lv);
@ -1332,7 +1332,7 @@ static int _lv_suspend_lv(struct logical_volume *lv, struct lv_activate_opts *la
* When we are asked to manipulate (normally suspend/resume) the PVMOVE
* device directly, we don't want to touch the devices that use it.
*/
if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, !lv_is_pvmove(lv))))
return_0;
if (!(r = dev_manager_suspend(dm, lv, laopts, lockfs, flush_required)))
@ -1872,8 +1872,8 @@ static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
* tables for all the changed LVs here, as the relationships
* are not found by walking the new metadata.
*/
if (!(incore_lv->status & LOCKED) &&
(ondisk_lv->status & LOCKED) &&
if (!lv_is_locked(incore_lv) &&
lv_is_locked(ondisk_lv) &&
(pvmove_lv = find_pvmove_lv_in_lv(ondisk_lv))) {
/* Preload all the LVs above the PVMOVE LV */
dm_list_iterate_items(sl, &pvmove_lv->segs_using_this_lv) {
@ -1951,7 +1951,7 @@ static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
* can be called separately for each LV safely.
*/
if ((incore_lv->vg->status & PRECOMMITTED) &&
(incore_lv->status & LOCKED) && find_pvmove_lv_in_lv(incore_lv)) {
lv_is_locked(incore_lv) && find_pvmove_lv_in_lv(incore_lv)) {
if (!_lv_suspend_lv(incore_lv, laopts, lockfs, flush_required)) {
critical_section_dec(cmd, "failed precommitted suspend");
if (pvmove_lv)

View File

@ -1150,7 +1150,7 @@ int dev_manager_raid_message(struct dev_manager *dm,
struct dm_task *dmt;
const char *layer = lv_layer(lv);
if (!(lv->status & RAID)) {
if (!lv_is_raid(lv)) {
log_error(INTERNAL_ERROR "%s/%s is not a RAID logical volume",
lv->vg->name, lv->name);
return 0;
@ -1978,7 +1978,7 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
return_0;
/* Add any LVs referencing a PVMOVE LV unless told not to. */
if (dm->track_pvmove_deps && lv->status & PVMOVE) {
if (dm->track_pvmove_deps && lv_is_pvmove(lv)) {
dm->track_pvmove_deps = 0;
dm_list_iterate_items(sl, &lv->segs_using_this_lv)
if (!_add_lv_to_dtree(dm, dtree, sl->seg->lv, origin_only))
@ -2729,7 +2729,7 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
dm_tree_node_set_read_ahead(dnode, read_ahead, read_ahead_flags);
/* Add any LVs referencing a PVMOVE LV unless told not to */
if (dm->track_pvmove_deps && (lv->status & PVMOVE))
if (dm->track_pvmove_deps && lv_is_pvmove(lv))
dm_list_iterate_items(sl, &lv->segs_using_this_lv)
if (!_add_new_lv_to_dtree(dm, dtree, sl->seg->lv, laopts, NULL))
return_0;
@ -2917,8 +2917,7 @@ static int _tree_action(struct dev_manager *dm, struct logical_volume *lv,
break;
case SUSPEND:
dm_tree_skip_lockfs(root);
if (!dm->flush_required && !seg_is_raid(first_seg(lv)) &&
(lv->status & MIRRORED) && !(lv->status & PVMOVE))
if (!dm->flush_required && lv_is_mirror(lv) && !lv_is_pvmove(lv))
dm_tree_use_no_flush_suspend(root);
/* Fall through */
case SUSPEND_WITH_LOCKFS:

6
lib/cache/lvmetad.c vendored
View File

@ -293,7 +293,11 @@ static struct lvmcache_info *_pv_populate_lvmcache(struct cmd_context *cmd,
dev = dev_cache_get_by_devt(fallback, cmd->filter);
if (!dev) {
log_error("No device found for PV %s.", pvid_txt);
dev = dev_cache_get_by_devt(devt, cmd->lvmetad_filter);
if (!dev)
log_error("No device found for PV %s.", pvid_txt);
else
log_warn("WARNING: Device %s for PV %s rejected by a filter.", dev_name(dev), pvid_txt);
return NULL;
}

View File

@ -861,14 +861,13 @@ static struct dev_filter *_init_filter_components(struct cmd_context *cmd)
}
/* regex filter. Optional. */
if (!(cn = find_config_tree_node(cmd, devices_filter_CFG, NULL)))
log_very_verbose("devices/filter not found in config file: "
"no regex filter installed");
else if (!(filters[nr_filt] = regex_filter_create(cn->v))) {
log_error("Failed to create regex device filter");
goto bad;
} else
if ((cn = find_config_tree_node(cmd, devices_global_filter_CFG, NULL))) {
if (!(filters[nr_filt] = regex_filter_create(cn->v))) {
log_error("Failed to create global regex device filter");
goto bad;
}
nr_filt++;
}
/* device type filter. Required. */
if (!(filters[nr_filt] = lvm_type_filter_create(cmd->dev_types))) {
@ -918,16 +917,26 @@ static int _init_filters(struct cmd_context *cmd, unsigned load_persistent_cache
cmd->dump_filter = 0;
if (!(f3 = _init_filter_components(cmd)))
if (!(cmd->lvmetad_filter = _init_filter_components(cmd)))
goto_bad;
init_ignore_suspended_devices(find_config_tree_bool(cmd, devices_ignore_suspended_devices_CFG, NULL));
init_ignore_lvm_mirrors(find_config_tree_bool(cmd, devices_ignore_lvm_mirrors_CFG, NULL));
if ((cn = find_config_tree_node(cmd, devices_filter_CFG, NULL))) {
if (!(f3 = regex_filter_create(cn->v)))
goto_bad;
toplevel_components[0] = cmd->lvmetad_filter;
toplevel_components[1] = f3;
if (!(f4 = composite_filter_create(2, toplevel_components)))
goto_bad;
} else
f4 = cmd->lvmetad_filter;
if (!(dev_cache = find_config_tree_str(cmd, devices_cache_CFG, NULL)))
goto_bad;
if (!(f4 = persistent_filter_create(cmd->dev_types, f3, dev_cache))) {
if (!(cmd->filter = persistent_filter_create(cmd->dev_types, f4, dev_cache))) {
log_verbose("Failed to create persistent device filter.");
goto bad;
}
@ -948,29 +957,20 @@ static int _init_filters(struct cmd_context *cmd, unsigned load_persistent_cache
load_persistent_cache && !cmd->is_long_lived &&
!stat(dev_cache, &st) &&
(st.st_ctime > config_file_timestamp(cmd->cft)) &&
!persistent_filter_load(f4, NULL))
!persistent_filter_load(cmd->filter, NULL))
log_verbose("Failed to load existing device cache from %s",
dev_cache);
if (!(cn = find_config_tree_node(cmd, devices_global_filter_CFG, NULL))) {
cmd->filter = f4;
} else if (!(cmd->lvmetad_filter = regex_filter_create(cn->v)))
goto_bad;
else {
toplevel_components[0] = cmd->lvmetad_filter;
toplevel_components[1] = f4;
if (!(cmd->filter = composite_filter_create(2, toplevel_components)))
goto_bad;
}
return 1;
bad:
if (f4)
if (f4) /* kills both f3 and cmd->lvmetad_filter */
f4->destroy(f4);
else if (f3)
f3->destroy(f3);
if (toplevel_components[0])
toplevel_components[0]->destroy(toplevel_components[0]);
else {
if (f3)
f3->destroy(f3);
if (cmd->lvmetad_filter)
cmd->lvmetad_filter->destroy(cmd->lvmetad_filter);
}
return 0;
}

View File

@ -126,7 +126,7 @@ cfg(allocation_thin_pool_zero_CFG, "thin_pool_zero", allocation_CFG_SECTION, CFG
cfg(allocation_thin_pool_discards_CFG, "thin_pool_discards", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA, CFG_TYPE_STRING, DEFAULT_THIN_POOL_DISCARDS, vsn(2, 2, 99), NULL)
cfg(allocation_thin_pool_chunk_size_policy_CFG, "thin_pool_chunk_size_policy", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA, CFG_TYPE_STRING, DEFAULT_THIN_POOL_CHUNK_SIZE_POLICY, vsn(2, 2, 101), NULL)
cfg_runtime(allocation_thin_pool_chunk_size_CFG, "thin_pool_chunk_size", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA | CFG_DEFAULT_UNDEFINED, CFG_TYPE_INT, vsn(2, 2, 99), NULL)
cfg(allocation_physical_extent_size_CFG, "physical_extent_size", allocation_CFG_SECTION, 0, CFG_TYPE_INT, DEFAULT_EXTENT_SIZE, vsn(2, 2, 112), NULL)
cfg(log_verbose_CFG, "verbose", log_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_VERBOSE, vsn(1, 0, 0), NULL)
cfg(log_silent_CFG, "silent", log_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_SILENT, vsn(2, 2, 98), NULL)

View File

@ -1009,9 +1009,11 @@ struct dev_iter *dev_iter_create(struct dev_filter *f, int dev_scan)
if (dev_scan && !trust_cache()) {
/* Flag gets reset between each command */
if (!full_scan_done()) {
if (f && f->wipe)
f->wipe(f); /* Calls _full_scan(1) */
else
if (f && f->wipe) {
f->wipe(f); /* might call _full_scan(1) */
if (!full_scan_done())
_full_scan(1);
} else
_full_scan(1);
}
} else

View File

@ -655,23 +655,25 @@ static int _snprintf_attr(char *buf, size_t buf_size, const char *sysfs_dir,
static unsigned long _dev_topology_attribute(struct dev_types *dt,
const char *attribute,
struct device *dev)
struct device *dev,
unsigned long default_value)
{
const char *sysfs_dir = dm_sysfs_dir();
char path[PATH_MAX], buffer[64];
FILE *fp;
struct stat info;
dev_t uninitialized_var(primary);
unsigned long result = 0UL;
unsigned long result = default_value;
unsigned long value = 0UL;
if (!attribute || !*attribute)
return_0;
goto_out;
if (!sysfs_dir || !*sysfs_dir)
return_0;
goto_out;
if (!_snprintf_attr(path, sizeof(path), sysfs_dir, attribute, dev->dev))
return_0;
goto_out;
/*
* check if the desired sysfs attribute exists
@ -681,72 +683,79 @@ static unsigned long _dev_topology_attribute(struct dev_types *dt,
if (stat(path, &info) == -1) {
if (errno != ENOENT) {
log_sys_debug("stat", path);
return 0;
goto out;
}
if (!dev_get_primary_dev(dt, dev, &primary))
return 0;
goto out;
/* get attribute from partition's primary device */
if (!_snprintf_attr(path, sizeof(path), sysfs_dir, attribute, primary))
return_0;
goto_out;
if (stat(path, &info) == -1) {
if (errno != ENOENT)
log_sys_debug("stat", path);
return 0;
goto out;
}
}
if (!(fp = fopen(path, "r"))) {
log_sys_debug("fopen", path);
return 0;
goto out;
}
if (!fgets(buffer, sizeof(buffer), fp)) {
log_sys_debug("fgets", path);
goto out;
goto out_close;
}
if (sscanf(buffer, "%lu", &result) != 1) {
if (sscanf(buffer, "%lu", &value) != 1) {
log_warn("sysfs file %s not in expected format: %s", path, buffer);
goto out;
goto out_close;
}
log_very_verbose("Device %s %s is %lu bytes.",
dev_name(dev), attribute, result);
log_very_verbose("Device %s: %s is %lu%s.",
dev_name(dev), attribute, result, default_value ? "" : " bytes");
out:
result = value >> SECTOR_SHIFT;
out_close:
if (fclose(fp))
log_sys_debug("fclose", path);
return result >> SECTOR_SHIFT;
out:
return result;
}
unsigned long dev_alignment_offset(struct dev_types *dt, struct device *dev)
{
return _dev_topology_attribute(dt, "alignment_offset", dev);
return _dev_topology_attribute(dt, "alignment_offset", dev, 0UL);
}
unsigned long dev_minimum_io_size(struct dev_types *dt, struct device *dev)
{
return _dev_topology_attribute(dt, "queue/minimum_io_size", dev);
return _dev_topology_attribute(dt, "queue/minimum_io_size", dev, 0UL);
}
unsigned long dev_optimal_io_size(struct dev_types *dt, struct device *dev)
{
return _dev_topology_attribute(dt, "queue/optimal_io_size", dev);
return _dev_topology_attribute(dt, "queue/optimal_io_size", dev, 0UL);
}
unsigned long dev_discard_max_bytes(struct dev_types *dt, struct device *dev)
{
return _dev_topology_attribute(dt, "queue/discard_max_bytes", dev);
return _dev_topology_attribute(dt, "queue/discard_max_bytes", dev, 0UL);
}
unsigned long dev_discard_granularity(struct dev_types *dt, struct device *dev)
{
return _dev_topology_attribute(dt, "queue/discard_granularity", dev);
return _dev_topology_attribute(dt, "queue/discard_granularity", dev, 0UL);
}
int dev_is_rotational(struct dev_types *dt, struct device *dev)
{
return (int) _dev_topology_attribute(dt, "queue/rotational", dev, 1UL);
}
#else
int dev_get_primary_dev(struct dev_types *dt, struct device *dev, dev_t *result)
@ -779,4 +788,8 @@ unsigned long dev_discard_granularity(struct dev_types *dt, struct device *dev)
return 0UL;
}
int dev_is_rotational(struct dev_types *dt, struct device *dev)
{
return 1;
}
#endif

View File

@ -82,4 +82,6 @@ unsigned long dev_optimal_io_size(struct dev_types *dt, struct device *dev);
unsigned long dev_discard_max_bytes(struct dev_types *dt, struct device *dev);
unsigned long dev_discard_granularity(struct dev_types *dt, struct device *dev);
int dev_is_rotational(struct dev_types *dt, struct device *dev);
#endif

View File

@ -587,10 +587,10 @@ int lvdisplay_full(struct cmd_context *cmd,
display_size(cmd, (uint64_t) snap_seg->chunk_size));
}
if (lv->status & MIRRORED) {
if (lv_is_mirrored(lv)) {
mirror_seg = first_seg(lv);
log_print("Mirrored volumes %" PRIu32, mirror_seg->area_count);
if (lv->status & CONVERTING)
if (lv_is_converting(lv))
log_print("LV type Mirror undergoing conversion");
}

View File

@ -190,9 +190,12 @@ static int _out_with_comment_raw(struct formatter *f,
const char *fmt, va_list ap)
{
int n;
va_list apc;
va_copy(apc, ap);
n = vsnprintf(f->data.buf.start + f->data.buf.used,
f->data.buf.size - f->data.buf.used, fmt, ap);
f->data.buf.size - f->data.buf.used, fmt, apc);
va_end(apc);
/* If metadata doesn't fit, extend buffer */
if (n < 0 || (n + f->data.buf.used + 2 > f->data.buf.size)) {
@ -563,8 +566,8 @@ int out_areas(struct formatter *f, const struct lv_segment *seg,
}
/* RAID devices are laid-out in metadata/data pairs */
if (!(seg_lv(seg, s)->status & RAID_IMAGE) ||
!(seg_metalv(seg, s)->status & RAID_META)) {
if (!lv_is_raid_image(seg_lv(seg, s)) ||
!lv_is_raid_metadata(seg_metalv(seg, s))) {
log_error("RAID segment has non-RAID areas");
return 0;
}

View File

@ -67,6 +67,7 @@ static const struct flag _lv_flags[] = {
{RAID, NULL, 0},
{RAID_META, NULL, 0},
{RAID_IMAGE, NULL, 0},
{MIRROR, NULL, 0},
{MIRROR_IMAGE, NULL, 0},
{MIRROR_LOG, NULL, 0},
{MIRRORED, NULL, 0},

View File

@ -99,7 +99,7 @@ static int _is_converting(struct logical_volume *lv)
{
struct lv_segment *seg;
if (lv->status & MIRRORED) {
if (lv_is_mirrored(lv)) {
seg = first_seg(lv);
/* Can't use is_temporary_mirror() because the metadata for
* seg_lv may not be read in and flags may not be set yet. */
@ -386,6 +386,9 @@ static int _read_segment(struct logical_volume *lv, const struct dm_config_node
*/
_insert_segment(lv, seg);
if (seg_is_mirror(seg))
lv->status |= MIRROR;
if (seg_is_mirrored(seg))
lv->status |= MIRRORED;

View File

@ -117,9 +117,8 @@ struct logical_volume *lv_cache_create(struct logical_volume *pool,
* The origin under the origin would become *_corig_corig
* before renaming the origin above to *_corig.
*/
log_error(INTERNAL_ERROR
"The origin, %s, cannot be of cache type",
origin->name);
log_error("Creating a cache LV from an existing cache LV is"
"not yet supported.");
return NULL;
}
@ -176,7 +175,6 @@ static int _cleanup_orphan_lv(struct logical_volume *lv)
*/
int lv_cache_remove(struct logical_volume *cache_lv)
{
struct cmd_context *cmd = cache_lv->vg->cmd;
const char *policy_name;
uint64_t dirty_blocks;
struct lv_segment *cache_seg = first_seg(cache_lv);
@ -226,14 +224,8 @@ int lv_cache_remove(struct logical_volume *cache_lv)
cache_seg->policy_argv = NULL;
/* update the kernel to put the cleaner policy in place */
if (!vg_write(cache_lv->vg))
return_0;
if (!suspend_lv(cmd, cache_lv))
return_0;
if (!vg_commit(cache_lv->vg))
return_0;
if (!resume_lv(cmd, cache_lv))
return_0;
if (lv_update_and_reload(cache_lv))
return_0;
}
//FIXME: use polling to do this...
@ -257,7 +249,7 @@ int lv_cache_remove(struct logical_volume *cache_lv)
if (!remove_layer_from_lv(cache_lv, corigin_lv))
return_0;
if (!vg_write(cache_lv->vg))
if (!lv_update_and_reload(cache_lv))
return_0;
/*
@ -265,20 +257,12 @@ int lv_cache_remove(struct logical_volume *cache_lv)
* - the top-level cache LV
* - the origin
* - the cache_pool _cdata and _cmeta
*/
if (!suspend_lv(cmd, cache_lv))
return_0;
if (!vg_commit(cache_lv->vg))
return_0;
/* resume_lv on this (former) cache LV will resume all */
/*
*
* resume_lv on this (former) cache LV will resume all
*
* FIXME: currently we can't easily avoid execution of
* blkid on resumed error device
*/
if (!resume_lv(cmd, cache_lv))
return_0;
/*
* cleanup orphan devices

View File

@ -340,7 +340,7 @@ char *lv_convert_lv_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
struct lv_segment *seg;
if (lv->status & (CONVERTING|MIRRORED)) {
if (lv_is_converting(lv) || lv_is_mirrored(lv)) {
seg = first_seg(lv);
/* Temporary mirror is always area_num == 0 */
@ -361,7 +361,7 @@ char *lv_move_pv_dup(struct dm_pool *mem, const struct logical_volume *lv)
if (seg->status & PVMOVE) {
if (seg_type(seg, 0) == AREA_LV) { /* atomic pvmove */
mimage0_lv = seg_lv(seg, 0);
if (!lv_is_mirror_type(mimage0_lv)) {
if (!lv_is_mirror_image(mimage0_lv)) {
log_error(INTERNAL_ERROR
"Bad pvmove structure");
return NULL;
@ -505,7 +505,7 @@ int lv_raid_image_in_sync(const struct logical_volume *lv)
if (!lv_is_active_locally(lv))
return 0; /* Assume not in-sync */
if (!(lv->status & RAID_IMAGE)) {
if (!lv_is_raid_image(lv)) {
log_error(INTERNAL_ERROR "%s is not a RAID image", lv->name);
return 0;
}
@ -573,7 +573,7 @@ int lv_raid_healthy(const struct logical_volume *lv)
return 0;
}
if (lv->status & RAID)
if (lv_is_raid(lv))
raid_seg = first_seg(lv);
else if ((seg = first_seg(lv)))
raid_seg = get_only_segment_using_this_lv(seg->lv);
@ -592,7 +592,7 @@ int lv_raid_healthy(const struct logical_volume *lv)
if (!lv_raid_dev_health(raid_seg->lv, &raid_health))
return_0;
if (lv->status & RAID) {
if (lv_is_raid(lv)) {
if (strchr(raid_health, 'D'))
return 0;
else
@ -601,8 +601,8 @@ int lv_raid_healthy(const struct logical_volume *lv)
/* Find out which sub-LV this is. */
for (s = 0; s < raid_seg->area_count; s++)
if (((lv->status & RAID_IMAGE) && (seg_lv(raid_seg, s) == lv)) ||
((lv->status & RAID_META) && (seg_metalv(raid_seg,s) == lv)))
if ((lv_is_raid_image(lv) && (seg_lv(raid_seg, s) == lv)) ||
(lv_is_raid_metadata(lv) && (seg_metalv(raid_seg,s) == lv)))
break;
if (s == raid_seg->area_count) {
log_error(INTERNAL_ERROR
@ -633,7 +633,7 @@ char *lv_attr_dup(struct dm_pool *mem, const struct logical_volume *lv)
if (!*lv->name)
goto out;
if (lv->status & PVMOVE)
if (lv_is_pvmove(lv))
repstr[0] = 'p';
else if (lv->status & CONVERTING)
repstr[0] = 'c';
@ -646,22 +646,22 @@ char *lv_attr_dup(struct dm_pool *mem, const struct logical_volume *lv)
repstr[0] = 'e';
else if (lv_is_cache_type(lv))
repstr[0] = 'C';
else if (lv->status & RAID)
else if (lv_is_raid(lv))
repstr[0] = (lv->status & LV_NOTSYNCED) ? 'R' : 'r';
else if (lv->status & MIRRORED)
else if (lv_is_mirror(lv))
repstr[0] = (lv->status & LV_NOTSYNCED) ? 'M' : 'm';
else if (lv_is_thin_volume(lv))
repstr[0] = lv_is_merging_origin(lv) ?
'O' : (lv_is_merging_thin_snapshot(lv) ? 'S' : 'V');
else if (lv->status & VIRTUAL)
else if (lv_is_virtual(lv))
repstr[0] = 'v';
else if (lv_is_thin_pool(lv))
repstr[0] = 't';
else if (lv_is_thin_pool_data(lv))
repstr[0] = 'T';
else if (lv->status & MIRROR_IMAGE)
else if (lv_is_mirror_image(lv))
repstr[0] = (lv_mirror_image_in_sync(lv)) ? 'i' : 'I';
else if (lv->status & RAID_IMAGE)
else if (lv_is_raid_image(lv))
/*
* Visible RAID_IMAGES are sub-LVs that have been exposed for
* top-level use by being split from the RAID array with
@ -669,7 +669,7 @@ char *lv_attr_dup(struct dm_pool *mem, const struct logical_volume *lv)
*/
repstr[0] = (!lv_is_visible(lv) && lv_raid_image_in_sync(lv)) ?
'i' : 'I';
else if (lv->status & MIRROR_LOG)
else if (lv_is_mirror_log(lv))
repstr[0] = 'l';
else if (lv_is_cow(lv))
repstr[0] = (lv_is_merging_cow(lv)) ? 'S' : 's';
@ -678,7 +678,7 @@ char *lv_attr_dup(struct dm_pool *mem, const struct logical_volume *lv)
else
repstr[0] = '-';
if (lv->status & PVMOVE)
if (lv_is_pvmove(lv))
repstr[1] = '-';
else if (lv->status & LVM_WRITE)
repstr[1] = 'w';
@ -689,7 +689,7 @@ char *lv_attr_dup(struct dm_pool *mem, const struct logical_volume *lv)
repstr[2] = alloc_policy_char(lv->alloc);
if (lv->status & LOCKED)
if (lv_is_locked(lv))
repstr[2] = toupper(repstr[2]);
repstr[3] = (lv->status & FIXED_MINOR) ? 'm' : '-';
@ -743,7 +743,7 @@ char *lv_attr_dup(struct dm_pool *mem, const struct logical_volume *lv)
repstr[6] = 'C';
else if (lv_is_raid_type(lv))
repstr[6] = 'r';
else if (lv_is_mirror_type(lv))
else if (lv_is_mirror_type(lv) || lv_is_pvmove(lv))
repstr[6] = 'm';
else if (lv_is_cow(lv) || lv_is_origin(lv))
repstr[6] = 's';
@ -770,7 +770,7 @@ char *lv_attr_dup(struct dm_pool *mem, const struct logical_volume *lv)
repstr[8] = 'X'; /* Unknown */
else if (!lv_raid_healthy(lv))
repstr[8] = 'r'; /* RAID needs 'r'efresh */
else if (lv->status & RAID) {
else if (lv_is_raid(lv)) {
if (lv_raid_mismatch_count(lv, &n) && n)
repstr[8] = 'm'; /* RAID has 'm'ismatches */
} else if (lv->status & LV_WRITEMOSTLY)

View File

@ -191,7 +191,7 @@ static int _lv_layout_and_role_mirror(struct dm_pool *mem,
if (lv_is_mirrored(lv) &&
!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_MIRROR]))
goto_bad;
} else if (lv->status & PVMOVE) {
} else if (lv_is_pvmove(lv)) {
if (!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_PVMOVE]) ||
!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_MIRROR]))
goto_bad;
@ -231,6 +231,10 @@ static int _lv_layout_and_role_raid(struct dm_pool *mem,
if (!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_RAID]) ||
!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_METADATA]))
goto_bad;
} else if (lv_is_pvmove(lv)) {
if (!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_PVMOVE]) ||
!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID]))
goto_bad;
} else
top_level = 1;
@ -464,7 +468,7 @@ int lv_layout_and_role(struct dm_pool *mem, const struct logical_volume *lv,
}
/* Mirrors and related */
if (lv_is_mirror_type(lv) && !lv_is_raid(lv) &&
if ((lv_is_mirror_type(lv) || lv_is_pvmove(lv)) &&
!_lv_layout_and_role_mirror(mem, lv, *layout, *role, &public_lv))
goto_bad;
@ -985,6 +989,12 @@ struct lv_segment *alloc_lv_segment(const struct segment_type *segtype,
if (log_lv && !attach_mirror_log(seg, log_lv))
return_NULL;
if (segtype_is_mirror(segtype))
lv->status |= MIRROR;
if (segtype_is_mirrored(segtype))
lv->status |= MIRRORED;
return seg;
}
@ -1035,9 +1045,9 @@ static int _release_and_discard_lv_segment_area(struct lv_segment *seg, uint32_t
return 1;
}
if ((seg_lv(seg, s)->status & MIRROR_IMAGE) ||
(seg_lv(seg, s)->status & THIN_POOL_DATA) ||
(seg_lv(seg, s)->status & CACHE_POOL_DATA)) {
if (lv_is_mirror_image(seg_lv(seg, s)) ||
lv_is_thin_pool_data(seg_lv(seg, s)) ||
lv_is_cache_pool_data(seg_lv(seg, s))) {
if (!lv_reduce(seg_lv(seg, s), area_reduction))
return_0; /* FIXME: any upper level reporting */
return 1;
@ -1052,7 +1062,7 @@ static int _release_and_discard_lv_segment_area(struct lv_segment *seg, uint32_t
return_0;
}
if (seg_lv(seg, s)->status & RAID_IMAGE) {
if (lv_is_raid_image(seg_lv(seg, s))) {
/*
* FIXME: Use lv_reduce not lv_remove
* We use lv_remove for now, because I haven't figured out
@ -1347,9 +1357,10 @@ int replace_lv_with_error_segment(struct logical_volume *lv)
* an error segment, we should also clear any flags
* that suggest it is anything other than "error".
*/
lv->status &= ~(MIRRORED|PVMOVE|LOCKED);
/* FIXME Check for other flags that need removing */
lv->status &= ~(MIRROR|MIRRORED|PVMOVE|LOCKED);
/* FIXME: Should we bug if we find a log_lv attached? */
/* FIXME Check for any attached LVs that will become orphans e.g. mirror logs */
if (!lv_add_virtual_segment(lv, 0, len, get_segtype_from_string(lv->vg->cmd, "error"), NULL))
return_0;
@ -1655,8 +1666,10 @@ static struct alloc_handle *_alloc_init(struct cmd_context *cmd,
ah->region_size = 0;
ah->mirror_logs_separate =
find_config_tree_bool(cmd, allocation_cache_pool_metadata_require_separate_pvs_CFG, NULL);
if (!ah->mirror_logs_separate)
if (!ah->mirror_logs_separate) {
ah->alloc_and_split_meta = 1;
total_extents += ah->log_len;
}
} else {
ah->log_area_count = metadata_area_count;
ah->log_len = !metadata_area_count ? 0 :
@ -1856,9 +1869,6 @@ static int _setup_alloced_segment(struct logical_volume *lv, uint64_t status,
lv->le_count += extents;
lv->size += (uint64_t) extents *lv->vg->extent_size;
if (segtype_is_mirrored(segtype))
lv->status |= MIRRORED;
return 1;
}
@ -3081,7 +3091,7 @@ struct alloc_handle *allocate_extents(struct volume_group *vg,
if (alloc >= ALLOC_INHERIT)
alloc = vg->alloc;
if (!(ah = _alloc_init(vg->cmd, vg->cmd->mem, segtype, alloc, approx_alloc,
if (!(ah = _alloc_init(vg->cmd, vg->vgmem, segtype, alloc, approx_alloc,
lv ? lv->le_count : 0, extents, mirrors, stripes, log_count,
vg->extent_size, region_size,
parallel_areas)))
@ -3201,7 +3211,7 @@ int lv_add_segmented_mirror_image(struct alloc_handle *ah,
struct segment_type *segtype;
struct logical_volume *orig_lv, *copy_lv;
if (!(lv->status & PVMOVE)) {
if (!lv_is_pvmove(lv)) {
log_error(INTERNAL_ERROR
"Non-pvmove LV, %s, passed as argument", lv->name);
return 0;
@ -3668,6 +3678,8 @@ int lv_extend(struct logical_volume *lv,
int log_count = 0;
struct alloc_handle *ah;
uint32_t sub_lv_count;
uint32_t old_extents;
uint32_t new_extents; /* Total logical size after extension. */
log_very_verbose("Adding segment of type %s to LV %s.", segtype->name, lv->name);
@ -3693,9 +3705,9 @@ int lv_extend(struct logical_volume *lv,
allocatable_pvs, alloc, approx_alloc, NULL)))
return_0;
extents = ah->new_extents;
new_extents = ah->new_extents;
if (segtype_is_raid(segtype))
extents -= ah->log_len * ah->area_multiple;
new_extents -= ah->log_len * ah->area_multiple;
if (segtype_is_thin_pool(segtype) || segtype_is_cache_pool(segtype)) {
if (lv->le_count) {
@ -3721,6 +3733,8 @@ int lv_extend(struct logical_volume *lv,
else
sub_lv_count = mirrors;
old_extents = lv->le_count;
if (!lv->le_count &&
!(r = _lv_insert_empty_sublvs(lv, segtype, stripe_size,
region_size, sub_lv_count))) {
@ -3728,7 +3742,7 @@ int lv_extend(struct logical_volume *lv,
goto out;
}
if (!(r = _lv_extend_layered_lv(ah, lv, extents - lv->le_count, 0,
if (!(r = _lv_extend_layered_lv(ah, lv, new_extents - lv->le_count, 0,
stripes, stripe_size)))
goto_out;
@ -3737,7 +3751,7 @@ int lv_extend(struct logical_volume *lv,
* resync of the extension if the LV is currently in-sync
* and the LV has the LV_NOTSYNCED flag set.
*/
if ((lv->le_count != extents) &&
if (old_extents &&
segtype_is_mirrored(segtype) &&
(lv->status & LV_NOTSYNCED)) {
dm_percent_t sync_percent = DM_PERCENT_INVALID;
@ -3797,7 +3811,7 @@ static int _rename_single_lv(struct logical_volume *lv, char *new_name)
return 0;
}
if (lv->status & LOCKED) {
if (lv_is_locked(lv)) {
log_error("Cannot rename locked LV %s", lv->name);
return 0;
}
@ -3934,10 +3948,7 @@ int lv_rename_update(struct cmd_context *cmd, struct logical_volume *lv,
const char *new_name, int update_mda)
{
struct volume_group *vg = lv->vg;
struct lv_names lv_names;
DM_LIST_INIT(lvs_changed);
struct lv_list lvl, lvl2, *lvlp;
int r = 0;
struct lv_names lv_names = { .old = lv->name };
/* rename is not allowed on sub LVs */
if (!lv_is_visible(lv)) {
@ -3951,59 +3962,33 @@ int lv_rename_update(struct cmd_context *cmd, struct logical_volume *lv,
return 0;
}
if (lv->status & LOCKED) {
if (lv_is_locked(lv)) {
log_error("Cannot rename locked LV %s", lv->name);
return 0;
}
if (update_mda && !archive(vg))
return 0;
return_0;
/* rename sub LVs */
lv_names.old = lv->name;
lv_names.new = new_name;
if (!for_each_sub_lv(lv, _rename_cb, (void *) &lv_names))
return 0;
/* rename main LV */
if (!(lv->name = dm_pool_strdup(cmd->mem, new_name))) {
log_error("Failed to allocate space for new name");
if (!(lv_names.new = dm_pool_strdup(cmd->mem, new_name))) {
log_error("Failed to allocate space for new name.");
return 0;
}
if (!update_mda)
return 1;
/* rename sub LVs */
if (!for_each_sub_lv(lv, _rename_cb, (void *) &lv_names))
return_0;
lvl.lv = lv;
dm_list_add(&lvs_changed, &lvl.list);
/* rename main LV */
lv->name = lv_names.new;
/* rename active virtual origin too */
if (lv_is_cow(lv) && lv_is_virtual_origin(lvl2.lv = origin_from_cow(lv)))
dm_list_add_h(&lvs_changed, &lvl2.list);
if (lv_is_cow(lv))
lv = origin_from_cow(lv);
log_verbose("Writing out updated volume group");
if (!vg_write(vg))
return 0;
if (update_mda && !lv_update_and_reload(lv))
return_0;
if (!suspend_lvs(cmd, &lvs_changed, vg))
goto_out;
if (!(r = vg_commit(vg)))
stack;
/*
* FIXME: resume LVs in reverse order to prevent memory
* lock imbalance when resuming virtual snapshot origin
* (resume of snapshot resumes origin too)
*/
dm_list_iterate_back_items(lvlp, &lvs_changed)
if (!resume_lv(cmd, lvlp->lv)) {
r = 0;
stack;
}
out:
backup(vg);
return r;
return 1;
}
/*
@ -4339,7 +4324,7 @@ static int _lvresize_check_lv(struct cmd_context *cmd, struct logical_volume *lv
return 0;
}
if (lv->status & (RAID_IMAGE | RAID_META)) {
if (lv_is_raid_image(lv) || lv_is_raid_metadata(lv)) {
log_error("Cannot resize a RAID %s directly",
(lv->status & RAID_IMAGE) ? "image" :
"metadata area");
@ -4379,12 +4364,12 @@ static int _lvresize_check_lv(struct cmd_context *cmd, struct logical_volume *lv
return 0;
}
if (lv->status & LOCKED) {
if (lv_is_locked(lv)) {
log_error("Can't resize locked LV %s", lv->name);
return 0;
}
if (lv->status & CONVERTING) {
if (lv_is_converting(lv)) {
log_error("Can't resize %s while lvconvert in progress", lv->name);
return 0;
}
@ -5066,26 +5051,8 @@ int lv_resize(struct cmd_context *cmd, struct logical_volume *lv,
}
/* store vg on disk(s) */
if (!vg_write(vg))
goto_out;
if (!suspend_lv(cmd, lock_lv)) {
log_error("Failed to suspend %s", lock_lv->name);
vg_revert(vg);
goto bad;
}
if (!vg_commit(vg)) {
stack;
if (!resume_lv(cmd, lock_lv))
stack;
goto bad;
}
if (!resume_lv(cmd, lock_lv)) {
log_error("Problem reactivating %s", lock_lv->name);
goto bad;
}
if (!lv_update_and_reload(lock_lv))
goto_bad;
if (lv_is_cow_covering_origin(lv))
if (!monitor_dev_for_events(cmd, lv, 0, 0))
@ -5096,15 +5063,14 @@ int lv_resize(struct cmd_context *cmd, struct logical_volume *lv,
if (!update_pool_lv(lock_lv, 0))
goto_bad;
backup(vg);
if (inactive && !deactivate_lv(cmd, lock_lv)) {
log_error("Problem deactivating %s.", lock_lv->name);
backup(vg);
return 0;
}
}
backup(vg);
log_print_unless_silent("Logical volume %s successfully resized", lp->lv_name);
if (lp->resizefs && (lp->resize == LV_EXTEND) &&
@ -5112,10 +5078,7 @@ int lv_resize(struct cmd_context *cmd, struct logical_volume *lv,
return_0;
return 1;
bad:
backup(vg);
out:
if (inactive && !deactivate_lv(cmd, lock_lv))
log_error("Problem deactivating %s.", lock_lv->name);
@ -5424,19 +5387,19 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
return 0;
}
if (lv->status & MIRROR_IMAGE) {
if (lv_is_mirror_image(lv)) {
log_error("Can't remove logical volume %s used by a mirror",
lv->name);
return 0;
}
if (lv->status & MIRROR_LOG) {
if (lv_is_mirror_log(lv)) {
log_error("Can't remove logical volume %s used as mirror log",
lv->name);
return 0;
}
if (lv->status & (RAID_META | RAID_IMAGE)) {
if (lv_is_raid_metadata(lv) || lv_is_raid_image(lv)) {
log_error("Can't remove logical volume %s used as RAID device",
lv->name);
return 0;
@ -5450,7 +5413,7 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
} else if (lv_is_thin_volume(lv))
pool_lv = first_seg(lv)->pool_lv;
if (lv->status & LOCKED) {
if (lv_is_locked(lv)) {
log_error("Can't remove locked LV %s", lv->name);
return 0;
}
@ -5733,6 +5696,51 @@ no_remove:
return 0;
}
static int _lv_update_and_reload(struct logical_volume *lv, int origin_only)
{
struct volume_group *vg = lv->vg;
int do_backup = 0, r = 0;
log_very_verbose("Updating logical volume %s on disk(s).",
display_lvname(lv));
if (!vg_write(vg))
return_0;
if (!(origin_only ? suspend_lv_origin(vg->cmd, lv) : suspend_lv(vg->cmd, lv))) {
log_error("Failed to lock logical volume %s.",
display_lvname(lv));
vg_revert(vg);
} else if (!(r = vg_commit(vg)))
stack; /* !vg_commit() has implict vg_revert() */
else
do_backup = 1;
log_very_verbose("Updating logical volume %s in kernel.",
display_lvname(lv));
if (!(origin_only ? resume_lv_origin(vg->cmd, lv) : resume_lv(vg->cmd, lv))) {
log_error("Problem reactivating logical volume %s.",
display_lvname(lv));
r = 0;
}
if (do_backup)
backup(vg);
return r;
}
int lv_update_and_reload(struct logical_volume *lv)
{
return _lv_update_and_reload(lv, 0);
}
int lv_update_and_reload_origin(struct logical_volume *lv)
{
return _lv_update_and_reload(lv, 1);
}
/*
* insert_layer_for_segments_on_pv() inserts a layer segment for a segment area.
* However, layer modification could split the underlying layer segment.
@ -6078,7 +6086,6 @@ struct logical_volume *insert_layer_for_lv(struct cmd_context *cmd,
if (!vg_commit(lv_where->vg)) {
log_error("Failed to commit intermediate VG %s metadata for mirror conversion.", lv_where->vg->name);
vg_revert(lv_where->vg);
return NULL;
}
@ -6616,7 +6623,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
return NULL;
}
if (pool_lv->status & LOCKED) {
if (lv_is_locked(pool_lv)) {
log_error("Caching locked devices is not supported.");
return NULL;
}
@ -6642,7 +6649,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
return NULL;
}
if (org->status & LOCKED) {
if (lv_is_locked(org)) {
log_error("Caching locked devices is not supported.");
return NULL;
}
@ -6671,7 +6678,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
return NULL;
}
if (org->status & LOCKED) {
if (lv_is_locked(org)) {
log_error("Snapshots of locked devices are not supported.");
return NULL;
}
@ -6704,7 +6711,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
"supported yet.");
return NULL;
}
if (org->status & LOCKED) {
if (lv_is_locked(org)) {
log_error("Snapshots of locked devices are not "
"supported yet");
return NULL;
@ -6724,8 +6731,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
return NULL;
}
if (lv_is_mirror_type(org) &&
!seg_is_raid(first_seg(org))) {
if (lv_is_mirror_type(org)) {
log_warn("WARNING: Snapshots of mirrors can deadlock under rare device failures.");
log_warn("WARNING: Consider using the raid1 mirror type to avoid this.");
log_warn("WARNING: See global/mirror_segtype_default in lvm.conf.");
@ -6934,9 +6940,10 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
* I say that would be cleaner, but I'm not sure
* about the effects on thinpool yet...
*/
if (!vg_write(vg) || !suspend_lv(cmd, lv) ||
!vg_commit(vg) || !resume_lv(cmd, lv))
if (!lv_update_and_reload(lv)) {
stack;
goto deactivate_and_revert_new_lv;
}
if (!(lvl = find_lv_in_vg(vg, lp->origin)))
goto deactivate_and_revert_new_lv;

View File

@ -45,12 +45,12 @@ int lv_merge_segments(struct logical_volume *lv)
* having a matching segment structure.
*/
if (lv->status & LOCKED || lv->status & PVMOVE)
if (lv_is_locked(lv) || lv_is_pvmove(lv))
return 1;
if ((lv->status & MIRROR_IMAGE) &&
if (lv_is_mirror_image(lv) &&
(seg = get_only_segment_using_this_lv(lv)) &&
(seg->lv->status & LOCKED || seg->lv->status & PVMOVE))
(lv_is_locked(seg->lv) || lv_is_pvmove(seg->lv)))
return 1;
dm_list_iterate_safe(segh, t, &lv->segments) {
@ -159,7 +159,7 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
* Check mirror log - which is attached to the mirrored seg
*/
if (complete_vg && seg->log_lv && seg_is_mirrored(seg)) {
if (!(seg->log_lv->status & MIRROR_LOG)) {
if (!lv_is_mirror_log(seg->log_lv)) {
log_error("LV %s: segment %u log LV %s is not "
"a mirror log",
lv->name, seg_count, seg->log_lv->name);
@ -346,7 +346,7 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
}
if (complete_vg && seg_lv(seg, s) &&
(seg_lv(seg, s)->status & MIRROR_IMAGE) &&
lv_is_mirror_image(seg_lv(seg, s)) &&
(!(seg2 = find_seg_by_le(seg_lv(seg, s),
seg_le(seg, s))) ||
find_mirror_seg(seg2) != seg)) {

View File

@ -42,62 +42,63 @@
/* Various flags */
/* Note that the bits no longer necessarily correspond to LVM1 disk format */
#define PARTIAL_VG UINT64_C(0x00000001) /* VG */
#define EXPORTED_VG UINT64_C(0x00000002) /* VG PV */
#define RESIZEABLE_VG UINT64_C(0x00000004) /* VG */
#define PARTIAL_VG UINT64_C(0x0000000000000001) /* VG */
#define EXPORTED_VG UINT64_C(0x0000000000000002) /* VG PV */
#define RESIZEABLE_VG UINT64_C(0x0000000000000004) /* VG */
/* May any free extents on this PV be used or must they be left free? */
#define ALLOCATABLE_PV UINT64_C(0x00000008) /* PV */
#define ALLOCATABLE_PV UINT64_C(0x0000000000000008) /* PV */
#define ARCHIVED_VG ALLOCATABLE_PV /* VG, reuse same bit */
//#define SPINDOWN_LV UINT64_C(0x00000010) /* LV */
//#define BADBLOCK_ON UINT64_C(0x00000020) /* LV */
#define VISIBLE_LV UINT64_C(0x00000040) /* LV */
#define FIXED_MINOR UINT64_C(0x00000080) /* LV */
//#define SPINDOWN_LV UINT64_C(0x0000000000000010) /* LV */
//#define BADBLOCK_ON UINT64_C(0x0000000000000020) /* LV */
#define VISIBLE_LV UINT64_C(0x0000000000000040) /* LV */
#define FIXED_MINOR UINT64_C(0x0000000000000080) /* LV */
#define LVM_READ UINT64_C(0x00000100) /* LV, VG */
#define LVM_WRITE UINT64_C(0x00000200) /* LV, VG */
#define LVM_READ UINT64_C(0x0000000000000100) /* LV, VG */
#define LVM_WRITE UINT64_C(0x0000000000000200) /* LV, VG */
#define CLUSTERED UINT64_C(0x00000400) /* VG */
//#define SHARED UINT64_C(0x00000800) /* VG */
#define CLUSTERED UINT64_C(0x0000000000000400) /* VG */
//#define SHARED UINT64_C(0x0000000000000800) /* VG */
/* FIXME Remove when metadata restructuring is completed */
#define SNAPSHOT UINT64_C(0x00001000) /* LV - internal use only */
#define PVMOVE UINT64_C(0x00002000) /* VG LV SEG */
#define LOCKED UINT64_C(0x00004000) /* LV */
#define MIRRORED UINT64_C(0x00008000) /* LV - internal use only */
//#define VIRTUAL UINT64_C(0x00010000) /* LV - internal use only */
#define MIRROR_LOG UINT64_C(0x00020000) /* LV */
#define MIRROR_IMAGE UINT64_C(0x00040000) /* LV */
#define SNAPSHOT UINT64_C(0x0000000000001000) /* LV - internal use only */
#define PVMOVE UINT64_C(0x0000000000002000) /* VG LV SEG */
#define LOCKED UINT64_C(0x0000000000004000) /* LV */
#define MIRRORED UINT64_C(0x0000000000008000) /* LV - internal use only */
//#define VIRTUAL UINT64_C(0x0000000000010000) /* LV - internal use only */
#define MIRROR UINT64_C(0x0002000000000000) /* LV - Internal use only */
#define MIRROR_LOG UINT64_C(0x0000000000020000) /* LV - Internal use only */
#define MIRROR_IMAGE UINT64_C(0x0000000000040000) /* LV - Internal use only */
#define LV_NOTSYNCED UINT64_C(0x00080000) /* LV */
#define LV_REBUILD UINT64_C(0x00100000) /* LV */
//#define PRECOMMITTED UINT64_C(0x00200000) /* VG - internal use only */
#define CONVERTING UINT64_C(0x00400000) /* LV */
#define LV_NOTSYNCED UINT64_C(0x0000000000080000) /* LV */
#define LV_REBUILD UINT64_C(0x0000000000100000) /* LV */
//#define PRECOMMITTED UINT64_C(0x0000000000200000) /* VG - internal use only */
#define CONVERTING UINT64_C(0x0000000000400000) /* LV */
#define MISSING_PV UINT64_C(0x00800000) /* PV */
#define PARTIAL_LV UINT64_C(0x01000000) /* LV - derived flag, not
#define MISSING_PV UINT64_C(0x0000000000800000) /* PV */
#define PARTIAL_LV UINT64_C(0x0000000001000000) /* LV - derived flag, not
written out in metadata*/
//#define POSTORDER_FLAG UINT64_C(0x02000000) /* Not real flags, reserved for
//#define POSTORDER_OPEN_FLAG UINT64_C(0x04000000) temporary use inside vg_read_internal. */
//#define VIRTUAL_ORIGIN UINT64_C(0x08000000) /* LV - internal use only */
//#define POSTORDER_FLAG UINT64_C(0x0000000002000000) /* Not real flags, reserved for
//#define POSTORDER_OPEN_FLAG UINT64_C(0x0000000004000000) temporary use inside vg_read_internal. */
//#define VIRTUAL_ORIGIN UINT64_C(0x0000000008000000) /* LV - internal use only */
#define MERGING UINT64_C(0x10000000) /* LV SEG */
#define MERGING UINT64_C(0x0000000010000000) /* LV SEG */
#define REPLICATOR UINT64_C(0x20000000) /* LV -internal use only for replicator */
#define REPLICATOR_LOG UINT64_C(0x40000000) /* LV -internal use only for replicator-dev */
#define UNLABELLED_PV UINT64_C(0x80000000) /* PV -this PV had no label written yet */
#define REPLICATOR UINT64_C(0x0000000020000000) /* LV -internal use only for replicator */
#define REPLICATOR_LOG UINT64_C(0x0000000040000000) /* LV -internal use only for replicator-dev */
#define UNLABELLED_PV UINT64_C(0x0000000080000000) /* PV -this PV had no label written yet */
#define RAID UINT64_C(0x0000000100000000) /* LV */
#define RAID_META UINT64_C(0x0000000200000000) /* LV */
#define RAID_IMAGE UINT64_C(0x0000000400000000) /* LV */
#define RAID UINT64_C(0x0000000100000000) /* LV - Internal use only */
#define RAID_META UINT64_C(0x0000000200000000) /* LV - Internal use only */
#define RAID_IMAGE UINT64_C(0x0000000400000000) /* LV - Internal use only */
#define THIN_VOLUME UINT64_C(0x0000001000000000) /* LV */
#define THIN_POOL UINT64_C(0x0000002000000000) /* LV */
#define THIN_POOL_DATA UINT64_C(0x0000004000000000) /* LV */
#define THIN_POOL_METADATA UINT64_C(0x0000008000000000) /* LV */
#define POOL_METADATA_SPARE UINT64_C(0x0000010000000000) /* LV internal */
#define THIN_VOLUME UINT64_C(0x0000001000000000) /* LV - Internal use only */
#define THIN_POOL UINT64_C(0x0000002000000000) /* LV - Internal use only */
#define THIN_POOL_DATA UINT64_C(0x0000004000000000) /* LV - Internal use only */
#define THIN_POOL_METADATA UINT64_C(0x0000008000000000) /* LV - Internal use only */
#define POOL_METADATA_SPARE UINT64_C(0x0000010000000000) /* LV - Internal use only */
#define LV_WRITEMOSTLY UINT64_C(0x0000020000000000) /* LV (RAID1) */
@ -110,10 +111,12 @@
this flag dropped during single
LVM command execution. */
#define CACHE_POOL UINT64_C(0x0000200000000000) /* LV */
#define CACHE_POOL_DATA UINT64_C(0x0000400000000000) /* LV */
#define CACHE_POOL_METADATA UINT64_C(0x0000800000000000) /* LV */
#define CACHE UINT64_C(0x0001000000000000) /* LV */
#define CACHE_POOL UINT64_C(0x0000200000000000) /* LV - Internal use only */
#define CACHE_POOL_DATA UINT64_C(0x0000400000000000) /* LV - Internal use only */
#define CACHE_POOL_METADATA UINT64_C(0x0000800000000000) /* LV - Internal use only */
#define CACHE UINT64_C(0x0001000000000000) /* LV - Internal use only */
/* Next unused flag: UINT64_C(0x0004000000000000) */
/* Format features flags */
#define FMT_SEGMENTS 0x00000001U /* Arbitrary segment params? */
@ -162,34 +165,44 @@
#define vg_is_archived(vg) (((vg)->status & ARCHIVED_VG) ? 1 : 0)
#define lv_is_locked(lv) (((lv)->status & LOCKED) ? 1 : 0)
#define lv_is_virtual(lv) (((lv)->status & VIRTUAL) ? 1 : 0)
#define lv_is_merging(lv) (((lv)->status & MERGING) ? 1 : 0)
#define lv_is_converting(lv) (((lv)->status & CONVERTING) ? 1 : 0)
#define lv_is_external_origin(lv) (((lv)->external_count > 0) ? 1 : 0)
#define lv_is_thin_volume(lv) (((lv)->status & (THIN_VOLUME)) ? 1 : 0)
#define lv_is_thin_pool(lv) (((lv)->status & (THIN_POOL)) ? 1 : 0)
#define lv_is_used_thin_pool(lv) (lv_is_thin_pool(lv) && !dm_list_empty(&(lv)->segs_using_this_lv))
#define lv_is_thin_pool_data(lv) (((lv)->status & (THIN_POOL_DATA)) ? 1 : 0)
#define lv_is_thin_pool_metadata(lv) (((lv)->status & (THIN_POOL_METADATA)) ? 1 : 0)
#define lv_is_mirrored(lv) (((lv)->status & (MIRRORED)) ? 1 : 0)
#define lv_is_rlog(lv) (((lv)->status & (REPLICATOR_LOG)) ? 1 : 0)
#define lv_is_thin_volume(lv) (((lv)->status & THIN_VOLUME) ? 1 : 0)
#define lv_is_thin_pool(lv) (((lv)->status & THIN_POOL) ? 1 : 0)
#define lv_is_used_thin_pool(lv) (lv_is_thin_pool(lv) && !dm_list_empty(&(lv)->segs_using_this_lv))
#define lv_is_thin_pool_data(lv) (((lv)->status & THIN_POOL_DATA) ? 1 : 0)
#define lv_is_thin_pool_metadata(lv) (((lv)->status & THIN_POOL_METADATA) ? 1 : 0)
#define lv_is_thin_type(lv) (((lv)->status & (THIN_POOL | THIN_VOLUME | THIN_POOL_DATA | THIN_POOL_METADATA)) ? 1 : 0)
#define lv_is_mirror_type(lv) (((lv)->status & (MIRROR_LOG | MIRROR_IMAGE | MIRRORED | PVMOVE)) ? 1 : 0)
#define lv_is_mirror_image(lv) (((lv)->status & (MIRROR_IMAGE)) ? 1 : 0)
#define lv_is_mirror_log(lv) (((lv)->status & (MIRROR_LOG)) ? 1 : 0)
#define lv_is_raid(lv) (((lv)->status & (RAID)) ? 1 : 0)
#define lv_is_raid_image(lv) (((lv)->status & (RAID_IMAGE)) ? 1 : 0)
#define lv_is_raid_metadata(lv) (((lv)->status & (RAID_META)) ? 1 : 0)
#define lv_is_mirrored(lv) (((lv)->status & MIRRORED) ? 1 : 0)
#define lv_is_mirror_image(lv) (((lv)->status & MIRROR_IMAGE) ? 1 : 0)
#define lv_is_mirror_log(lv) (((lv)->status & MIRROR_LOG) ? 1 : 0)
#define lv_is_mirror(lv) (((lv)->status & MIRROR) ? 1 : 0)
#define lv_is_mirror_type(lv) (((lv)->status & (MIRROR | MIRROR_LOG | MIRROR_IMAGE)) ? 1 : 0)
#define lv_is_pvmove(lv) (((lv)->status & PVMOVE) ? 1 : 0)
#define lv_is_raid(lv) (((lv)->status & RAID) ? 1 : 0)
#define lv_is_raid_image(lv) (((lv)->status & RAID_IMAGE) ? 1 : 0)
#define lv_is_raid_metadata(lv) (((lv)->status & RAID_META) ? 1 : 0)
#define lv_is_raid_type(lv) (((lv)->status & (RAID | RAID_IMAGE | RAID_META)) ? 1 : 0)
#define lv_is_cache(lv) (((lv)->status & (CACHE)) ? 1 : 0)
#define lv_is_cache_pool(lv) (((lv)->status & (CACHE_POOL)) ? 1 : 0)
#define lv_is_cache_pool_data(lv) (((lv)->status & (CACHE_POOL_DATA)) ? 1 : 0)
#define lv_is_cache_pool_metadata(lv) (((lv)->status & (CACHE_POOL_METADATA)) ? 1 : 0)
#define lv_is_cache(lv) (((lv)->status & CACHE) ? 1 : 0)
#define lv_is_cache_pool(lv) (((lv)->status & CACHE_POOL) ? 1 : 0)
#define lv_is_cache_pool_data(lv) (((lv)->status & CACHE_POOL_DATA) ? 1 : 0)
#define lv_is_cache_pool_metadata(lv) (((lv)->status & CACHE_POOL_METADATA) ? 1 : 0)
#define lv_is_cache_type(lv) (((lv)->status & (CACHE | CACHE_POOL | CACHE_POOL_DATA | CACHE_POOL_METADATA)) ? 1 : 0)
#define lv_is_virtual(lv) (((lv)->status & (VIRTUAL)) ? 1 : 0)
#define lv_is_pool(lv) (((lv)->status & (CACHE_POOL | THIN_POOL)) ? 1 : 0)
#define lv_is_pool_metadata(lv) (((lv)->status & (CACHE_POOL_METADATA | THIN_POOL_METADATA)) ? 1 : 0)
#define lv_is_pool_metadata_spare(lv) (((lv)->status & (POOL_METADATA_SPARE)) ? 1 : 0)
#define lv_is_pool_metadata_spare(lv) (((lv)->status & POOL_METADATA_SPARE) ? 1 : 0)
#define lv_is_rlog(lv) (((lv)->status & REPLICATOR_LOG) ? 1 : 0)
int lv_layout_and_role(struct dm_pool *mem, const struct logical_volume *lv,
struct dm_list **layout, struct dm_list **role);
@ -700,6 +713,10 @@ int lv_rename(struct cmd_context *cmd, struct logical_volume *lv,
int lv_rename_update(struct cmd_context *cmd, struct logical_volume *lv,
const char *new_name, int update_mda);
/* Updates and reloads metadata for given lv */
int lv_update_and_reload(struct logical_volume *lv);
int lv_update_and_reload_origin(struct logical_volume *lv);
uint64_t extents_from_size(struct cmd_context *cmd, uint64_t size,
uint32_t extent_size);

View File

@ -2586,7 +2586,7 @@ int vg_validate(struct volume_group *vg)
}
dm_list_iterate_items(lvl, &vg->lvs) {
if (!(lvl->lv->status & PVMOVE))
if (!lv_is_pvmove(lvl->lv))
continue;
dm_list_iterate_items(seg, &lvl->lv->segments) {
if (seg_is_mirrored(seg)) {

View File

@ -42,9 +42,7 @@
*/
int is_temporary_mirror_layer(const struct logical_volume *lv)
{
if (lv->status & MIRROR_IMAGE
&& lv->status & MIRRORED
&& !(lv->status & LOCKED))
if (lv_is_mirror_image(lv) && lv_is_mirrored(lv) && !lv_is_locked(lv))
return 1;
return 0;
@ -58,7 +56,7 @@ struct logical_volume *find_temporary_mirror(const struct logical_volume *lv)
{
struct lv_segment *seg;
if (!(lv->status & MIRRORED))
if (!lv_is_mirrored(lv))
return NULL;
seg = first_seg(lv);
@ -109,7 +107,7 @@ uint32_t lv_mirror_count(const struct logical_volume *lv)
struct lv_segment *seg;
uint32_t s, mirrors;
if (!(lv->status & MIRRORED))
if (!lv_is_mirrored(lv))
return 1;
seg = first_seg(lv);
@ -118,7 +116,7 @@ uint32_t lv_mirror_count(const struct logical_volume *lv)
if (!strcmp(seg->segtype->name, "raid10"))
return 2;
if (lv->status & PVMOVE)
if (lv_is_pvmove(lv))
return seg->area_count;
mirrors = 0;
@ -612,7 +610,7 @@ static int _split_mirror_images(struct logical_volume *lv,
struct lv_list *lvl;
struct cmd_context *cmd = lv->vg->cmd;
if (!(lv->status & MIRRORED)) {
if (!lv_is_mirrored(lv)) {
log_error("Unable to split non-mirrored LV, %s",
lv->name);
return 0;
@ -744,6 +742,7 @@ static int _split_mirror_images(struct logical_volume *lv,
detached_log_lv = detach_mirror_log(mirrored_seg);
if (!remove_layer_from_lv(lv, sub_lv))
return_0;
lv->status &= ~MIRROR;
lv->status &= ~MIRRORED;
lv->status &= ~LV_NOTSYNCED;
}
@ -943,6 +942,7 @@ static int _remove_mirror_images(struct logical_volume *lv,
* mirror. Fix up the flags if we only have one image left.
*/
if (lv_mirror_count(lv) == 1) {
lv->status &= ~MIRROR;
lv->status &= ~MIRRORED;
lv->status &= ~LV_NOTSYNCED;
}
@ -950,7 +950,7 @@ static int _remove_mirror_images(struct logical_volume *lv,
if (remove_log && !detached_log_lv)
detached_log_lv = detach_mirror_log(mirrored_seg);
if (lv->status & PVMOVE)
if (lv_is_pvmove(lv))
dm_list_iterate_items(pvmove_seg, &lv->segments)
pvmove_seg->status |= PVMOVE;
} else if (new_area_count == 0) {
@ -959,6 +959,7 @@ static int _remove_mirror_images(struct logical_volume *lv,
/* All mirror images are gone.
* It can happen for vgreduce --removemissing. */
detached_log_lv = detach_mirror_log(mirrored_seg);
lv->status &= ~MIRROR;
lv->status &= ~MIRRORED;
lv->status &= ~LV_NOTSYNCED;
if (!replace_lv_with_error_segment(lv))
@ -1504,9 +1505,10 @@ const char *get_pvmove_pvname_from_lv_mirr(struct logical_volume *lv_mirr)
dm_list_iterate_items(seg, &lv_mirr->segments) {
if (!seg_is_mirrored(seg))
continue;
if (seg_type(seg, 0) != AREA_PV)
continue;
return dev_name(seg_dev(seg, 0));
if (seg_type(seg, 0) == AREA_PV)
return dev_name(seg_dev(seg, 0));
if (seg_type(seg, 0) == AREA_LV)
return dev_name(seg_dev(first_seg(seg_lv(seg, 0)), 0));
}
return NULL;
@ -1524,7 +1526,7 @@ struct logical_volume *find_pvmove_lv_in_lv(struct logical_volume *lv)
for (s = 0; s < seg->area_count; s++) {
if (seg_type(seg, s) != AREA_LV)
continue;
if (seg_lv(seg, s)->status & PVMOVE)
if (lv_is_pvmove(seg_lv(seg, s)))
return seg_lv(seg, s);
}
}
@ -2116,7 +2118,7 @@ int lv_add_mirrors(struct cmd_context *cmd, struct logical_volume *lv,
if (vg_is_clustered(lv->vg)) {
/* FIXME: move this test out of this function */
/* Skip test for pvmove mirrors, it can use local mirror */
if (!(lv->status & (PVMOVE | LOCKED)) &&
if (!lv_is_pvmove(lv) && !lv_is_locked(lv) &&
lv_is_active(lv) &&
!lv_is_active_exclusive_locally(lv) && /* lv_is_active_remotely */
!_cluster_mirror_is_available(lv)) {
@ -2251,7 +2253,7 @@ int lv_remove_mirrors(struct cmd_context *cmd __attribute__((unused)),
/* MIRROR_BY_LV */
if (seg_type(seg, 0) == AREA_LV &&
seg_lv(seg, 0)->status & MIRROR_IMAGE)
lv_is_mirror_image(seg_lv(seg, 0)))
return remove_mirror_images(lv, new_mirrors + 1,
is_removable, removable_baton,
log_count ? 1U : 0);

View File

@ -450,7 +450,8 @@ int create_pool(struct logical_volume *pool_lv,
bad:
if (activation()) {
if (deactivate_lv_local(pool_lv->vg->cmd, pool_lv)) {
if (lv_is_active_locally(pool_lv) &&
deactivate_lv_local(pool_lv->vg->cmd, pool_lv)) {
log_error("Aborting. Could not deactivate pool %s.",
pool_lv->name);
return 0;

View File

@ -1,5 +1,5 @@
/*
* Copyright (C) 2011 Red Hat, Inc. All rights reserved.
* Copyright (C) 2011-2014 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@ -13,6 +13,7 @@
*/
#include "lib.h"
#include "archiver.h"
#include "metadata.h"
#include "toolcontext.h"
#include "segtype.h"
@ -25,10 +26,9 @@ static int _lv_is_raid_with_tracking(const struct logical_volume *lv,
struct logical_volume **tracking)
{
uint32_t s;
struct lv_segment *seg;
const struct lv_segment *seg = first_seg(lv);
*tracking = NULL;
seg = first_seg(lv);
if (!(lv->status & RAID))
return 0;
@ -38,7 +38,6 @@ static int _lv_is_raid_with_tracking(const struct logical_volume *lv,
!(seg_lv(seg, s)->status & LVM_WRITE))
*tracking = seg_lv(seg, s);
return *tracking ? 1 : 0;
}
@ -129,11 +128,8 @@ static int _raid_remove_top_layer(struct logical_volume *lv,
return 0;
}
lvl_array = dm_pool_alloc(lv->vg->vgmem, 2 * sizeof(*lvl));
if (!lvl_array) {
log_error("Memory allocation failed.");
return 0;
}
if (!(lvl_array = dm_pool_alloc(lv->vg->vgmem, 2 * sizeof(*lvl))))
return_0;
/* Add last metadata area to removal_list */
lvl_array[0].lv = seg_metalv(seg, 0);
@ -153,6 +149,7 @@ static int _raid_remove_top_layer(struct logical_volume *lv,
return_0;
lv->status &= ~(MIRRORED | RAID);
return 1;
}
@ -319,34 +316,50 @@ static int _shift_and_rename_image_components(struct lv_segment *seg)
return 1;
}
/* Generate raid subvolume name and validate it */
static char *_generate_raid_name(struct logical_volume *lv,
const char *suffix, int count)
{
const char *format = (count >= 0) ? "%s_%s_%u" : "%s_%s";
size_t len = strlen(lv->name) + strlen(suffix) + ((count >= 0) ? 5 : 2);
char *name;
if (!(name = dm_pool_alloc(lv->vg->vgmem, len))) {
log_error("Failed to allocate new name.");
return NULL;
}
if (dm_snprintf(name, len, format, lv->name, suffix, count) < 0)
return_NULL;
if (!validate_name(name)) {
log_error("New logical volume name \"%s\" is not valid.", name);
return NULL;
}
if (find_lv_in_vg(lv->vg, name)) {
log_error("Logical volume %s already exists in volume group %s.",
name, lv->vg->name);
return NULL;
}
return name;
}
/*
* Create an LV of specified type. Set visible after creation.
* This function does not make metadata changes.
*/
static int _alloc_image_component(struct logical_volume *lv,
const char *alt_base_name,
struct alloc_handle *ah, uint32_t first_area,
uint64_t type, struct logical_volume **new_lv)
static struct logical_volume *_alloc_image_component(struct logical_volume *lv,
const char *alt_base_name,
struct alloc_handle *ah, uint32_t first_area,
uint64_t type)
{
uint64_t status;
size_t len = strlen(lv->name) + 32;
char img_name[len];
const char *base_name = (alt_base_name) ? alt_base_name : lv->name;
char img_name[NAME_LEN];
const char *type_suffix;
struct logical_volume *tmp_lv;
const struct segment_type *segtype;
if (type == RAID_META) {
if (dm_snprintf(img_name, len, "%s_rmeta_%%d", base_name) < 0)
return_0;
} else if (type == RAID_IMAGE) {
if (dm_snprintf(img_name, len, "%s_rimage_%%d", base_name) < 0)
return_0;
} else {
log_error(INTERNAL_ERROR
"Bad type provided to _alloc_raid_component");
return 0;
}
if (!ah) {
log_error(INTERNAL_ERROR
"Stand-alone %s area allocation not implemented",
@ -354,22 +367,40 @@ static int _alloc_image_component(struct logical_volume *lv,
return 0;
}
status = LVM_READ | LVM_WRITE | LV_REBUILD | type;
tmp_lv = lv_create_empty(img_name, NULL, status, ALLOC_INHERIT, lv->vg);
if (!tmp_lv) {
log_error("Failed to allocate new raid component, %s", img_name);
switch (type) {
case RAID_META:
type_suffix = "rmeta";
break;
case RAID_IMAGE:
type_suffix = "rimage";
break;
default:
log_error(INTERNAL_ERROR
"Bad type provided to _alloc_raid_component.");
return 0;
}
segtype = get_segtype_from_string(lv->vg->cmd, "striped");
if (dm_snprintf(img_name, sizeof(img_name), "%s_%s_%%d",
(alt_base_name) ? : lv->name, type_suffix) < 0)
return_0;
status = LVM_READ | LVM_WRITE | LV_REBUILD | type;
if (!(tmp_lv = lv_create_empty(img_name, NULL, status, ALLOC_INHERIT, lv->vg))) {
log_error("Failed to allocate new raid component, %s.", img_name);
return 0;
}
if (!(segtype = get_segtype_from_string(lv->vg->cmd, "striped")))
return_0;
if (!lv_add_segment(ah, first_area, 1, tmp_lv, segtype, 0, status, 0)) {
log_error("Failed to add segment to LV, %s", img_name);
return 0;
}
lv_set_visible(tmp_lv);
*new_lv = tmp_lv;
return 1;
return tmp_lv;
}
static int _alloc_image_components(struct logical_volume *lv,
@ -384,12 +415,10 @@ static int _alloc_image_components(struct logical_volume *lv,
const struct segment_type *segtype;
struct alloc_handle *ah;
struct dm_list *parallel_areas;
struct logical_volume *tmp_lv;
struct lv_list *lvl_array;
lvl_array = dm_pool_alloc(lv->vg->vgmem,
sizeof(*lvl_array) * count * 2);
if (!lvl_array)
if (!(lvl_array = dm_pool_alloc(lv->vg->vgmem,
sizeof(*lvl_array) * count * 2)))
return_0;
if (!(parallel_areas = build_parallel_areas_from_lv(lv, 0, 1)))
@ -422,26 +451,30 @@ static int _alloc_image_components(struct logical_volume *lv,
lv->alloc, 0, parallel_areas)))
return_0;
for (s = 0; s < count; s++) {
for (s = 0; s < count; ++s) {
/*
* The allocation areas are grouped together. First
* come the rimage allocated areas, then come the metadata
* allocated areas. Thus, the metadata areas are pulled
* from 's + count'.
*/
if (!_alloc_image_component(lv, NULL, ah, s + count,
RAID_META, &tmp_lv))
if (!(lvl_array[s + count].lv =
_alloc_image_component(lv, NULL, ah, s + count, RAID_META))) {
alloc_destroy(ah);
return_0;
lvl_array[s + count].lv = tmp_lv;
}
dm_list_add(new_meta_lvs, &(lvl_array[s + count].list));
if (!_alloc_image_component(lv, NULL, ah, s,
RAID_IMAGE, &tmp_lv))
if (!(lvl_array[s].lv =
_alloc_image_component(lv, NULL, ah, s, RAID_IMAGE))) {
alloc_destroy(ah);
return_0;
lvl_array[s].lv = tmp_lv;
}
dm_list_add(new_data_lvs, &(lvl_array[s].list));
}
alloc_destroy(ah);
return 1;
}
@ -459,7 +492,7 @@ static int _alloc_rmeta_for_lv(struct logical_volume *data_lv,
struct dm_list allocatable_pvs;
struct alloc_handle *ah;
struct lv_segment *seg = first_seg(data_lv);
char *p, base_name[strlen(data_lv->name) + 1];
char *p, base_name[NAME_LEN];
dm_list_init(&allocatable_pvs);
@ -469,7 +502,7 @@ static int _alloc_rmeta_for_lv(struct logical_volume *data_lv,
return 0;
}
sprintf(base_name, "%s", data_lv->name);
(void) dm_strncpy(base_name, data_lv->name, sizeof(base_name));
if ((p = strstr(base_name, "_mimage_")))
*p = '\0';
@ -486,11 +519,13 @@ static int _alloc_rmeta_for_lv(struct logical_volume *data_lv,
&allocatable_pvs, data_lv->alloc, 0, NULL)))
return_0;
if (!_alloc_image_component(data_lv, base_name, ah, 0,
RAID_META, meta_lv))
if (!(*meta_lv = _alloc_image_component(data_lv, base_name, ah, 0, RAID_META))) {
alloc_destroy(ah);
return_0;
}
alloc_destroy(ah);
return 1;
}
@ -502,7 +537,6 @@ static int _raid_add_images(struct logical_volume *lv,
uint32_t old_count = lv_raid_image_count(lv);
uint32_t count = new_count - old_count;
uint64_t status_mask = -1;
struct cmd_context *cmd = lv->vg->cmd;
struct lv_segment *seg = first_seg(lv);
struct dm_list meta_lvs, data_lvs;
struct lv_list *lvl;
@ -520,6 +554,9 @@ static int _raid_add_images(struct logical_volume *lv,
return 0;
}
if (!archive(lv->vg))
return_0;
dm_list_init(&meta_lvs); /* For image addition */
dm_list_init(&data_lvs); /* For image addition */
@ -556,21 +593,14 @@ static int _raid_add_images(struct logical_volume *lv,
* commits the LVM metadata before clearing the LVs.
*/
if (seg_is_linear(seg)) {
char *name;
size_t len;
struct dm_list *l;
struct lv_list *lvl_tmp;
dm_list_iterate(l, &data_lvs) {
if (l == dm_list_last(&data_lvs)) {
lvl = dm_list_item(l, struct lv_list);
len = strlen(lv->name) + sizeof("_rimage_XXX");
if (!(name = dm_pool_alloc(lv->vg->vgmem, len))) {
log_error("Failed to allocate rimage name.");
return 0;
}
sprintf(name, "%s_rimage_%u", lv->name, count);
lvl->lv->name = name;
if (!(lvl->lv->name = _generate_raid_name(lv, "rimage", count)))
return_0;
continue;
}
lvl = dm_list_item(l, struct lv_list);
@ -679,29 +709,8 @@ to be left for these sub-lvs.
dm_list_iterate_items(lvl, &data_lvs)
lv_set_hidden(lvl->lv);
if (!vg_write(lv->vg)) {
log_error("Failed to write changes to %s in %s",
lv->name, lv->vg->name);
return 0;
}
if (!suspend_lv_origin(cmd, lv)) {
log_error("Failed to suspend %s/%s before committing changes",
lv->vg->name, lv->name);
return 0;
}
if (!vg_commit(lv->vg)) {
log_error("Failed to commit changes to %s in %s",
lv->name, lv->vg->name);
return 0;
}
if (!resume_lv_origin(cmd, lv)) {
log_error("Failed to resume %s/%s after committing changes",
lv->vg->name, lv->name);
return 0;
}
if (!lv_update_and_reload_origin(lv))
return_0;
/*
* Now that the 'REBUILD' has made its way to the kernel, we must
@ -717,24 +726,27 @@ to be left for these sub-lvs.
rebuild_flag_cleared = 1;
}
}
if (rebuild_flag_cleared &&
(!vg_write(lv->vg) || !vg_commit(lv->vg))) {
log_error("Failed to clear REBUILD flag for %s/%s components",
lv->vg->name, lv->name);
return 0;
if (rebuild_flag_cleared) {
if (!vg_write(lv->vg) || !vg_commit(lv->vg)) {
log_error("Failed to clear REBUILD flag for %s/%s components",
lv->vg->name, lv->name);
return 0;
}
backup(lv->vg);
}
return 1;
fail:
/* Cleanly remove newly-allocated LVs that failed insertion attempt */
dm_list_iterate_items(lvl, &meta_lvs)
if (!lv_remove(lvl->lv))
return_0;
dm_list_iterate_items(lvl, &data_lvs)
if (!lv_remove(lvl->lv))
return_0;
return 0;
}
@ -763,9 +775,6 @@ static int _extract_image_components(struct lv_segment *seg, uint32_t idx,
struct logical_volume **extracted_rmeta,
struct logical_volume **extracted_rimage)
{
int len;
char *tmp_name;
struct volume_group *vg = seg->lv->vg;
struct logical_volume *data_lv = seg_lv(seg, idx);
struct logical_volume *meta_lv = seg_metalv(seg, idx);
@ -785,19 +794,11 @@ static int _extract_image_components(struct lv_segment *seg, uint32_t idx,
seg_type(seg, idx) = AREA_UNASSIGNED;
seg_metatype(seg, idx) = AREA_UNASSIGNED;
len = strlen(meta_lv->name) + strlen("_extracted") + 1;
tmp_name = dm_pool_alloc(vg->vgmem, len);
if (!tmp_name)
if (!(data_lv->name = _generate_raid_name(data_lv, "_extracted", -1)))
return_0;
sprintf(tmp_name, "%s_extracted", meta_lv->name);
meta_lv->name = tmp_name;
len = strlen(data_lv->name) + strlen("_extracted") + 1;
tmp_name = dm_pool_alloc(vg->vgmem, len);
if (!tmp_name)
if (!(meta_lv->name = _generate_raid_name(meta_lv, "_extracted", -1)))
return_0;
sprintf(tmp_name, "%s_extracted", data_lv->name);
data_lv->name = tmp_name;
*extracted_rmeta = meta_lv;
*extracted_rimage = data_lv;
@ -844,12 +845,12 @@ static int _raid_extract_images(struct logical_volume *lv, uint32_t new_count,
return 0;
}
lvl_array = dm_pool_alloc(lv->vg->vgmem,
sizeof(*lvl_array) * extract * 2);
if (!lvl_array)
if (!(lvl_array = dm_pool_alloc(lv->vg->vgmem,
sizeof(*lvl_array) * extract * 2)))
return_0;
error_segtype = get_segtype_from_string(lv->vg->cmd, "error");
if (!(error_segtype = get_segtype_from_string(lv->vg->cmd, "error")))
return_0;
/*
* We make two passes over the devices.
@ -873,15 +874,17 @@ static int _raid_extract_images(struct logical_volume *lv, uint32_t new_count,
* must come first.
*/
log_error("%s has components with error targets"
" that must be removed first: %s",
lv->name, seg_lv(seg, s)->name);
" that must be removed first: %s.",
display_lvname(lv),
display_lvname(seg_lv(seg, s)));
log_error("Try removing the PV list and rerun"
" the command.");
return 0;
}
log_debug("LVs with error segments to be removed: %s %s",
seg_metalv(seg, s)->name, seg_lv(seg, s)->name);
display_lvname(seg_metalv(seg, s)),
display_lvname(seg_lv(seg, s)));
} else {
/* Conditions for second pass */
if (!target_pvs || !lv_is_on_pvs(seg_lv(seg, s), target_pvs) ||
@ -928,6 +931,9 @@ static int _raid_remove_images(struct logical_volume *lv,
struct dm_list removal_list;
struct lv_list *lvl;
if (!archive(lv->vg))
return_0;
dm_list_init(&removal_list);
if (!_raid_extract_images(lv, new_count, pvs, 1,
@ -957,6 +963,7 @@ static int _raid_remove_images(struct logical_volume *lv,
if (!suspend_lv(lv->vg->cmd, lv)) {
log_error("Failed to suspend %s/%s before committing changes",
lv->vg->name, lv->name);
vg_revert(lv->vg);
return 0;
}
@ -1000,6 +1007,8 @@ static int _raid_remove_images(struct logical_volume *lv,
return_0;
}
backup(lv->vg);
return 1;
}
@ -1092,14 +1101,14 @@ int lv_raid_split(struct logical_volume *lv, const char *split_name,
"while tracking changes for %s",
lv->name, tracking->name);
return 0;
} else {
/* Ensure we only split the tracking image */
dm_list_init(&tracking_pvs);
splittable_pvs = &tracking_pvs;
if (!get_pv_list_for_lv(tracking->vg->cmd->mem,
tracking, splittable_pvs))
return_0;
}
/* Ensure we only split the tracking image */
dm_list_init(&tracking_pvs);
splittable_pvs = &tracking_pvs;
if (!get_pv_list_for_lv(tracking->vg->cmd->mem,
tracking, splittable_pvs))
return_0;
}
if (!_raid_extract_images(lv, new_count, splittable_pvs, 1,
@ -1130,6 +1139,7 @@ int lv_raid_split(struct logical_volume *lv, const char *split_name,
if (!suspend_lv(cmd, lv)) {
log_error("Failed to suspend %s/%s before committing changes",
lv->vg->name, lv->name);
vg_revert(lv->vg);
return 0;
}
@ -1147,6 +1157,7 @@ int lv_raid_split(struct logical_volume *lv, const char *split_name,
*/
if (!activate_lv_excl_local(cmd, lvl->lv))
return_0;
dm_list_iterate_items(lvl, &removal_list)
if (!activate_lv_excl_local(cmd, lvl->lv))
return_0;
@ -1171,6 +1182,8 @@ int lv_raid_split(struct logical_volume *lv, const char *split_name,
if (!vg_write(lv->vg) || !vg_commit(lv->vg))
return_0;
backup(lv->vg);
return 1;
}
@ -1210,7 +1223,7 @@ int lv_raid_split_and_track(struct logical_volume *lv,
return 0;
}
for (s = seg->area_count - 1; s >= 0; s--) {
for (s = seg->area_count - 1; s >= 0; --s) {
if (!lv_is_on_pvs(seg_lv(seg, s), splittable_pvs))
continue;
lv_set_visible(seg_lv(seg, s));
@ -1223,37 +1236,15 @@ int lv_raid_split_and_track(struct logical_volume *lv,
return 0;
}
if (!vg_write(lv->vg)) {
log_error("Failed to write changes to %s in %s",
lv->name, lv->vg->name);
return 0;
}
if (!suspend_lv(lv->vg->cmd, lv)) {
log_error("Failed to suspend %s/%s before committing changes",
lv->vg->name, lv->name);
return 0;
}
if (!vg_commit(lv->vg)) {
log_error("Failed to commit changes to %s in %s",
lv->name, lv->vg->name);
return 0;
}
if (!lv_update_and_reload(lv))
return_0;
log_print_unless_silent("%s split from %s for read-only purposes.",
seg_lv(seg, s)->name, lv->name);
/* Resume original LV */
if (!resume_lv(lv->vg->cmd, lv)) {
log_error("Failed to resume %s/%s after committing changes",
lv->vg->name, lv->name);
return 0;
}
/* Activate the split (and tracking) LV */
if (!_activate_sublv_preserving_excl(lv, seg_lv(seg, s)))
return 0;
return_0;
log_print_unless_silent("Use 'lvconvert --merge %s/%s' to merge back into %s",
lv->vg->name, seg_lv(seg, s)->name, lv->name);
@ -1270,75 +1261,58 @@ int lv_raid_merge(struct logical_volume *image_lv)
struct lv_segment *seg;
struct volume_group *vg = image_lv->vg;
lv_name = dm_pool_strdup(vg->vgmem, image_lv->name);
if (!lv_name)
if (image_lv->status & LVM_WRITE) {
log_error("%s is not read-only - refusing to merge.",
display_lvname(image_lv));
return 0;
}
if (!(lv_name = dm_pool_strdup(vg->vgmem, image_lv->name)))
return_0;
if (!(p = strstr(lv_name, "_rimage_"))) {
log_error("Unable to merge non-mirror image %s/%s",
vg->name, image_lv->name);
log_error("Unable to merge non-mirror image %s.",
display_lvname(image_lv));
return 0;
}
*p = '\0'; /* lv_name is now that of top-level RAID */
if (image_lv->status & LVM_WRITE) {
log_error("%s/%s is not read-only - refusing to merge",
vg->name, image_lv->name);
if (!(lvl = find_lv_in_vg(vg, lv_name))) {
log_error("Unable to find containing RAID array for %s.",
display_lvname(image_lv));
return 0;
}
if (!(lvl = find_lv_in_vg(vg, lv_name))) {
log_error("Unable to find containing RAID array for %s/%s",
vg->name, image_lv->name);
return 0;
}
lv = lvl->lv;
seg = first_seg(lv);
for (s = 0; s < seg->area_count; s++) {
if (seg_lv(seg, s) == image_lv) {
for (s = 0; s < seg->area_count; ++s)
if (seg_lv(seg, s) == image_lv)
meta_lv = seg_metalv(seg, s);
}
if (!meta_lv) {
log_error("Failed to find meta for %s in RAID array %s.",
display_lvname(image_lv),
display_lvname(lv));
return 0;
}
if (!meta_lv)
return_0;
if (!deactivate_lv(vg->cmd, meta_lv)) {
log_error("Failed to deactivate %s", meta_lv->name);
log_error("Failed to deactivate %s before merging.",
display_lvname(meta_lv));
return 0;
}
if (!deactivate_lv(vg->cmd, image_lv)) {
log_error("Failed to deactivate %s/%s before merging",
vg->name, image_lv->name);
log_error("Failed to deactivate %s before merging.",
display_lvname(image_lv));
return 0;
}
lv_set_hidden(image_lv);
image_lv->status |= (lv->status & LVM_WRITE);
image_lv->status |= RAID_IMAGE;
if (!vg_write(vg)) {
log_error("Failed to write changes to %s in %s",
lv->name, vg->name);
return 0;
}
if (!suspend_lv(vg->cmd, lv)) {
log_error("Failed to suspend %s/%s before committing changes",
vg->name, lv->name);
return 0;
}
if (!vg_commit(vg)) {
log_error("Failed to commit changes to %s in %s",
lv->name, vg->name);
return 0;
}
if (!resume_lv(vg->cmd, lv)) {
log_error("Failed to resume %s/%s after committing changes",
vg->name, lv->name);
return 0;
}
if (!lv_update_and_reload(lv))
return_0;
log_print_unless_silent("%s/%s successfully merged back into %s/%s",
vg->name, image_lv->name, vg->name, lv->name);
@ -1353,6 +1327,7 @@ static int _convert_mirror_to_raid1(struct logical_volume *lv,
struct lv_list lvl_array[seg->area_count], *lvl;
struct dm_list meta_lvs;
struct lv_segment_area *meta_areas;
char *new_name;
dm_list_init(&meta_lvs);
@ -1362,13 +1337,15 @@ static int _convert_mirror_to_raid1(struct logical_volume *lv,
return 0;
}
meta_areas = dm_pool_zalloc(lv->vg->vgmem,
lv_mirror_count(lv) * sizeof(*meta_areas));
if (!meta_areas) {
log_error("Failed to allocate memory");
if (!(meta_areas = dm_pool_zalloc(lv->vg->vgmem,
lv_mirror_count(lv) * sizeof(*meta_areas)))) {
log_error("Failed to allocate meta areas memory.");
return 0;
}
if (!archive(lv->vg))
return_0;
for (s = 0; s < seg->area_count; s++) {
log_debug_metadata("Allocating new metadata LV for %s",
seg_lv(seg, s)->name);
@ -1414,18 +1391,9 @@ static int _convert_mirror_to_raid1(struct logical_volume *lv,
s++;
}
for (s = 0; s < seg->area_count; s++) {
char *new_name;
new_name = dm_pool_zalloc(lv->vg->vgmem,
strlen(lv->name) +
strlen("_rimage_XXn"));
if (!new_name) {
log_error("Failed to rename mirror images");
return 0;
}
sprintf(new_name, "%s_rimage_%u", lv->name, s);
for (s = 0; s < seg->area_count; ++s) {
if (!(new_name = _generate_raid_name(seg_lv(seg, s), "rimage", s)))
return_0;
log_debug_metadata("Renaming %s to %s", seg_lv(seg, s)->name, new_name);
seg_lv(seg, s)->name = new_name;
seg_lv(seg, s)->status &= ~MIRROR_IMAGE;
@ -1435,33 +1403,13 @@ static int _convert_mirror_to_raid1(struct logical_volume *lv,
log_debug_metadata("Setting new segtype for %s", lv->name);
seg->segtype = new_segtype;
lv->status &= ~MIRROR;
lv->status &= ~MIRRORED;
lv->status |= RAID;
seg->status |= RAID;
if (!vg_write(lv->vg)) {
log_error("Failed to write changes to %s in %s",
lv->name, lv->vg->name);
return 0;
}
if (!suspend_lv(lv->vg->cmd, lv)) {
log_error("Failed to suspend %s/%s before committing changes",
lv->vg->name, lv->name);
return 0;
}
if (!vg_commit(lv->vg)) {
log_error("Failed to commit changes to %s in %s",
lv->name, lv->vg->name);
return 0;
}
if (!resume_lv(lv->vg->cmd, lv)) {
log_error("Failed to resume %s/%s after committing changes",
lv->vg->name, lv->name);
return 0;
}
if (!lv_update_and_reload(lv))
return_0;
return 1;
}
@ -1620,6 +1568,9 @@ int lv_raid_replace(struct logical_volume *lv,
return 0;
}
if (!archive(lv->vg))
return_0;
/*
* How many sub-LVs are being removed?
*/
@ -1685,8 +1636,10 @@ int lv_raid_replace(struct logical_volume *lv,
try_again:
if (!_alloc_image_components(lv, allocate_pvs, match_count,
&new_meta_lvs, &new_data_lvs)) {
if (!(lv->status & PARTIAL_LV))
if (!(lv->status & PARTIAL_LV)) {
log_error("LV %s in not partial.", display_lvname(lv));
return 0;
}
/* This is a repair, so try to do better than all-or-nothing */
match_count--;
@ -1760,9 +1713,7 @@ try_again:
*/
for (s = 0; s < raid_seg->area_count; s++) {
tmp_names[s] = NULL;
sd = s + raid_seg->area_count;
tmp_names[sd] = NULL;
if ((seg_type(raid_seg, s) == AREA_UNASSIGNED) &&
(seg_metatype(raid_seg, s) == AREA_UNASSIGNED)) {
@ -1770,12 +1721,7 @@ try_again:
lvl = dm_list_item(dm_list_first(&new_meta_lvs),
struct lv_list);
dm_list_del(&lvl->list);
tmp_names[s] = dm_pool_alloc(lv->vg->vgmem,
strlen(lvl->lv->name) + 1);
if (!tmp_names[s])
return_0;
if (dm_snprintf(tmp_names[s], strlen(lvl->lv->name) + 1,
"%s_rmeta_%u", lv->name, s) < 0)
if (!(tmp_names[s] = _generate_raid_name(lv, "rmeta", s)))
return_0;
if (!set_lv_segment_area_lv(raid_seg, s, lvl->lv, 0,
lvl->lv->status)) {
@ -1789,12 +1735,7 @@ try_again:
lvl = dm_list_item(dm_list_first(&new_data_lvs),
struct lv_list);
dm_list_del(&lvl->list);
tmp_names[sd] = dm_pool_alloc(lv->vg->vgmem,
strlen(lvl->lv->name) + 1);
if (!tmp_names[sd])
return_0;
if (dm_snprintf(tmp_names[sd], strlen(lvl->lv->name) + 1,
"%s_rimage_%u", lv->name, s) < 0)
if (!(tmp_names[sd] = _generate_raid_name(lv, "rimage", s)))
return_0;
if (!set_lv_segment_area_lv(raid_seg, s, lvl->lv, 0,
lvl->lv->status)) {
@ -1803,32 +1744,12 @@ try_again:
return 0;
}
lv_set_hidden(lvl->lv);
}
} else
tmp_names[s] = tmp_names[sd] = NULL;
}
if (!vg_write(lv->vg)) {
log_error("Failed to write changes to %s in %s",
lv->name, lv->vg->name);
return 0;
}
if (!suspend_lv_origin(lv->vg->cmd, lv)) {
log_error("Failed to suspend %s/%s before committing changes",
lv->vg->name, lv->name);
return 0;
}
if (!vg_commit(lv->vg)) {
log_error("Failed to commit changes to %s in %s",
lv->name, lv->vg->name);
return 0;
}
if (!resume_lv_origin(lv->vg->cmd, lv)) {
log_error("Failed to resume %s/%s after committing changes",
lv->vg->name, lv->name);
return 0;
}
if (!lv_update_and_reload_origin(lv))
return_0;
dm_list_iterate_items(lvl, &old_lvs) {
if (!deactivate_lv(lv->vg->cmd, lvl->lv))
@ -1848,29 +1769,8 @@ try_again:
}
}
if (!vg_write(lv->vg)) {
log_error("Failed to write changes to %s in %s",
lv->name, lv->vg->name);
return 0;
}
if (!suspend_lv_origin(lv->vg->cmd, lv)) {
log_error("Failed to suspend %s/%s before committing changes",
lv->vg->name, lv->name);
return 0;
}
if (!vg_commit(lv->vg)) {
log_error("Failed to commit changes to %s in %s",
lv->name, lv->vg->name);
return 0;
}
if (!resume_lv_origin(lv->vg->cmd, lv)) {
log_error("Failed to resume %s/%s after committing changes",
lv->vg->name, lv->name);
return 0;
}
if (!lv_update_and_reload_origin(lv))
return_0;
return 1;
}
@ -1879,7 +1779,6 @@ int lv_raid_remove_missing(struct logical_volume *lv)
{
uint32_t s;
struct lv_segment *seg = first_seg(lv);
struct cmd_context *cmd = lv->vg->cmd;
if (!(lv->status & PARTIAL_LV)) {
log_error(INTERNAL_ERROR "%s/%s is not a partial LV",
@ -1887,6 +1786,9 @@ int lv_raid_remove_missing(struct logical_volume *lv)
return 0;
}
if (!archive(lv->vg))
return_0;
log_debug("Attempting to remove missing devices from %s LV, %s",
seg->segtype->ops->name(seg), lv->name);
@ -1902,38 +1804,18 @@ int lv_raid_remove_missing(struct logical_volume *lv)
log_debug("Replacing %s and %s segments with error target",
seg_lv(seg, s)->name, seg_metalv(seg, s)->name);
if (!replace_lv_with_error_segment(seg_lv(seg, s))) {
log_error("Failed to replace %s/%s's extents"
" with error target", lv->vg->name,
seg_lv(seg, s)->name);
log_error("Failed to replace %s's extents with error target.",
display_lvname(seg_lv(seg, s)));
return 0;
}
if (!replace_lv_with_error_segment(seg_metalv(seg, s))) {
log_error("Failed to replace %s/%s's extents"
" with error target", lv->vg->name,
seg_metalv(seg, s)->name);
log_error("Failed to replace %s's extents with error target.",
display_lvname(seg_metalv(seg, s)));
return 0;
}
}
if (!vg_write(lv->vg)) {
log_error("Failed to write changes to %s in %s",
lv->name, lv->vg->name);
return 0;
}
if (!suspend_lv(cmd, lv)) {
log_error("Failed to suspend %s/%s before committing changes",
lv->vg->name, lv->name);
return 0;
}
if (!vg_commit(lv->vg)) {
log_error("Failed to commit changes to %s in %s",
lv->name, lv->vg->name);
return 0;
}
if (!resume_lv(cmd, lv))
if (!lv_update_and_reload(lv))
return_0;
return 1;
@ -1948,29 +1830,29 @@ static int _partial_raid_lv_is_redundant(struct logical_volume *lv)
uint32_t failed_components = 0;
if (!strcmp(raid_seg->segtype->name, "raid10")) {
/* FIXME: We only support 2-way mirrors in RAID10 currently */
/* FIXME: We only support 2-way mirrors in RAID10 currently */
copies = 2;
for (i = 0; i < raid_seg->area_count * copies; i++) {
s = i % raid_seg->area_count;
for (i = 0; i < raid_seg->area_count * copies; i++) {
s = i % raid_seg->area_count;
if (!(i % copies))
rebuilds_per_group = 0;
if (!(i % copies))
rebuilds_per_group = 0;
if ((seg_lv(raid_seg, s)->status & PARTIAL_LV) ||
(seg_metalv(raid_seg, s)->status & PARTIAL_LV) ||
lv_is_virtual(seg_lv(raid_seg, s)) ||
lv_is_virtual(seg_metalv(raid_seg, s)))
rebuilds_per_group++;
if ((seg_lv(raid_seg, s)->status & PARTIAL_LV) ||
(seg_metalv(raid_seg, s)->status & PARTIAL_LV) ||
lv_is_virtual(seg_lv(raid_seg, s)) ||
lv_is_virtual(seg_metalv(raid_seg, s)))
rebuilds_per_group++;
if (rebuilds_per_group >= copies) {
log_verbose("An entire mirror group has failed in %s",
if (rebuilds_per_group >= copies) {
log_verbose("An entire mirror group has failed in %s.",
display_lvname(lv));
return 0; /* Insufficient redundancy to activate */
return 0; /* Insufficient redundancy to activate */
}
}
}
return 1; /* Redundant */
}
}
for (s = 0; s < raid_seg->area_count; s++) {
if ((seg_lv(raid_seg, s)->status & PARTIAL_LV) ||
@ -1980,18 +1862,18 @@ static int _partial_raid_lv_is_redundant(struct logical_volume *lv)
failed_components++;
}
if (failed_components == raid_seg->area_count) {
log_verbose("All components of raid LV %s have failed",
if (failed_components == raid_seg->area_count) {
log_verbose("All components of raid LV %s have failed.",
display_lvname(lv));
return 0; /* Insufficient redundancy to activate */
} else if (raid_seg->segtype->parity_devs &&
(failed_components > raid_seg->segtype->parity_devs)) {
log_verbose("More than %u components from %s %s have failed",
return 0; /* Insufficient redundancy to activate */
} else if (raid_seg->segtype->parity_devs &&
(failed_components > raid_seg->segtype->parity_devs)) {
log_verbose("More than %u components from %s %s have failed.",
raid_seg->segtype->parity_devs,
raid_seg->segtype->ops->name(raid_seg),
display_lvname(lv));
return 0; /* Insufficient redundancy to activate */
}
raid_seg->segtype->ops->name(raid_seg),
display_lvname(lv));
return 0; /* Insufficient redundancy to activate */
}
return 1;
}
@ -2034,7 +1916,7 @@ int partial_raid_lv_supports_degraded_activation(struct logical_volume *lv)
int not_capable = 0;
if (!_lv_may_be_activated_in_degraded_mode(lv, &not_capable) || not_capable)
return 0;
return_0;
if (!for_each_sub_lv(lv, _lv_may_be_activated_in_degraded_mode, &not_capable)) {
log_error(INTERNAL_ERROR "for_each_sub_lv failure.");

View File

@ -43,11 +43,13 @@ struct dev_manager;
#define SEG_THIN_VOLUME 0x00001000U
#define SEG_CACHE 0x00002000U
#define SEG_CACHE_POOL 0x00004000U
#define SEG_MIRROR 0x00008000U
#define SEG_UNKNOWN 0x80000000U
#define segtype_is_cache(segtype) ((segtype)->flags & SEG_CACHE ? 1 : 0)
#define segtype_is_cache_pool(segtype) ((segtype)->flags & SEG_CACHE_POOL ? 1 : 0)
#define segtype_is_mirrored(segtype) ((segtype)->flags & SEG_AREAS_MIRRORED ? 1 : 0)
#define segtype_is_mirror(segtype) ((segtype)->flags & SEG_MIRROR ? 1 : 0)
#define segtype_is_pool(segtype) ((segtype)->flags & (SEG_CACHE_POOL | SEG_THIN_POOL) ? 1 : 0)
#define segtype_is_raid(segtype) ((segtype)->flags & SEG_RAID ? 1 : 0)
#define segtype_is_striped(segtype) ((segtype)->flags & SEG_AREAS_STRIPED ? 1 : 0)
@ -59,6 +61,7 @@ struct dev_manager;
#define seg_is_cache(seg) segtype_is_cache((seg)->segtype)
#define seg_is_cache_pool(seg) segtype_is_cache_pool((seg)->segtype)
#define seg_is_linear(seg) (seg_is_striped(seg) && ((seg)->area_count == 1))
#define seg_is_mirror(seg) segtype_is_mirror((seg)->segtype)
#define seg_is_mirrored(seg) segtype_is_mirrored((seg)->segtype)
#define seg_is_pool(seg) segtype_is_pool((seg)->segtype)
#define seg_is_raid(seg) segtype_is_raid((seg)->segtype)

View File

@ -136,12 +136,13 @@ int lv_is_virtual_origin(const struct logical_volume *lv)
int lv_is_merging_origin(const struct logical_volume *origin)
{
return (origin->status & MERGING) ? 1 : 0;
return lv_is_merging(origin);
}
int lv_is_merging_cow(const struct logical_volume *snapshot)
{
struct lv_segment *snap_seg = find_snapshot(snapshot);
/* checks lv_segment's status to see if cow is merging */
return (snap_seg && (snap_seg->status & MERGING)) ? 1 : 0;
}

View File

@ -519,36 +519,24 @@ int vg_set_alloc_policy(struct volume_group *vg, alloc_policy_t alloc)
return 1;
}
/*
* We do not currently support switching the cluster attribute
* with any active logical volumes.
*
* FIXME: resolve logic with reacquiring proper top-level LV locks
* and we likely can't giveup DLM locks for active LVs...
*/
int vg_set_clustered(struct volume_group *vg, int clustered)
{
struct lv_list *lvl;
struct logical_volume *lv;
/*
* We do not currently support switching the cluster attribute
* on active mirrors, snapshots or RAID logical volumes.
*/
dm_list_iterate_items(lvl, &vg->lvs) {
if (lv_is_active(lvl->lv) &&
(lv_is_mirrored(lvl->lv) || lv_is_raid_type(lvl->lv))) {
log_error("%s logical volumes must be inactive "
"when changing the cluster attribute.",
lv_is_raid_type(lvl->lv) ? "RAID" : "Mirror");
return 0;
}
if (clustered) {
if (lv_is_origin(lvl->lv) || lv_is_cow(lvl->lv)) {
log_error("Volume group %s contains snapshots "
"that are not yet supported.",
vg->name);
return 0;
}
}
if ((lv_is_origin(lvl->lv) || lv_is_cow(lvl->lv)) &&
lv_is_active(lvl->lv)) {
log_error("Snapshot logical volumes must be inactive "
"when changing the cluster attribute.");
/* For COW, check lock for origin */
lv = lv_is_cow(lvl->lv) ? origin_from_cow(lvl->lv) : lvl->lv;
if (lv_is_active(lv)) {
log_error("Can't change cluster attribute with active "
"oogical volume %s.", display_lvname(lv));
return 0;
}
}
@ -557,6 +545,10 @@ int vg_set_clustered(struct volume_group *vg, int clustered)
vg->status |= CLUSTERED;
else
vg->status &= ~CLUSTERED;
log_debug_metadata("Setting volume group %s as %sclustered.",
vg->name, clustered ? "" : "not " );
return 1;
}

View File

@ -628,7 +628,7 @@ struct segment_type *init_segtype(struct cmd_context *cmd)
segtype->ops = &_mirrored_ops;
segtype->name = "mirror";
segtype->private = NULL;
segtype->flags = SEG_AREAS_MIRRORED;
segtype->flags = SEG_MIRROR | SEG_AREAS_MIRRORED;
#ifdef DEVMAPPER_SUPPORT
# ifdef DMEVENTD

View File

@ -335,7 +335,7 @@ static int _raid_target_present(struct cmd_context *cmd,
uint32_t min;
unsigned raid_feature;
const char *feature;
} const _features[] = {
} _features[] = {
{ 1, 3, RAID_FEATURE_RAID10, "raid10" },
};

View File

@ -54,11 +54,11 @@ enum {
static const uint64_t _zero64 = UINT64_C(0);
static const uint64_t _one64 = UINT64_C(1);
static const char const _str_zero[] = "0";
static const char const _str_one[] = "1";
static const char const _str_no[] = "no";
static const char const _str_yes[] = "yes";
static const char const _str_unknown[] = "unknown";
static const char _str_zero[] = "0";
static const char _str_one[] = "1";
static const char _str_no[] = "no";
static const char _str_yes[] = "yes";
static const char _str_unknown[] = "unknown";
/*
* 32 bit signed is casted to 64 bit unsigned in dm_report_field internally!
@ -1042,9 +1042,7 @@ static int _copypercent_disp(struct dm_report *rh,
dm_percent_t percent = DM_PERCENT_INVALID;
if (((lv_is_raid(lv) && lv_raid_percent(lv, &percent)) ||
((lv->status & (PVMOVE | MIRRORED)) &&
lv_mirror_percent(lv->vg->cmd, lv, 0, &percent, NULL))) &&
(lv_is_mirror(lv) && lv_mirror_percent(lv->vg->cmd, lv, 0, &percent, NULL))) &&
(percent != DM_PERCENT_INVALID)) {
percent = copy_percent(lv);
return dm_report_field_percent(rh, field, &percent);
@ -1406,7 +1404,8 @@ static int _lvconverting_disp(struct dm_report *rh, struct dm_pool *mem,
struct dm_report_field *field,
const void *data, void *private)
{
int converting = (((const struct logical_volume *) data)->status & CONVERTING) != 0;
int converting = lv_is_converting((const struct logical_volume *) data);
return _binary_disp(rh, mem, field, converting, "converting", private);
}
@ -1417,7 +1416,7 @@ static int _lvpermissions_disp(struct dm_report *rh, struct dm_pool *mem,
const struct lv_with_info *lvi = (const struct lv_with_info *) data;
const char *perms = "";
if (!(lvi->lv->status & PVMOVE)) {
if (!lv_is_pvmove(lvi->lv)) {
if (lvi->lv->status & LVM_WRITE) {
if (!lvi->info->exists)
perms = _str_unknown;
@ -1447,6 +1446,7 @@ static int _lvallocationlocked_disp(struct dm_report *rh, struct dm_pool *mem,
const void *data, void *private)
{
int alloc_locked = (((const struct logical_volume *) data)->status & LOCKED) != 0;
return _binary_disp(rh, mem, field, alloc_locked, FIRST_NAME(lv_allocation_locked_y), private);
}
@ -1455,6 +1455,7 @@ static int _lvfixedminor_disp(struct dm_report *rh, struct dm_pool *mem,
const void *data, void *private)
{
int fixed_minor = (((const struct logical_volume *) data)->status & FIXED_MINOR) != 0;
return _binary_disp(rh, mem, field, fixed_minor, FIRST_NAME(lv_fixed_minor_y), private);
}

View File

@ -92,6 +92,7 @@ static int _snap_text_export(const struct lv_segment *seg, struct formatter *f)
{
outf(f, "chunk_size = %u", seg->chunk_size);
outf(f, "origin = \"%s\"", seg->origin->name);
if (!(seg->status & MERGING))
outf(f, "cow_store = \"%s\"", seg->cow->name);
else

View File

@ -642,7 +642,7 @@ static int _thin_target_present(struct cmd_context *cmd,
uint32_t min;
unsigned thin_feature;
const char *feature;
} const _features[] = {
} _features[] = {
{ 1, 1, THIN_FEATURE_DISCARDS, "discards" },
{ 1, 1, THIN_FEATURE_EXTERNAL_ORIGIN, "external_origin" },
{ 1, 4, THIN_FEATURE_BLOCK_SIZE, "block_size" },

View File

@ -129,12 +129,16 @@ daemon_reply daemon_send_simple_v(daemon_handle h, const char *id, va_list ap)
static const daemon_reply err = { .error = ENOMEM };
daemon_request rq = { .cft = NULL };
daemon_reply repl;
va_list apc;
va_copy(apc, ap);
if (!buffer_append_f(&rq.buffer, "request = %s", id, NULL) ||
!buffer_append_vf(&rq.buffer, ap)) {
!buffer_append_vf(&rq.buffer, apc)) {
va_end(apc);
buffer_destroy(&rq.buffer);
return err;
}
va_end(apc);
repl = daemon_send(h, rq);
buffer_destroy(&rq.buffer);
@ -181,13 +185,17 @@ bad:
int daemon_request_extend_v(daemon_request r, va_list ap)
{
int res;
va_list apc;
if (!r.cft)
return 0;
if (!config_make_nodes_v(r.cft, NULL, r.cft->root, ap))
return 0;
va_copy(apc, ap);
res = config_make_nodes_v(r.cft, NULL, r.cft->root, apc) ? 1 : 0;
va_end(apc);
return 1;
return res;
}
int daemon_request_extend(daemon_request r, ...)

View File

@ -165,9 +165,13 @@ static int _uname(void)
/*
* Set number to NULL to populate _dm_bitset - otherwise first
* match is returned.
* Returns:
* 0 - error
* 1 - success - number found
* 2 - success - number not found (only if require_module_loaded=0)
*/
static int _get_proc_number(const char *file, const char *name,
uint32_t *number)
uint32_t *number, int require_module_loaded)
{
FILE *fl;
char nm[256];
@ -199,8 +203,11 @@ static int _get_proc_number(const char *file, const char *name,
free(line);
if (number) {
log_error("%s: No entry for %s found", file, name);
return 0;
if (require_module_loaded) {
log_error("%s: No entry for %s found", file, name);
return 0;
} else
return 2;
}
return 1;
@ -208,8 +215,8 @@ static int _get_proc_number(const char *file, const char *name,
static int _control_device_number(uint32_t *major, uint32_t *minor)
{
if (!_get_proc_number(PROC_DEVICES, MISC_NAME, major) ||
!_get_proc_number(PROC_MISC, DM_NAME, minor)) {
if (!_get_proc_number(PROC_DEVICES, MISC_NAME, major, 1) ||
!_get_proc_number(PROC_MISC, DM_NAME, minor, 1)) {
*major = 0;
return 0;
}
@ -296,8 +303,15 @@ static int _create_control(const char *control, uint32_t major, uint32_t minor)
/*
* FIXME Update bitset in long-running process if dm claims new major numbers.
*/
static int _create_dm_bitset(void)
/*
* If require_module_loaded=0, caller is responsible to check
* whether _dm_device_major or _dm_bitset is really set. If
* it's not, it means the module is not loaded.
*/
static int _create_dm_bitset(int require_module_loaded)
{
int r;
#ifdef DM_IOCTLS
if (_dm_bitset || _dm_device_major)
return 1;
@ -315,7 +329,8 @@ static int _create_dm_bitset(void)
_dm_multiple_major_support = 0;
if (!_dm_multiple_major_support) {
if (!_get_proc_number(PROC_DEVICES, DM_NAME, &_dm_device_major))
if (!_get_proc_number(PROC_DEVICES, DM_NAME, &_dm_device_major,
require_module_loaded))
return 0;
return 1;
}
@ -324,10 +339,15 @@ static int _create_dm_bitset(void)
if (!(_dm_bitset = dm_bitset_create(NULL, NUMBER_OF_MAJORS)))
return 0;
if (!_get_proc_number(PROC_DEVICES, DM_NAME, NULL)) {
r = _get_proc_number(PROC_DEVICES, DM_NAME, NULL, require_module_loaded);
if (!r || r == 2) {
dm_bitset_destroy(_dm_bitset);
_dm_bitset = NULL;
return 0;
/*
* It's not an error if we didn't find anything and we
* didn't require module to be loaded at the same time.
*/
return r == 2;
}
return 1;
@ -338,13 +358,19 @@ static int _create_dm_bitset(void)
int dm_is_dm_major(uint32_t major)
{
if (!_create_dm_bitset())
if (!_create_dm_bitset(0))
return 0;
if (_dm_multiple_major_support)
if (_dm_multiple_major_support) {
if (!_dm_bitset)
return 0;
return dm_bit(_dm_bitset, major) ? 1 : 0;
else
}
else {
if (!_dm_device_major)
return 0;
return (major == _dm_device_major) ? 1 : 0;
}
}
static void _close_control_fd(void)
@ -406,7 +432,7 @@ static int _open_control(void)
if (!_open_and_assign_control_fd(control))
goto_bad;
if (!_create_dm_bitset()) {
if (!_create_dm_bitset(1)) {
log_error("Failed to set up list of device-mapper major numbers");
return 0;
}

View File

@ -1649,8 +1649,8 @@ static void _unmangle_mountinfo_string(const char *src, char *buf)
/* Parse one line of mountinfo and unmangled target line */
static int _mountinfo_parse_line(const char *line, unsigned *maj, unsigned *min, char *buf)
{
char root[PATH_MAX];
char target[PATH_MAX];
char root[PATH_MAX + 1]; /* sscanf needs extra '\0' */
char target[PATH_MAX + 1];
/* TODO: maybe detect availability of %ms glib support ? */
if (sscanf(line, "%*u %*u %u:%u %" DM_TO_STRING(PATH_MAX)

View File

@ -1721,7 +1721,7 @@ static const char *_tok_value_number(const char *s,
int is_float = 0;
*begin = s;
while (*s && ((!is_float && *s=='.' && (is_float=1)) || isdigit(*s)))
while ((!is_float && (*s == '.') && ((is_float = 1))) || isdigit(*s))
s++;
*end = s;
@ -2122,7 +2122,7 @@ static const char *_tok_value_string_list(const struct dm_report_field_type *ft,
}
/* Store information whether [] or {} was used. */
if ((end_op_flag_expected == SEL_LIST_LE))
if (end_op_flag_expected == SEL_LIST_LE)
ssl->type |= SEL_LIST_LS;
else
ssl->type |= SEL_LIST_SUBSET_LS;

View File

@ -295,13 +295,13 @@ static int _lvm_lv_activate(lv_t lv)
return -1;
/* FIXME: handle pvmove stuff later */
if (lv->status & LOCKED) {
if (lv_is_locked(lv)) {
log_error("Unable to activate locked LV");
return -1;
}
/* FIXME: handle lvconvert stuff later */
if (lv->status & CONVERTING) {
if (lv_is_converting(lv)) {
log_error("Unable to activate LV with in-progress lvconvert");
return -1;
}

View File

@ -160,6 +160,7 @@ ifeq ("@HAVE_PIE@", "yes")
ifeq ("@HAVE_FULL_RELRO@", "yes")
EXTRA_EXEC_CFLAGS += -fPIE -DPIE
EXTRA_EXEC_LDFLAGS += -Wl,-z,relro,-z,now -pie
CLDFLAGS += -Wl,-z,relro
endif
endif
endif
@ -411,7 +412,7 @@ $(LIB_STATIC): $(OBJECTS)
set -e; \
FILE=`echo $@ | sed 's/\\//\\\\\\//g;s/\\.d//g'`; \
DEPS=`echo $(DEPS) | sed -e 's/\\//\\\\\\//g'`; \
$(CC) -MM $(INCLUDES) $(DEFS) -o $@ $<; \
$(CC) -MM $(INCLUDES) $(BLKID_CFLAGS) $(DEFS) -o $@ $<; \
sed -i "s/\(.*\)\.o[ :]*/$$FILE.o $$FILE.d $$FILE.pot: $$DEPS /g" $@; \
DEPLIST=`sed 's/ \\\\//;s/.*://;' < $@`; \
echo $$DEPLIST | fmt -1 | sed 's/ //g;s/\(.*\)/\1:/' >> $@; \

View File

@ -259,8 +259,9 @@ lv_exists() {
lv_not_exists() {
local vg=$1
if test $# -le 1 ; then
lvl $vg &>/dev/null || return
die "$vg expected to not exist but it does!"
if lvl $vg &>/dev/null ; then
die "$vg expected to not exist but it does!"
fi
else
while [ $# -gt 1 ]; do
shift
@ -268,6 +269,7 @@ lv_not_exists() {
die "$vg/$1 expected to not exist but it does!"
done
fi
rm -f debug.log
}
pv_field() {

View File

@ -0,0 +1,18 @@
#!/bin/sh
# Copyright (C) 2014 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
. lib/inittest
test -e LOCAL_LVMETAD || skip
aux prepare_pvs 2
pvs --config 'devices { filter = [ "r%.*%" ] }' 2>&1 | grep rejected
pvs --config 'devices { filter = [ "r%.*%" ] }' 2>&1 | not grep 'No device found'

View File

@ -0,0 +1,77 @@
#!/bin/sh
# Copyright (C) 2014 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
. lib/inittest
test -e LOCAL_LVMETAD || skip
which mdadm || skip
test -f /proc/mdstat && grep -q raid0 /proc/mdstat || \
modprobe raid0 || skip
aux lvmconf 'devices/md_component_detection = 1'
aux extend_filter_LVMTEST
aux extend_filter "a|/dev/md.*|"
aux prepare_devs 2
# TODO factor out the following MD-creation code into lib/
# Have MD use a non-standard name to avoid colliding with an existing MD device
# - mdadm >= 3.0 requires that non-standard device names be in /dev/md/
# - newer mdadm _completely_ defers to udev to create the associated device node
mdadm_maj=$(mdadm --version 2>&1 | perl -pi -e 's|.* v(\d+).*|\1|')
[ $mdadm_maj -ge 3 ] && \
mddev=/dev/md/md_lvm_test0 || \
mddev=/dev/md_lvm_test0
cleanup_md() {
# sleeps offer hack to defeat: 'md: md127 still in use'
# see: https://bugzilla.redhat.com/show_bug.cgi?id=509908#c25
aux udev_wait
mdadm --stop "$mddev" || true
aux udev_wait
if [ -b "$mddev" ]; then
# mdadm doesn't always cleanup the device node
sleep 2
rm -f "$mddev"
fi
}
cleanup_md_and_teardown() {
cleanup_md
aux teardown
}
# create 2 disk MD raid0 array (stripe_width=128K)
test -b "$mddev" && skip
mdadm --create --metadata=1.0 "$mddev" --auto=md --level 0 --raid-devices=2 --chunk 64 "$dev1" "$dev2"
trap 'cleanup_md_and_teardown' EXIT # cleanup this MD device at the end of the test
test -b "$mddev" || skip
cp -LR "$mddev" "$DM_DEV_DIR" # so that LVM/DM can see the device
lvmdev="$DM_DEV_DIR/md_lvm_test0"
# TODO end MD-creation code
# maj=$(($(stat -L --printf=0x%t "$dev2")))
# min=$(($(stat -L --printf=0x%T "$dev2")))
pvcreate $lvmdev
pvscan --cache "$lvmdev"
# ensure that lvmetad can only see the toplevel MD device
not pvscan --cache "$dev1" 2>&1 | grep "not found"
not pvscan --cache "$dev2" 2>&1 | grep "not found"
pvs | grep $lvmdev
pvs | not grep $dev1
pvs | not grep $dev2

View File

@ -0,0 +1,24 @@
#!/bin/sh
# Copyright (C) 2014 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Testing renaming snapshots in cluster
# https://bugzilla.redhat.com/show_bug.cgi?id=1136925
. lib/inittest
aux prepare_vg 1
lvcreate -aey -L1 -n $lv1 $vg
lvcreate -s -L1 -n $lv2 $vg/$lv1
lvrename $vg/$lv2 $vg/$lv3
lvremove -f $vg/$lv1
vgremove -f $vg

View File

@ -0,0 +1,24 @@
#!/bin/sh
# Copyright (C) 2014 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Testing renaming snapshots (had problem in cluster)
# https://bugzilla.redhat.com/show_bug.cgi?id=1136925
. lib/inittest
aux prepare_vg 1
lvcreate -aey -L1 -n $lv1 $vg
lvcreate -s -L1 -n $lv2 $vg/$lv1
lvrename $vg/$lv2 $vg/$lv3
lvremove -f $vg/$lv1
vgremove -f $vg

View File

@ -38,6 +38,11 @@ aux lvmconf "activation/volume_list = [ \"$vg1\" ]"
# Pool is not active - so it cannot create thin volume
not lvcreate -V10 -T $vg/pool
# Cannot create even new pool
# check there are not left devices (RHBZ #1140128)
not lvcreate -L10 -T $vg/new_pool
check lv_not_exists $vg/new_pool
aux lvmconf "activation/volume_list = [ \"$vg\" ]"
lvcreate -V10 -T $vg/pool

View File

@ -89,16 +89,27 @@ fail vgchange -cy |& tee out
grep "y/n" out
check vg_attr_bit cluster $vg "-"
lvcreate -l1 $vg
# check on cluster
# either skipped as clustered (non-cluster), or already clustered (on cluster)
if test -e LOCAL_CLVMD ; then
# can't switch with active LV
not vgchange -cy $vg
lvchange -an $vg
vgchange -cy $vg
fail vgchange -cy $vg
check vg_attr_bit cluster $vg "c"
lvchange -ay $vg
not vgchange -cn $vg
lvchange -an $vg
vgchange -cn $vg
else
# no clvmd is running
fail vgchange -cy $vg
# can't switch with active LV
not vgchange --yes -cy $vg
lvchange -an $vg
vgchange --yes -cy $vg
fail vgchange --yes -cy $vg
fail vgs $vg |& tee out

View File

@ -20,7 +20,6 @@ static int lvchange_permission(struct cmd_context *cmd,
{
uint32_t lv_access;
struct lvinfo info;
int r = 0;
lv_access = arg_uint_value(cmd, permission_ARG, 0);
@ -42,7 +41,7 @@ static int lvchange_permission(struct cmd_context *cmd,
return 0;
}
if ((lv->status & MIRRORED) && (vg_is_clustered(lv->vg)) &&
if (lv_is_mirrored(lv) && vg_is_clustered(lv->vg) &&
lv_info(cmd, lv, 0, &info, 0, 0) && info.exists) {
log_error("Cannot change permissions of mirror \"%s\" "
"while active.", lv->name);
@ -50,9 +49,9 @@ static int lvchange_permission(struct cmd_context *cmd,
}
/* Not allowed to change permissions on RAID sub-LVs directly */
if ((lv->status & RAID_META) || (lv->status & RAID_IMAGE)) {
if (lv_is_raid_metadata(lv) || lv_is_raid_image(lv)) {
log_error("Cannot change permissions of RAID %s \"%s\"",
(lv->status & RAID_IMAGE) ? "image" :
lv_is_raid_image(lv) ? "image" :
"metadata area", lv->name);
return 0;
}
@ -73,38 +72,15 @@ static int lvchange_permission(struct cmd_context *cmd,
lv->name);
}
log_very_verbose("Updating logical volume \"%s\" on disk(s)", lv->name);
if (!vg_write(lv->vg))
if (!lv_update_and_reload(lv))
return_0;
if (!suspend_lv(cmd, lv)) {
log_error("Failed to lock %s", lv->name);
vg_revert(lv->vg);
goto out;
}
if (!vg_commit(lv->vg)) {
if (!resume_lv(cmd, lv))
stack;
goto_out;
}
log_very_verbose("Updating permissions for \"%s\" in kernel", lv->name);
if (!resume_lv(cmd, lv)) {
log_error("Problem reactivating %s", lv->name);
goto out;
}
r = 1;
out:
backup(lv->vg);
return r;
return 1;
}
static int lvchange_pool_update(struct cmd_context *cmd,
struct logical_volume *lv)
{
int r = 0;
int update = 0;
unsigned val;
thin_discards_t discards;
@ -143,32 +119,10 @@ static int lvchange_pool_update(struct cmd_context *cmd,
if (!update)
return 0;
log_very_verbose("Updating logical volume \"%s\" on disk(s).", lv->name);
if (!vg_write(lv->vg))
if (!lv_update_and_reload_origin(lv))
return_0;
if (!suspend_lv_origin(cmd, lv)) {
log_error("Failed to update active %s/%s (deactivation is needed).",
lv->vg->name, lv->name);
vg_revert(lv->vg);
goto out;
}
if (!vg_commit(lv->vg)) {
if (!resume_lv_origin(cmd, lv))
stack;
goto_out;
}
if (!resume_lv_origin(cmd, lv)) {
log_error("Problem reactivating %s.", lv->name);
goto out;
}
r = 1;
out:
backup(lv->vg);
return r;
return 1;
}
static int lvchange_monitoring(struct cmd_context *cmd,
@ -183,7 +137,7 @@ static int lvchange_monitoring(struct cmd_context *cmd,
}
/* do not monitor pvmove lv's */
if (lv->status & PVMOVE)
if (lv_is_pvmove(lv))
return 1;
if ((dmeventd_monitor_mode() != DMEVENTD_MONITOR_IGNORE) &&
@ -333,18 +287,18 @@ static int lvchange_resync(struct cmd_context *cmd, struct logical_volume *lv)
dm_list_init(&device_list);
if (!(lv->status & MIRRORED) && !seg_is_raid(seg)) {
if (!seg_is_mirror(seg) && !seg_is_raid(seg)) {
log_error("Unable to resync %s. It is not RAID or mirrored.",
lv->name);
return 0;
}
if (lv->status & PVMOVE) {
if (lv_is_pvmove(lv)) {
log_error("Unable to resync pvmove volume %s", lv->name);
return 0;
}
if (lv->status & LOCKED) {
if (lv_is_locked(lv)) {
log_error("Unable to resync locked volume %s", lv->name);
return 0;
}
@ -555,7 +509,6 @@ static int lvchange_readahead(struct cmd_context *cmd,
{
unsigned read_ahead = 0;
unsigned pagesize = (unsigned) lvm_getpagesize() >> SECTOR_SHIFT;
int r = 0;
read_ahead = arg_uint_value(cmd, readahead_ARG, 0);
@ -590,32 +543,10 @@ static int lvchange_readahead(struct cmd_context *cmd,
log_verbose("Setting read ahead to %u for \"%s\"", read_ahead,
lv->name);
log_very_verbose("Updating logical volume \"%s\" on disk(s)", lv->name);
if (!vg_write(lv->vg))
if (!lv_update_and_reload(lv))
return_0;
if (!suspend_lv(cmd, lv)) {
log_error("Failed to lock %s", lv->name);
vg_revert(lv->vg);
goto out;
}
if (!vg_commit(lv->vg)) {
if (!resume_lv(cmd, lv))
stack;
goto_out;
}
log_very_verbose("Updating permissions for \"%s\" in kernel", lv->name);
if (!resume_lv(cmd, lv)) {
log_error("Problem reactivating %s", lv->name);
goto out;
}
r = 1;
out:
backup(lv->vg);
return r;
return 1;
}
static int lvchange_persistent(struct cmd_context *cmd,
@ -794,7 +725,7 @@ static int lvchange_writemostly(struct logical_volume *lv)
* We don't bother checking the metadata area,
* since writemostly only affects the data areas.
*/
if ((seg_type(raid_seg, s) == AREA_UNASSIGNED))
if (seg_type(raid_seg, s) == AREA_UNASSIGNED)
continue;
if (lv_is_on_pv(seg_lv(raid_seg, s), pvl->pv)) {
@ -814,26 +745,9 @@ static int lvchange_writemostly(struct logical_volume *lv)
}
}
if (!vg_write(lv->vg))
if (!lv_update_and_reload(lv))
return_0;
if (!suspend_lv(cmd, lv)) {
vg_revert(lv->vg);
return_0;
}
if (!vg_commit(lv->vg)) {
if (!resume_lv(cmd, lv))
stack;
return_0;
}
log_very_verbose("Updating writemostly for \"%s\" in kernel", lv->name);
if (!resume_lv(cmd, lv)) {
log_error("Problem reactivating %s", lv->name);
return 0;
}
return 1;
}
@ -862,27 +776,9 @@ static int lvchange_recovery_rate(struct logical_volume *lv)
return 0;
}
if (!vg_write(lv->vg))
if (!lv_update_and_reload(lv))
return_0;
if (!suspend_lv(cmd, lv)) {
vg_revert(lv->vg);
return_0;
}
if (!vg_commit(lv->vg)) {
if (!resume_lv(cmd, lv))
stack;
return_0;
}
log_very_verbose("Updating recovery rate for \"%s\" in kernel",
lv->name);
if (!resume_lv(cmd, lv)) {
log_error("Problem reactivating %s", lv->name);
return 0;
}
return 1;
}
@ -963,19 +859,19 @@ static int _lvchange_single(struct cmd_context *cmd, struct logical_volume *lv,
return ECMD_FAILED;
}
if (lv->status & PVMOVE) {
if (lv_is_pvmove(lv)) {
log_error("Unable to change pvmove LV %s", lv->name);
if (arg_count(cmd, activate_ARG))
log_error("Use 'pvmove --abort' to abandon a pvmove");
return ECMD_FAILED;
}
if (lv->status & MIRROR_LOG) {
if (lv_is_mirror_log(lv)) {
log_error("Unable to change mirror log LV %s directly", lv->name);
return ECMD_FAILED;
}
if (lv->status & MIRROR_IMAGE) {
if (lv_is_mirror_image(lv)) {
log_error("Unable to change mirror image LV %s directly",
lv->name);
return ECMD_FAILED;

View File

@ -672,51 +672,12 @@ static struct logical_volume *_get_lvconvert_lv(struct cmd_context *cmd __attrib
return lv;
}
static int _reload_lv(struct cmd_context *cmd,
struct volume_group *vg,
struct logical_volume *lv)
{
int r = 0;
log_very_verbose("Updating logical volume \"%s\" on disk(s)", lv->name);
if (!vg_write(vg))
return_0;
if (!suspend_lv(cmd, lv)) {
log_error("Failed to lock %s", lv->name);
vg_revert(vg);
if (!resume_lv(cmd, lv))
stack;
goto out;
}
if (!vg_commit(vg)) {
vg_revert(vg);
if (!resume_lv(cmd, lv))
stack;
goto_out;
}
log_very_verbose("Updating \"%s\" in kernel", lv->name);
if (!resume_lv(cmd, lv)) {
log_error("Problem reactivating %s", lv->name);
goto out;
}
r = 1;
backup(vg);
out:
return r;
}
static int _finish_lvconvert_mirror(struct cmd_context *cmd,
struct volume_group *vg,
struct logical_volume *lv,
struct dm_list *lvs_changed __attribute__((unused)))
{
if (!(lv->status & CONVERTING))
if (!lv_is_converting(lv))
return 1;
if (!collapse_mirrored_lv(lv)) {
@ -726,9 +687,7 @@ static int _finish_lvconvert_mirror(struct cmd_context *cmd,
lv->status &= ~CONVERTING;
log_very_verbose("Updating logical volume \"%s\" on disk(s)", lv->name);
if (!_reload_lv(cmd, vg, lv))
if (!lv_update_and_reload(lv))
return_0;
log_print_unless_silent("Logical volume %s converted.", lv->name);
@ -1009,7 +968,7 @@ static int _failed_logs_count(struct logical_volume *lv)
unsigned s;
struct logical_volume *log_lv = first_seg(lv)->log_lv;
if (log_lv && (log_lv->status & PARTIAL_LV)) {
if (log_lv->status & MIRRORED)
if (lv_is_mirrored(log_lv))
ret += _failed_mirrors_count(log_lv);
else
ret += 1;
@ -1150,7 +1109,7 @@ static int _lv_update_mirrored_log(struct logical_volume *lv,
return 1;
log_lv = first_seg(_original_lv(lv))->log_lv;
if (!log_lv || !(log_lv->status & MIRRORED))
if (!log_lv || !lv_is_mirrored(log_lv))
return 1;
old_log_count = _get_log_count(lv);
@ -1207,7 +1166,7 @@ static int _lv_update_log_type(struct cmd_context *cmd,
* mirrored logs in cluster mirrors.
*/
if (old_log_count &&
!_reload_lv(cmd, log_lv->vg, log_lv))
!lv_update_and_reload(log_lv))
return_0;
return 1;
@ -1293,7 +1252,7 @@ static int _lvconvert_mirrors_parse_params(struct cmd_context *cmd,
*new_mimage_count = *old_mimage_count;
*new_log_count = *old_log_count;
if (find_temporary_mirror(lv) || (lv->status & CONVERTING))
if (find_temporary_mirror(lv) || lv_is_converting(lv))
lp->need_polling = 1;
return 1;
}
@ -1378,7 +1337,7 @@ static int _lvconvert_mirrors_parse_params(struct cmd_context *cmd,
/*
* Region size must not change on existing mirrors
*/
if (arg_count(cmd, regionsize_ARG) && (lv->status & MIRRORED) &&
if (arg_count(cmd, regionsize_ARG) && lv_is_mirrored(lv) &&
(lp->region_size != first_seg(lv)->region_size)) {
log_error("Mirror log region size cannot be changed on "
"an existing mirror.");
@ -1389,7 +1348,7 @@ static int _lvconvert_mirrors_parse_params(struct cmd_context *cmd,
* For the most part, we cannot handle multi-segment mirrors. Bail out
* early if we have encountered one.
*/
if ((lv->status & MIRRORED) && dm_list_size(&lv->segments) != 1) {
if (lv_is_mirrored(lv) && dm_list_size(&lv->segments) != 1) {
log_error("Logical volume %s has multiple "
"mirror segments.", lv->name);
return 0;
@ -1419,7 +1378,7 @@ static int _lvconvert_mirrors_aux(struct cmd_context *cmd,
uint32_t old_mimage_count = lv_mirror_count(lv);
uint32_t old_log_count = _get_log_count(lv);
if ((lp->mirrors == 1) && !(lv->status & MIRRORED)) {
if ((lp->mirrors == 1) && !lv_is_mirrored(lv)) {
log_warn("Logical volume %s is already not mirrored.",
lv->name);
return 1;
@ -1437,7 +1396,7 @@ static int _lvconvert_mirrors_aux(struct cmd_context *cmd,
/*
* Up-convert from linear to mirror
*/
if (!(lv->status & MIRRORED)) {
if (!lv_is_mirrored(lv)) {
/* FIXME Share code with lvcreate */
/*
@ -1483,7 +1442,7 @@ static int _lvconvert_mirrors_aux(struct cmd_context *cmd,
* Is there already a convert in progress? We do not
* currently allow more than one.
*/
if (find_temporary_mirror(lv) || (lv->status & CONVERTING)) {
if (find_temporary_mirror(lv) || lv_is_converting(lv)) {
log_error("%s is already being converted. Unable to start another conversion.",
lv->name);
return 0;
@ -1564,7 +1523,7 @@ out:
/*
* Converting the log type
*/
if ((lv->status & MIRRORED) && (old_log_count != new_log_count)) {
if (lv_is_mirrored(lv) && (old_log_count != new_log_count)) {
if (!_lv_update_log_type(cmd, lp, lv,
operable_pvs, new_log_count))
return_0;
@ -1572,7 +1531,7 @@ out:
out_skip_log_convert:
if (!_reload_lv(cmd, lv->vg, lv))
if (!lv_update_and_reload(lv))
return_0;
return 1;
@ -1609,7 +1568,7 @@ int mirror_remove_missing(struct cmd_context *cmd,
!_lv_update_log_type(cmd, NULL, lv, failed_pvs, log_count))
return_0;
if (!_reload_lv(cmd, lv->vg, lv))
if (!lv_update_and_reload(lv))
return_0;
return 1;
@ -2000,7 +1959,7 @@ static int _lvconvert_splitsnapshot(struct cmd_context *cmd, struct logical_volu
if (!vg_check_status(vg, LVM_WRITE))
return_ECMD_FAILED;
if (lv_is_mirror_type(cow) || lv_is_raid_type(cow) || lv_is_thin_type(cow)) {
if (lv_is_pvmove(cow) || lv_is_mirror_type(cow) || lv_is_raid_type(cow) || lv_is_thin_type(cow)) {
log_error("LV %s/%s type is unsupported with --splitsnapshot.", vg->name, cow->name);
return ECMD_FAILED;
}
@ -2041,7 +2000,7 @@ static int _lvconvert_snapshot(struct cmd_context *cmd,
{
struct logical_volume *org;
if (lv->status & MIRRORED) {
if (lv_is_mirrored(lv)) {
log_error("Unable to convert mirrored LV \"%s\" into a snapshot.", lv->name);
return 0;
}
@ -2066,11 +2025,11 @@ static int _lvconvert_snapshot(struct cmd_context *cmd,
if (!cow_has_min_chunks(lv->vg, lv->le_count, lp->chunk_size))
return_0;
if (org->status & (LOCKED|PVMOVE|MIRRORED) || lv_is_cow(org)) {
if (lv_is_locked(org) || lv_is_pvmove(org) || lv_is_mirrored(org) || lv_is_cow(org)) {
log_error("Unable to convert an LV into a snapshot of a %s LV.",
org->status & LOCKED ? "locked" :
org->status & PVMOVE ? "pvmove" :
org->status & MIRRORED ? "mirrored" :
lv_is_locked(org) ? "locked" :
lv_is_pvmove(org) ? "pvmove" :
lv_is_mirrored(org) ? "mirrored" :
"snapshot");
return 0;
}
@ -2117,7 +2076,7 @@ static int _lvconvert_snapshot(struct cmd_context *cmd,
}
/* store vg on disk(s) */
if (!_reload_lv(cmd, lv->vg, org))
if (!lv_update_and_reload(org))
return_0;
log_print_unless_silent("Logical volume %s converted to snapshot.", lv->name);
@ -2608,7 +2567,7 @@ static int _lvconvert_thin(struct cmd_context *cmd,
goto revert_new_lv;
}
if (!_reload_lv(cmd, vg, torigin_lv)) {
if (!lv_update_and_reload(torigin_lv)) {
stack;
goto deactivate_and_revert_new_lv;
}
@ -2706,7 +2665,7 @@ static int _lvconvert_pool(struct cmd_context *cmd,
log_error("Try \"raid1\" segment type instead.");
return 0;
}
if (metadata_lv->status & LOCKED) {
if (lv_is_locked(metadata_lv)) {
log_error("Can't convert locked LV %s.",
display_lvname(metadata_lv));
return 0;
@ -3057,7 +3016,7 @@ static int _lvconvert_cache(struct cmd_context *cmd,
if (!(cache_lv = lv_cache_create(pool_lv, origin)))
return_0;
if (!_reload_lv(cmd, cache_lv->vg, cache_lv))
if (!lv_update_and_reload(cache_lv))
return_0;
log_print_unless_silent("Logical volume %s is now cached.",
@ -3072,7 +3031,7 @@ static int _lvconvert_single(struct cmd_context *cmd, struct logical_volume *lv,
struct lvconvert_params *lp = handle;
struct dm_list *failed_pvs;
if (lv->status & LOCKED) {
if (lv_is_locked(lv)) {
log_error("Cannot convert locked LV %s", lv->name);
return ECMD_FAILED;
}
@ -3083,7 +3042,7 @@ static int _lvconvert_single(struct cmd_context *cmd, struct logical_volume *lv,
return ECMD_FAILED;
}
if (lv->status & PVMOVE) {
if (lv_is_pvmove(lv)) {
log_error("Unable to convert pvmove LV %s", lv->name);
return ECMD_FAILED;
}
@ -3153,7 +3112,7 @@ static int _lvconvert_single(struct cmd_context *cmd, struct logical_volume *lv,
_remove_missing_empty_pv(lv->vg, failed_pvs);
} else if (arg_count(cmd, mirrors_ARG) ||
arg_count(cmd, splitmirrors_ARG) ||
(lv->status & MIRRORED)) {
lv_is_mirrored(lv)) {
if (!archive(lv->vg))
return_ECMD_FAILED;

View File

@ -1343,7 +1343,7 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv)
goto_out;
_apply_settings(cmd);
if (cmd->degraded_activation)
log_verbose("DEGRADED MODE. Incomplete RAID LVs will be processed.");
log_debug("DEGRADED MODE. Incomplete RAID LVs will be processed.");
if (!get_activation_monitoring_mode(cmd, &monitoring))
goto_out;

View File

@ -111,9 +111,9 @@ int lvrename(struct cmd_context *cmd, int argc, char **argv)
goto bad;
}
if (lvl->lv->status & (RAID_IMAGE | RAID_META)) {
if (lv_is_raid_image(lvl->lv) || lv_is_raid_metadata(lvl->lv)) {
log_error("Cannot rename a RAID %s directly",
(lvl->lv->status & RAID_IMAGE) ? "image" :
lv_is_raid_image(lvl->lv) ? "image" :
"metadata area");
goto bad;
}

View File

@ -322,19 +322,18 @@ static struct logical_volume *_set_up_pvmove_lv(struct cmd_context *cmd,
* RAID, thin and snapshot-related LVs are not
* processed in a cluster, so we don't have to
* worry about avoiding certain PVs in that context.
*
* Allow clustered mirror, but not raid mirror.
*/
if (vg_is_clustered(lv->vg)) {
/* Allow clustered mirror, but not raid mirror. */
if (!lv_is_mirror_type(lv) || lv_is_raid(lv))
continue;
}
if (vg_is_clustered(lv->vg) && !lv_is_mirror_type(lv))
continue;
if (!lv_is_on_pvs(lv, source_pvl))
continue;
if (lv->status & (CONVERTING | MERGING)) {
if (lv_is_converting(lv) || lv_is_merging(lv)) {
log_error("Unable to pvmove when %s volumes are present",
(lv->status & CONVERTING) ?
lv_is_converting(lv) ?
"converting" : "merging");
return NULL;
}
@ -423,7 +422,7 @@ static struct logical_volume *_set_up_pvmove_lv(struct cmd_context *cmd,
continue;
}
if (lv->status & LOCKED) {
if (lv_is_locked(lv)) {
lv_skipped = 1;
log_print_unless_silent("Skipping locked LV %s", lv->name);
continue;

View File

@ -1305,9 +1305,14 @@ struct dm_list *clone_pv_list(struct dm_pool *mem, struct dm_list *pvsl)
return r;
}
void vgcreate_params_set_defaults(struct vgcreate_params *vp_def,
struct volume_group *vg)
const char _pe_size_may_not_be_negative_msg[] = "Physical extent size may not be negative";
int vgcreate_params_set_defaults(struct cmd_context *cmd,
struct vgcreate_params *vp_def,
struct volume_group *vg)
{
int64_t extent_size;
if (vg) {
vp_def->vg_name = NULL;
vp_def->extent_size = vg->extent_size;
@ -1318,13 +1323,21 @@ void vgcreate_params_set_defaults(struct vgcreate_params *vp_def,
vp_def->vgmetadatacopies = vg->mda_copies;
} else {
vp_def->vg_name = NULL;
vp_def->extent_size = DEFAULT_EXTENT_SIZE * 2;
extent_size = find_config_tree_int64(cmd,
allocation_physical_extent_size_CFG, NULL) * 2;
if (extent_size < 0) {
log_error(_pe_size_may_not_be_negative_msg);
return 0;
}
vp_def->extent_size = (uint32_t) extent_size;
vp_def->max_pv = DEFAULT_MAX_PV;
vp_def->max_lv = DEFAULT_MAX_LV;
vp_def->alloc = DEFAULT_ALLOC_POLICY;
vp_def->clustered = DEFAULT_CLUSTERED;
vp_def->vgmetadatacopies = DEFAULT_VGMETADATACOPIES;
}
return 1;
}
/*
@ -1357,7 +1370,7 @@ int vgcreate_params_set_from_args(struct cmd_context *cmd,
vp_new->clustered = locking_is_clustered();
if (arg_sign_value(cmd, physicalextentsize_ARG, SIGN_NONE) == SIGN_MINUS) {
log_error("Physical extent size may not be negative");
log_error(_pe_size_may_not_be_negative_msg);
return 0;
}
@ -1427,7 +1440,7 @@ int lv_change_activate(struct cmd_context *cmd, struct logical_volume *lv,
if (background_polling() &&
is_change_activating(activate) &&
(lv->status & (PVMOVE|CONVERTING|MERGING)))
(lv_is_pvmove(lv) || lv_is_converting(lv) || lv_is_merging(lv)))
lv_spawn_background_polling(cmd, lv);
return r;
@ -1496,19 +1509,19 @@ void lv_spawn_background_polling(struct cmd_context *cmd,
{
const char *pvname;
if ((lv->status & PVMOVE) &&
if (lv_is_pvmove(lv) &&
(pvname = get_pvmove_pvname_from_lv_mirr(lv))) {
log_verbose("Spawning background pvmove process for %s",
pvname);
pvmove_poll(cmd, pvname, 1);
} else if ((lv->status & LOCKED) &&
(pvname = get_pvmove_pvname_from_lv(lv))) {
} else if (lv_is_locked(lv) &&
(pvname = get_pvmove_pvname_from_lv(lv))) {
log_verbose("Spawning background pvmove process for %s",
pvname);
pvmove_poll(cmd, pvname, 1);
}
if (lv->status & (CONVERTING|MERGING)) {
if (lv_is_converting(lv) || lv_is_merging(lv)) {
log_verbose("Spawning background lvconvert process for %s",
lv->name);
lvconvert_poll(cmd, lv, 1);

View File

@ -106,7 +106,8 @@ struct dm_list *create_pv_list(struct dm_pool *mem, struct volume_group *vg, int
struct dm_list *clone_pv_list(struct dm_pool *mem, struct dm_list *pvs);
void vgcreate_params_set_defaults(struct vgcreate_params *vp_def,
int vgcreate_params_set_defaults(struct cmd_context *cmd,
struct vgcreate_params *vp_def,
struct volume_group *vg);
int vgcreate_params_set_from_args(struct cmd_context *cmd,
struct vgcreate_params *vp_new,

View File

@ -36,7 +36,7 @@ static int _monitor_lvs_in_vg(struct cmd_context *cmd,
/*
* FIXME: Need to consider all cases... PVMOVE, etc
*/
if (lv->status & PVMOVE)
if (lv_is_pvmove(lv))
continue;
if (!monitor_dev_for_events(cmd, lv, 0, reg)) {
@ -67,7 +67,7 @@ static int _poll_lvs_in_vg(struct cmd_context *cmd,
lv_active = info.exists;
if (lv_active &&
(lv->status & (PVMOVE|CONVERTING|MERGING))) {
(lv_is_pvmove(lv) || lv_is_converting(lv) || lv_is_merging(lv))) {
lv_spawn_background_polling(cmd, lv);
count++;
}
@ -121,7 +121,7 @@ static int _activate_lvs_in_vg(struct cmd_context *cmd, struct volume_group *vg,
/* Can't deactivate a pvmove LV */
/* FIXME There needs to be a controlled way of doing this */
if ((lv->status & PVMOVE) && !is_change_activating(activate))
if (lv_is_pvmove(lv) && !is_change_activating(activate))
continue;
if (lv_activation_skip(lv, activate, arg_count(cmd, ignoreactivationskip_ARG)))

View File

@ -41,7 +41,8 @@ int vgcreate(struct cmd_context *cmd, int argc, char **argv)
return EINVALID_CMD_LINE;
}
vgcreate_params_set_defaults(&vp_def, NULL);
if (!vgcreate_params_set_defaults(cmd, &vp_def, NULL))
return EINVALID_CMD_LINE;
vp_def.vg_name = vg_name;
if (!vgcreate_params_set_from_args(cmd, &vp_new, &vp_def))
return EINVALID_CMD_LINE;

View File

@ -95,13 +95,13 @@ static int _make_vg_consistent(struct cmd_context *cmd, struct volume_group *vg)
goto restart;
}
if (lv->status & MIRRORED) {
if (lv_is_mirror(lv)) {
if (!mirror_remove_missing(cmd, lv, 1))
return_0;
goto restart;
}
if (arg_count(cmd, mirrorsonly_ARG) &&!(lv->status & MIRRORED)) {
if (arg_count(cmd, mirrorsonly_ARG) && !lv_is_mirrored(lv)) {
log_error("Non-mirror-image LV %s found: can't remove.", lv->name);
continue;
}

View File

@ -71,7 +71,7 @@ static int _move_lvs(struct volume_group *vg_from, struct volume_group *vg_to)
if (lv_is_raid(lv))
continue;
if ((lv->status & MIRRORED))
if (lv_is_mirrored(lv))
continue;
if (lv_is_thin_pool(lv) ||
@ -192,7 +192,7 @@ static int _move_mirrors(struct volume_group *vg_from,
if (lv_is_raid(lv))
continue;
if (!(lv->status & MIRRORED))
if (!lv_is_mirrored(lv))
continue;
seg = first_seg(lv);
@ -551,7 +551,10 @@ int vgsplit(struct cmd_context *cmd, int argc, char **argv)
if (!vgs_are_compatible(cmd, vg_from,vg_to))
goto_bad;
} else {
vgcreate_params_set_defaults(&vp_def, vg_from);
if (!vgcreate_params_set_defaults(cmd, &vp_def, vg_from)) {
r = EINVALID_CMD_LINE;
goto_bad;
}
vp_def.vg_name = vg_name_to;
if (!vgcreate_params_set_from_args(cmd, &vp_new, &vp_def)) {
r = EINVALID_CMD_LINE;