Mirror of git://sourceware.org/git/lvm2.git (synced 2025-12-29 06:57:46 +03:00)

Compare commits: dev-dct-wr ... dev-dct-re (20 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 76df65d520 | |
| | 395849f6ca | |
| | 5b15c84d3e | |
| | d4099fdcb0 | |
| | 7521b7deaa | |
| | a02cda4e70 | |
| | c36d9a73ac | |
| | 77c8e031af | |
| | 92534a31eb | |
| | b1ceee55a6 | |
| | 4de9e82b75 | |
| | a3601b4004 | |
| | 21f7dfe510 | |
| | da39cf4a9e | |
| | b536ae04d5 | |
| | ce8d1ed130 | |
| | a812f98c42 | |
| | 838ba5930c | |
| | 990c824c21 | |
| | 4eff1da46f | |
.gitignore (vendored): 6 lines changed
@@ -30,7 +30,7 @@ make.tmpl
/config.log
/config.status
/configure.scan
/cscope.*
/cscope.out
/html/
/reports/
/tags
@@ -38,10 +38,6 @@ make.tmpl
coverity/coverity_model.xml

# gcov files:
*.gcda
*.gcno

tools/man-generator
tools/man-generator.c
README: 11 lines changed
@@ -1,6 +1,7 @@
This tree contains the LVM2 and device-mapper tools and libraries.

This is development branch, for stable 2.02 release see stable-2.02 branch.
This is development branch, for stable 2.02 release see 2018-06-01-stable
branch.

For more information about LVM2 read the changelog in the WHATS_NEW file.
Installation instructions are in INSTALL.
@@ -9,6 +10,7 @@ There is no warranty - see COPYING and COPYING.LIB.

Tarballs are available from:
ftp://sourceware.org/pub/lvm2/
ftp://sources.redhat.com/pub/lvm2/
https://github.com/lvmteam/lvm2/releases

The source code is stored in git:
@@ -43,9 +45,6 @@ Report upstream bugs at:
or open issues at:
https://github.com/lvmteam/lvm2/issues

The source code repository used until 7th June 2012 is accessible using CVS:
The source code repository used until 7th June 2012 is accessible here:
http://sources.redhat.com/cgi-bin/cvsweb.cgi/LVM2/?cvsroot=lvm2.

cvs -d :pserver:cvs@sourceware.org:/cvs/lvm2 login cvs
cvs -d :pserver:cvs@sourceware.org:/cvs/lvm2 checkout LVM2

The password is cvs.
VERSION_DM
@@ -1 +1 @@
1.02.175-git (2020-08-09)
1.02.155-git (2018-10-31)
WHATS_NEW: 130 lines changed
@@ -1,125 +1,5 @@
Version 2.03.11 -
==================================
Enhance error handling for fsadm and hanled correct fsck result.
Dmeventd lvm plugin ignores higher reserved_stack lvm.conf values.
Support using BLKZEROOUT for clearing devices.
Support interruption when wipping LVs.
Support interruption for bcache waiting.
Fix bcache when device has too many failing writes.
Fix bcache waiting for IO completion with failing disks.
Configure use own python path name order to prefer using python3.
Add configure --enable-editline support as an alternative to readline.
Enhance reporting and error handling when creating thin volumes.
Enable vgsplit for VDO volumes.
Lvextend of vdo pool volumes ensure at least 1 new VDO slab is added.
Use revert_lv() on reload error path after vg_revert().
Configure --with-integrity enabled.
Restore lost signal blocking while VG lock is held.
Improve estimation of needed extents when creating thin-pool.
Use extra 1% when resizing thin-pool metadata LV with --use-policy.
Enhance --use-policy percentage rounding.
Configure --with-vdo and --with-writecache as internal segments.
Improving VDO man page examples.
Switch code base to use flexible array syntax.
Fix 64bit math when calculation cachevol size.
Preserve uint32_t for seqno handling.
Switch from mmap to plain read when loading regular files.
Update lvmvdo man page and better explain DISCARD usage.

Version 2.03.10 - 09th August 2020
==================================
Add writecache and integrity support to lvmdbusd.
Generate unique cachevol name when default required from lvcreate.
Converting RAID1 volume to one with same number of legs now succeeds with a
warning.
Fix conversion to raid from striped lagging type.
Fix conversion to 'mirrored' mirror log with larger regionsize.
Zero pool metadata on allocation (disable with allocation/zero_metadata=0).
Failure in zeroing or wiping will fail command (bypass with -Zn, -Wn).
Add lvcreate of new cache or writecache lv with single command.
Fix running out of free buffers for async writing for larger writes.
Add integrity with raid capability.
Fix support for lvconvert --repair used by foreign apps (i.e. Docker).

Version 2.03.09 - 26th March 2020
=================================
Fix formating of vdopool (vdo_slab_size_mb was smaller by 2 bits).
Fix showing of a dm kernel error when uncaching a volume with cachevol.

Version 2.03.08 - 11th February 2020
====================================
Prevent problematic snapshots of writecache volumes.
Add error handling for failing allocation in _reserve_area().
Fix memleak in syncing of internal cache.
Fix pvck dump_current_text memleak.
Fix lvmlockd result code on error path for _query_lock_lv().
Update pvck man page and help output.
Reject invalid writecache high/low_watermark setting.
Report writecache status.
Accept more output lines from vdo_format.
Prohibit reshaping of stacked raid LVs.
Avoid running cache input arg validation when creating vdo pool.
Prevent raid reshaping of stacked volumes.
Added VDO lvmdbusd methods for enable/disable compression & dedupe.
Added VDO lvmdbusd method for converting LV to VDO pool.

Version 2.03.07 - 30th November 2019
====================================
Subcommand in vgck for repairing headers and metadata.
Ensure minimum required region size on striped RaidLV creation.
Fix resize of thin-pool with data and metadata of different segtype.
Improve mirror type leg splitting.
Improve error path handling in daemons on shutdown.
Fix activation order when removing merged snapshot.
Experimental VDO support for lvmdbusd.

Version 2.03.06 - 23rd October 2019
Version 2.03.02 -
===================================
Add _cpool suffix to cache-pool LV name when used by caching LV.
No longer store extra UUID for cmeta and cdata cachevol layer.
Enhance activation of cache devices with cachevols.
Add _cvol in list of protected suffixes and start use it with DM UUID.
Rename LV converted to cachevol to use _cvol suffix.
Use normal LVs for wiping of cachevols.
Reload cleanered cache DM only with cleaner policy.
Fix cmd return when zeroing of cachevol fails.
Extend lvs to show all VDO properties.
Preserve VDO write policy with vdopool.
Increase default vdo bio threads to 4.
Continue report when cache_status fails.
Add support for DM_DEVICE_GET_TARGET_VERSION into device_mapper.
Fix cmirrord usage of header files from device_mapper subdir.
Allow standalone activation of VDO pool just like for thin-pools.
Activate thin-pool layered volume as 'read-only' device.
Ignore crypto devices with UUID signature CRYPT-SUBDEV.
Enhance validation for thin and cache pool conversion and swapping.
Improve internal removal of cached devices.
Synchronize with udev when dropping snapshot.
Add missing device synchronization point before removing pvmove node.
Correctly set read_ahead for LVs when pvmove is finished.
Remove unsupported OPTIONS+="event_timeout" udev rule from 11-dm-lvm.rules.
Prevent creating VGs with PVs with different logical block sizes.
Fix metadata writes from corrupting with large physical block size.

Version 2.03.05 - 15th June 2019
================================
Fix command definition for pvchange -a.
Add vgck --updatemetadata command that will repair metadata problems.
Improve VG reading to work if one good copy of metadata is found.
Report/display/scan commands that read VGs will no longer write/repair.
Move metadata repairs from VG reading to VG writing.
Add config setting md_component_checks to control MD component checks.
Add end of device MD component checks when dev has no udev info.

Version 2.03.04 - 10th June 2019
================================
Remove unused_duplicate_devs from cmd causing segfault in dmeventd.

Version 2.03.03 - 07th June 2019
================================
Report no_discard_passdown for cache LVs with lvs -o+kernel_discards.
Add pvck --dump option to extract metadata.
Fix signal delivery checking race in libdaemon (lvmetad).
Add missing Before=shutdown.target to LVM2 services to fix shutdown ordering.
Skip autoactivation for a PV when PV size does not match device size.
Remove first-pvscan-initialization which should no longer be needed.
@@ -152,14 +32,10 @@ Version 2.03.03 - 07th June 2019
Fix missing unlock on lvm2 dmeventd plugin error path initialization.
Improve Makefile dependency tracking.
Move VDO support towards V2 target (6.2) support.

Version 2.03.02 - 18th December 2018
====================================
Fix missing proper initialization of pv_list struct when adding pv.
Fix (de)activation of RaidLVs with visible SubLVs.
Prohibit mirrored 'mirror' log via lvcreate and lvconvert.
Fix (de)activation of RaidLVs with visible SubLVs
Prohibit mirrored 'mirror' log via lvcreate and lvconvert
Use sync io if async io_setup fails, or use_aio=0 is set in config.
Fix more issues reported by coverity scan.

Version 2.03.01 - 31st October 2018
===================================
WHATS_NEW_DM: 39 lines changed
@@ -1,47 +1,10 @@
Version 1.02.175 -
===================================

Version 1.02.173 - 09th August 2020
===================================
Add support for VDO in blkdeactivate script.

Version 1.02.171 - 26th March 2020
==================================
Try to remove all created devices on dm preload tree error path.
Fix dm_list interators with gcc 10 optimization (-ftree-pta).
Dmeventd handles timer without looping on short intervals.

Version 1.02.169 - 11th February 2020
=====================================
Enhance error messages for device creation.

Version 1.02.167 - 30th November 2019
=====================================

Version 1.02.165 - 23rd October 2019
Version 1.02.155 -
====================================
Add support for DM_DEVICE_GET_TARGET_VERSION.
Add debug of dmsetup udevcomplete with hexa print DM_COOKIE_COMPLETED.
Fix versioning of dm_stats_create_region and dm_stats_create_region.

Version 1.02.163 - 15th June 2019
=================================

Version 1.02.161 - 10th June 2019
=================================

Version 1.02.159 - 07th June 2019
=================================
Parsing of cache status understand no_discard_passdown.
Ensure migration_threshold for cache is at least 8 chunks.

Version 1.02.155 - 18th December 2018
=====================================
Include correct internal header inside libdm list.c.
Enhance ioctl flattening and add parameters only when needed.
Add DM_DEVICE_ARM_POLL for API completness matching kernel.
Do not add parameters for RESUME with DM_DEVICE_CREATE dm task.
Fix dmstats report printing no output.

Version 1.02.153 - 31st October 2018
====================================
aclocal.m4 (vendored): 16 lines changed
@@ -1,6 +1,6 @@
# generated automatically by aclocal 1.16.2 -*- Autoconf -*-
# generated automatically by aclocal 1.15.1 -*- Autoconf -*-

# Copyright (C) 1996-2020 Free Software Foundation, Inc.
# Copyright (C) 1996-2017 Free Software Foundation, Inc.

# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -413,7 +413,7 @@ AS_IF([test "$AS_TR_SH([with_]m4_tolower([$1]))" = "yes"],
[AC_DEFINE([HAVE_][$1], 1, [Enable ]m4_tolower([$1])[ support])])
])dnl PKG_HAVE_DEFINE_WITH_MODULES

# Copyright (C) 1999-2020 Free Software Foundation, Inc.
# Copyright (C) 1999-2017 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -446,12 +446,10 @@ AC_DEFUN([AM_PATH_PYTHON],
 [
  dnl Find a Python interpreter. Python versions prior to 2.0 are not
  dnl supported. (2.0 was released on October 16, 2000).
  dnl FIXME: Remove the need to hard-code Python versions here.
  m4_define_default([_AM_PYTHON_INTERPRETER_LIST],
[python python2 python3 dnl
python3.9 python3.8 python3.7 python3.6 python3.5 python3.4 python3.3 dnl
python3.2 python3.1 python3.0 dnl
python2.7 python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 dnl
python2.0])
[python python2 python3 python3.5 python3.4 python3.3 python3.2 python3.1 python3.0 python2.7 dnl
python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0])

  AC_ARG_VAR([PYTHON], [the Python interpreter])

@@ -651,7 +649,7 @@ for i in list(range(0, 4)): minverhex = (minverhex << 8) + minver[[i]]
sys.exit(sys.hexversion < minverhex)"
  AS_IF([AM_RUN_LOG([$1 -c "$prog"])], [$3], [$4])])

# Copyright (C) 2001-2020 Free Software Foundation, Inc.
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -22,7 +22,7 @@ struct dm_hash_node {
	void *data;
	unsigned data_len;
	unsigned keylen;
	char key[];
	char key[0];
};

struct dm_hash_table {
@@ -59,27 +59,26 @@ static unsigned char _nums[] = {
	209
};

static struct dm_hash_node *_create_node(const void *key, unsigned len)
static struct dm_hash_node *_create_node(const char *str, unsigned len)
{
	struct dm_hash_node *n = malloc(sizeof(*n) + len);

	if (n) {
		memcpy(n->key, key, len);
		memcpy(n->key, str, len);
		n->keylen = len;
	}

	return n;
}

static unsigned long _hash(const void *key, unsigned len)
static unsigned long _hash(const char *str, unsigned len)
{
	const unsigned char *str = key;
	unsigned long h = 0, g;
	unsigned i;

	for (i = 0; i < len; i++) {
		h <<= 4;
		h += _nums[*str++];
		h += _nums[(unsigned char) *str++];
		g = h & ((unsigned long) 0xf << 16u);
		if (g) {
			h ^= g >> 16u;
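The `char key[]` / `char key[0]` pair in the hunk above is the flexible-array-member change behind the "Switch code base to use flexible array syntax" entry in WHATS_NEW. A minimal standalone sketch of the allocation pattern both spellings rely on (the struct and function names here are illustrative, not taken from the LVM2 tree):

```c
#include <stdlib.h>
#include <string.h>

/* C99 flexible array member: the node header and its key bytes share one
 * allocation.  Older code spelled the last member as "char key[0]", a GNU
 * zero-length-array extension with the same layout. */
struct hash_node {
	unsigned keylen;
	char key[];		/* must be the last member of the struct */
};

static struct hash_node *node_create(const void *key, unsigned len)
{
	/* sizeof(*n) covers only the fixed header; add room for the key bytes */
	struct hash_node *n = malloc(sizeof(*n) + len);

	if (n) {
		memcpy(n->key, key, len);
		n->keylen = len;
	}

	return n;
}
```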
@@ -1,8 +1,6 @@
#ifndef BASE_DATA_STRUCT_LIST_H
#define BASE_DATA_STRUCT_LIST_H

#include "base/memory/container_of.h"

//----------------------------------------------------------------

/*
@@ -100,7 +98,7 @@ struct dm_list *dm_list_next(const struct dm_list *head, const struct dm_list *e
 * contained in a structure of type t, return the containing structure.
 */
#define dm_list_struct_base(v, t, head) \
	container_of(v, t, head)
	((t *)((const char *)(v) - (const char *)&((t *) 0)->head))

/*
 * Given the address v of an instance of 'struct dm_list list' contained in
@@ -113,7 +111,7 @@ struct dm_list *dm_list_next(const struct dm_list *head, const struct dm_list *e
 * return another element f.
 */
#define dm_struct_field(v, t, e, f) \
	(((t *)((uintptr_t)(v) - offsetof(t, e)))->f)
	(((t *)((uintptr_t)(v) - (uintptr_t)&((t *) 0)->e))->f)

/*
 * Given the address v of a known element e in a known structure of type t,

@@ -47,7 +47,7 @@ struct value_chain {
struct prefix_chain {
	struct value child;
	unsigned len;
	uint8_t prefix[];
	uint8_t prefix[0];
};

struct node4 {
@@ -1032,7 +1032,7 @@ void radix_tree_iterate(struct radix_tree *rt, uint8_t *kb, uint8_t *ke,
{
	struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke);
	if (lr.kb == ke || _prefix_chain_matches(&lr, ke))
		(void) _iterate(lr.v, it);
		_iterate(lr.v, it);
}

//----------------------------------------------------------------
@@ -1,4 +1,4 @@
// Copyright (C) 2018 - 2020 Red Hat, Inc. All rights reserved.
// Copyright (C) 2018 Red Hat, Inc. All rights reserved.
//
// This file is part of LVM2.
//
@@ -13,12 +13,10 @@
#ifndef BASE_MEMORY_CONTAINER_OF_H
#define BASE_MEMORY_CONTAINER_OF_H

#include <stddef.h> // offsetof

//----------------------------------------------------------------

#define container_of(v, t, head) \
	((t *)((char *)(v) - offsetof(t, head)))
	((t *)((const char *)(v) - (const char *)&((t *) 0)->head))

//----------------------------------------------------------------
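The container_of hunk above swaps the hand-rolled null-pointer cast for offsetof() from <stddef.h>. A minimal, self-contained sketch of how the macro recovers the enclosing structure from a pointer to an embedded member (the struct names below are illustrative only, not from the LVM2 tree):

```c
#include <stddef.h>	/* offsetof */
#include <stdio.h>

/* Same shape as the new definition in the diff above. */
#define container_of(v, t, head) \
	((t *)((char *)(v) - offsetof(t, head)))

struct link {
	struct link *next;	/* minimal intrusive list node */
};

struct item {
	int value;
	struct link list;	/* link embedded in the containing struct */
};

int main(void)
{
	struct item it = { .value = 42, .list = { NULL } };
	struct link *l = &it.list;

	/* Given only the embedded link, step back to the containing item. */
	struct item *back = container_of(l, struct item, list);
	printf("%d\n", back->value);	/* prints 42 */

	return 0;
}
```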
@@ -88,22 +88,6 @@ devices {
|
||||
#
|
||||
external_device_info_source = "none"
|
||||
|
||||
# Configuration option devices/hints.
|
||||
# Use a local file to remember which devices have PVs on them.
|
||||
# Some commands will use this as an optimization to reduce device
|
||||
# scanning, and will only scan the listed PVs. Removing the hint file
|
||||
# will cause lvm to generate a new one. Disable hints if PVs will
|
||||
# be copied onto devices using non-lvm commands, like dd.
|
||||
#
|
||||
# Accepted values:
|
||||
# all
|
||||
# Use all hints.
|
||||
# none
|
||||
# Use no hints.
|
||||
#
|
||||
# This configuration option has an automatic default value.
|
||||
# hints = "all"
|
||||
|
||||
# Configuration option devices/preferred_names.
|
||||
# Select which path name to display for a block device.
|
||||
# If multiple path names exist for a block device, and LVM needs to
|
||||
@@ -142,7 +126,7 @@ devices {
|
||||
#
|
||||
# Example
|
||||
# Accept every block device:
|
||||
# filter = [ "a|.*|" ]
|
||||
# filter = [ "a|.*/|" ]
|
||||
# Reject the cdrom drive:
|
||||
# filter = [ "r|/dev/cdrom|" ]
|
||||
# Work with just loopback devices, e.g. for testing:
|
||||
@@ -150,10 +134,10 @@ devices {
|
||||
# Accept all loop devices and ide drives except hdc:
|
||||
# filter = [ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]
|
||||
# Use anchors to be very specific:
|
||||
# filter = [ "a|^/dev/hda8$|", "r|.*|" ]
|
||||
# filter = [ "a|^/dev/hda8$|", "r|.*/|" ]
|
||||
#
|
||||
# This configuration option has an automatic default value.
|
||||
# filter = [ "a|.*|" ]
|
||||
# filter = [ "a|.*/|" ]
|
||||
|
||||
# Configuration option devices/global_filter.
|
||||
# Limit the block devices that are used by LVM system components.
|
||||
@@ -163,7 +147,7 @@ devices {
|
||||
# The syntax is the same as devices/filter. Devices rejected by
|
||||
# global_filter are not opened by LVM.
|
||||
# This configuration option has an automatic default value.
|
||||
# global_filter = [ "a|.*|" ]
|
||||
# global_filter = [ "a|.*/|" ]
|
||||
|
||||
# Configuration option devices/types.
|
||||
# List of additional acceptable block device types.
|
||||
@@ -183,52 +167,17 @@ devices {
|
||||
sysfs_scan = 1
|
||||
|
||||
# Configuration option devices/scan_lvs.
|
||||
# Scan LVM LVs for layered PVs, allowing LVs to be used as PVs.
|
||||
# When 1, LVM will detect PVs layered on LVs, and caution must be
|
||||
# taken to avoid a host accessing a layered VG that may not belong
|
||||
# to it, e.g. from a guest image. This generally requires excluding
|
||||
# the LVs with device filters. Also, when this setting is enabled,
|
||||
# every LVM command will scan every active LV on the system (unless
|
||||
# filtered), which can cause performance problems on systems with
|
||||
# many active LVs. When this setting is 0, LVM will not detect or
|
||||
# use PVs that exist on LVs, and will not allow a PV to be created on
|
||||
# an LV. The LVs are ignored using a built in device filter that
|
||||
# identifies and excludes LVs.
|
||||
scan_lvs = 0
|
||||
# Scan LVM LVs for layered PVs.
|
||||
scan_lvs = 1
|
||||
|
||||
# Configuration option devices/multipath_component_detection.
|
||||
# Ignore devices that are components of DM multipath devices.
|
||||
multipath_component_detection = 1
|
||||
|
||||
# Configuration option devices/md_component_detection.
|
||||
# Enable detection and exclusion of MD component devices.
|
||||
# An MD component device is a block device that MD uses as part
|
||||
# of a software RAID virtual device. When an LVM PV is created
|
||||
# on an MD device, LVM must only use the top level MD device as
|
||||
# the PV, and should ignore the underlying component devices.
|
||||
# In cases where the MD superblock is located at the end of the
|
||||
# component devices, it is more difficult for LVM to consistently
|
||||
# identify an MD component, see the md_component_checks setting.
|
||||
# Ignore devices that are components of software RAID (md) devices.
|
||||
md_component_detection = 1
|
||||
|
||||
# Configuration option devices/md_component_checks.
|
||||
# The checks LVM should use to detect MD component devices.
|
||||
# MD component devices are block devices used by MD software RAID.
|
||||
#
|
||||
# Accepted values:
|
||||
# auto
|
||||
# LVM will skip scanning the end of devices when it has other
|
||||
# indications that the device is not an MD component.
|
||||
# start
|
||||
# LVM will only scan the start of devices for MD superblocks.
|
||||
# This does not incur extra I/O by LVM.
|
||||
# full
|
||||
# LVM will scan the start and end of devices for MD superblocks.
|
||||
# This requires an extra read at the end of devices.
|
||||
#
|
||||
# This configuration option has an automatic default value.
|
||||
# md_component_checks = "auto"
|
||||
|
||||
# Configuration option devices/fw_raid_component_detection.
|
||||
# Ignore devices that are components of firmware RAID devices.
|
||||
# LVM must use an external_device_info_source other than none for this
|
||||
@@ -347,12 +296,6 @@ devices {
|
||||
# Enabling this setting allows the VG to be used as usual even with
|
||||
# uncertain devices.
|
||||
allow_changes_with_duplicate_pvs = 0
|
||||
|
||||
# Configuration option devices/allow_mixed_block_sizes.
|
||||
# Allow PVs in the same VG with different logical block sizes.
|
||||
# When allowed, the user is responsible to ensure that an LV is
|
||||
# using PVs with matching block sizes when necessary.
|
||||
allow_mixed_block_sizes = 0
|
||||
}
|
||||
|
||||
# Configuration section allocation.
|
||||
@@ -429,8 +372,7 @@ allocation {
|
||||
|
||||
# Configuration option allocation/cache_pool_metadata_require_separate_pvs.
|
||||
# Cache pool metadata and data will always use different PVs.
|
||||
# This configuration option has an automatic default value.
|
||||
# cache_pool_metadata_require_separate_pvs = 0
|
||||
cache_pool_metadata_require_separate_pvs = 0
|
||||
|
||||
# Configuration option allocation/cache_metadata_format.
|
||||
# Sets default metadata format for new cache.
|
||||
@@ -489,9 +431,8 @@ allocation {
|
||||
# This configuration option does not have a default value defined.
|
||||
|
||||
# Configuration option allocation/thin_pool_metadata_require_separate_pvs.
|
||||
# Thin pool metadata and data will always use different PVs.
|
||||
# This configuration option has an automatic default value.
|
||||
# thin_pool_metadata_require_separate_pvs = 0
|
||||
# Thin pool metdata and data will always use different PVs.
|
||||
thin_pool_metadata_require_separate_pvs = 0
|
||||
|
||||
# Configuration option allocation/thin_pool_zero.
|
||||
# Thin pool data chunks are zeroed before they are first used.
|
||||
@@ -528,11 +469,6 @@ allocation {
|
||||
# This configuration option has an automatic default value.
|
||||
# thin_pool_chunk_size_policy = "generic"
|
||||
|
||||
# Configuration option allocation/zero_metadata.
|
||||
# Zero whole metadata area before use with thin or cache pool.
|
||||
# This configuration option has an automatic default value.
|
||||
# zero_metadata = 1
|
||||
|
||||
# Configuration option allocation/thin_pool_chunk_size.
|
||||
# The minimal chunk size in KiB for thin pool volumes.
|
||||
# Larger chunk sizes may improve performance for plain thin volumes,
|
||||
@@ -628,7 +564,7 @@ allocation {
|
||||
# Each additional thread after the first will use an additional 18MiB of RAM,
|
||||
# plus 1.12 MiB of RAM per megabyte of configured read cache size.
|
||||
# This configuration option has an automatic default value.
|
||||
# vdo_bio_threads = 4
|
||||
# vdo_bio_threads = 1
|
||||
|
||||
# Configuration option allocation/vdo_bio_rotation.
|
||||
# Specifies the number of I/O operations to enqueue for each bio-submission
|
||||
@@ -784,8 +720,7 @@ log {
|
||||
|
||||
# Configuration option log/indent.
|
||||
# Indent messages according to their severity.
|
||||
# This configuration option has an automatic default value.
|
||||
# indent = 0
|
||||
indent = 1
|
||||
|
||||
# Configuration option log/command_names.
|
||||
# Display the command name on each line of output.
|
||||
@@ -811,20 +746,6 @@ log {
|
||||
# available: memory, devices, io, activation, allocation,
|
||||
# metadata, cache, locking, lvmpolld. Use "all" to see everything.
|
||||
debug_classes = [ "memory", "devices", "io", "activation", "allocation", "metadata", "cache", "locking", "lvmpolld", "dbus" ]
|
||||
|
||||
# Configuration option log/debug_file_fields.
|
||||
# The fields included in debug output written to log file.
|
||||
# Use "all" to include everything (the default).
|
||||
# This configuration option is advanced.
|
||||
# This configuration option has an automatic default value.
|
||||
# debug_file_fields = [ "time", "command", "fileline", "message" ]
|
||||
|
||||
# Configuration option log/debug_output_fields.
|
||||
# The fields included in debug output written to stderr.
|
||||
# Use "all" to include everything (the default).
|
||||
# This configuration option is advanced.
|
||||
# This configuration option has an automatic default value.
|
||||
# debug_output_fields = [ "time", "command", "fileline", "message" ]
|
||||
}
|
||||
|
||||
# Configuration section backup.
|
||||
@@ -912,6 +833,9 @@ global {
|
||||
# the error messages.
|
||||
activation = 1
|
||||
|
||||
# Configuration option global/segment_libraries.
|
||||
# This configuration option does not have a default value defined.
|
||||
|
||||
# Configuration option global/proc.
|
||||
# Location of proc filesystem.
|
||||
# This configuration option is advanced.
|
||||
@@ -993,8 +917,7 @@ global {
|
||||
# logs and conversion to disk/core works.
|
||||
#
|
||||
# Not supported for regular operation!
|
||||
# This configuration option has an automatic default value.
|
||||
# support_mirrored_mirror_log = 0
|
||||
support_mirrored_mirror_log = 0
|
||||
|
||||
# Configuration option global/raid10_segtype_default.
|
||||
# The segment type used by the -i -m combination.
|
||||
@@ -1052,8 +975,7 @@ global {
|
||||
# activated from these events (the default is all.)
|
||||
# When event_activation is disabled, the system will generally run
|
||||
# a direct activation command to activate LVs in complete VGs.
|
||||
# This configuration option has an automatic default value.
|
||||
# event_activation = 1
|
||||
event_activation = 1
|
||||
|
||||
# Configuration option global/use_aio.
|
||||
# Use async I/O when reading and writing devices.
|
||||
@@ -1252,16 +1174,6 @@ global {
|
||||
# When enabled, an LVM command that changes PVs, changes VG metadata,
|
||||
# or changes the activation state of an LV will send a notification.
|
||||
notify_dbus = 1
|
||||
|
||||
# Configuration option global/io_memory_size.
|
||||
# The amount of memory in KiB that LVM allocates to perform disk io.
|
||||
# LVM performance may benefit from more io memory when there are many
|
||||
# disks or VG metadata is large. Increasing this size may be necessary
|
||||
# when a single copy of VG metadata is larger than the current setting.
|
||||
# This value should usually not be decreased from the default; setting
|
||||
# it too low can result in lvm failing to read VGs.
|
||||
# This configuration option has an automatic default value.
|
||||
# io_memory_size = 8192
|
||||
}
|
||||
|
||||
# Configuration section activation.
|
||||
@@ -1297,8 +1209,7 @@ activation {
|
||||
# This enables additional checks (and if necessary, repairs) on entries
|
||||
# in the device directory after udev has completed processing its
|
||||
# events. Useful for diagnosing problems with LVM/udev interactions.
|
||||
# This configuration option has an automatic default value.
|
||||
# verify_udev_operations = 0
|
||||
verify_udev_operations = 0
|
||||
|
||||
# Configuration option activation/retry_deactivation.
|
||||
# Retry failed LV deactivation.
|
||||
@@ -1323,27 +1234,23 @@ activation {
|
||||
# When disabled, the striped target is used. The linear target is an
|
||||
# optimised version of the striped target that only handles a single
|
||||
# stripe.
|
||||
# This configuration option has an automatic default value.
|
||||
# use_linear_target = 1
|
||||
use_linear_target = 1
|
||||
|
||||
# Configuration option activation/reserved_stack.
|
||||
# Stack size in KiB to reserve for use while devices are suspended.
|
||||
# Insufficent reserve risks I/O deadlock during device suspension.
|
||||
# This configuration option has an automatic default value.
|
||||
# reserved_stack = 64
|
||||
reserved_stack = 64
|
||||
|
||||
# Configuration option activation/reserved_memory.
|
||||
# Memory size in KiB to reserve for use while devices are suspended.
|
||||
# Insufficent reserve risks I/O deadlock during device suspension.
|
||||
# This configuration option has an automatic default value.
|
||||
# reserved_memory = 8192
|
||||
reserved_memory = 8192
|
||||
|
||||
# Configuration option activation/process_priority.
|
||||
# Nice value used while devices are suspended.
|
||||
# Use a high priority so that LVs are suspended
|
||||
# for the shortest possible time.
|
||||
# This configuration option has an automatic default value.
|
||||
# process_priority = -18
|
||||
process_priority = -18
|
||||
|
||||
# Configuration option activation/volume_list.
|
||||
# Only LVs selected by this list are activated.
|
||||
@@ -1460,8 +1367,7 @@ activation {
|
||||
# auto
|
||||
# Use default value chosen by kernel.
|
||||
#
|
||||
# This configuration option has an automatic default value.
|
||||
# readahead = "auto"
|
||||
readahead = "auto"
|
||||
|
||||
# Configuration option activation/raid_fault_policy.
|
||||
# Defines how a device failure in a RAID LV is handled.
|
||||
@@ -1597,8 +1503,7 @@ activation {
|
||||
# 8.4G, it is extended to 14.4G:
|
||||
# vdo_pool_autoextend_threshold = 70
|
||||
#
|
||||
# This configuration option has an automatic default value.
|
||||
# vdo_pool_autoextend_threshold = 100
|
||||
vdo_pool_autoextend_threshold = 100
|
||||
|
||||
# Configuration option activation/vdo_pool_autoextend_percent.
|
||||
# Auto-extending a VDO pool adds this percent extra space.
|
||||
@@ -1634,8 +1539,7 @@ activation {
|
||||
# Use the old behavior of mlockall to pin all memory.
|
||||
# Prior to version 2.02.62, LVM used mlockall() to pin the whole
|
||||
# process's memory while activating devices.
|
||||
# This configuration option has an automatic default value.
|
||||
# use_mlockall = 0
|
||||
use_mlockall = 0
|
||||
|
||||
# Configuration option activation/monitoring.
|
||||
# Monitor LVs that are activated.
|
||||
@@ -1650,8 +1554,7 @@ activation {
|
||||
# intervals of this number of seconds. If this is set to 0 and there
|
||||
# is only one thing to wait for, there are no progress reports, but
|
||||
# the process is awoken immediately once the operation is complete.
|
||||
# This configuration option has an automatic default value.
|
||||
# polling_interval = 15
|
||||
polling_interval = 15
|
||||
|
||||
# Configuration option activation/auto_set_activation_skip.
|
||||
# Set the activation skip flag on new thin snapshot LVs.
|
||||
@@ -1772,6 +1675,7 @@ activation {
|
||||
# additional space for VG metadata. The --metadatasize option overrides
|
||||
# this setting.
|
||||
# This configuration option does not have a default value defined.
|
||||
# This configuration option has an automatic default value.
|
||||
|
||||
# Configuration option metadata/pvmetadataignore.
|
||||
# Ignore metadata areas on a new PV.
|
||||
@@ -2207,8 +2111,7 @@ dmeventd {
|
||||
# failures. It removes failed devices from a volume group and
|
||||
# reconfigures a mirror as necessary. If no mirror library is
|
||||
# provided, mirrors are not monitored through dmeventd.
|
||||
# This configuration option has an automatic default value.
|
||||
# mirror_library = "libdevmapper-event-lvm2mirror.so"
|
||||
mirror_library = "libdevmapper-event-lvm2mirror.so"
|
||||
|
||||
# Configuration option dmeventd/raid_library.
|
||||
# This configuration option has an automatic default value.
|
||||
@@ -2219,16 +2122,14 @@ dmeventd {
|
||||
# libdevmapper-event-lvm2snapshot.so monitors the filling of snapshots
|
||||
# and emits a warning through syslog when the usage exceeds 80%. The
|
||||
# warning is repeated when 85%, 90% and 95% of the snapshot is filled.
|
||||
# This configuration option has an automatic default value.
|
||||
# snapshot_library = "libdevmapper-event-lvm2snapshot.so"
|
||||
snapshot_library = "libdevmapper-event-lvm2snapshot.so"
|
||||
|
||||
# Configuration option dmeventd/thin_library.
|
||||
# The library dmeventd uses when monitoring a thin device.
|
||||
# libdevmapper-event-lvm2thin.so monitors the filling of a pool
|
||||
# and emits a warning through syslog when the usage exceeds 80%. The
|
||||
# warning is repeated when 85%, 90% and 95% of the pool is filled.
|
||||
# This configuration option has an automatic default value.
|
||||
# thin_library = "libdevmapper-event-lvm2thin.so"
|
||||
thin_library = "libdevmapper-event-lvm2thin.so"
|
||||
|
||||
# Configuration option dmeventd/thin_command.
|
||||
# The plugin runs command with each 5% increment when thin-pool data volume
|
||||
|
||||
configure (vendored): 349 lines changed
@@ -742,7 +742,6 @@ CLDNOWHOLEARCHIVE
|
||||
CLDFLAGS
|
||||
CACHE
|
||||
BUILD_DMFILEMAPD
|
||||
BUILD_LOCKDDLM_CONTROL
|
||||
BUILD_LOCKDDLM
|
||||
BUILD_LOCKDSANLOCK
|
||||
BUILD_LVMLOCKD
|
||||
@@ -753,8 +752,6 @@ BUILD_CMIRRORD
|
||||
BLKID_PC
|
||||
MODPROBE_CMD
|
||||
MSGFMT
|
||||
EDITLINE_LIBS
|
||||
EDITLINE_CFLAGS
|
||||
PYTHON3_CONFIG
|
||||
pkgpyexecdir
|
||||
pyexecdir
|
||||
@@ -774,8 +771,6 @@ BLKID_LIBS
|
||||
BLKID_CFLAGS
|
||||
NOTIFY_DBUS_LIBS
|
||||
NOTIFY_DBUS_CFLAGS
|
||||
LOCKD_DLM_CONTROL_LIBS
|
||||
LOCKD_DLM_CONTROL_CFLAGS
|
||||
LOCKD_DLM_LIBS
|
||||
LOCKD_DLM_CFLAGS
|
||||
LOCKD_SANLOCK_LIBS
|
||||
@@ -920,9 +915,7 @@ enable_cache_check_needs_check
|
||||
with_vdo
|
||||
with_vdo_format
|
||||
with_writecache
|
||||
with_integrity
|
||||
enable_readline
|
||||
enable_editline
|
||||
enable_realtime
|
||||
enable_ocf
|
||||
with_ocfdir
|
||||
@@ -939,7 +932,6 @@ enable_devmapper
|
||||
enable_lvmpolld
|
||||
enable_lvmlockd_sanlock
|
||||
enable_lvmlockd_dlm
|
||||
enable_lvmlockd_dlmcontrol
|
||||
enable_use_lvmlockd
|
||||
with_lvmlockd_pidfile
|
||||
enable_use_lvmpolld
|
||||
@@ -963,7 +955,6 @@ enable_fsadm
|
||||
enable_blkdeactivate
|
||||
enable_dmeventd
|
||||
enable_selinux
|
||||
enable_blkzeroout
|
||||
enable_nls
|
||||
with_localedir
|
||||
with_confdir
|
||||
@@ -1009,8 +1000,6 @@ LOCKD_SANLOCK_CFLAGS
|
||||
LOCKD_SANLOCK_LIBS
|
||||
LOCKD_DLM_CFLAGS
|
||||
LOCKD_DLM_LIBS
|
||||
LOCKD_DLM_CONTROL_CFLAGS
|
||||
LOCKD_DLM_CONTROL_LIBS
|
||||
NOTIFY_DBUS_CFLAGS
|
||||
NOTIFY_DBUS_LIBS
|
||||
BLKID_CFLAGS
|
||||
@@ -1019,9 +1008,7 @@ SYSTEMD_CFLAGS
|
||||
SYSTEMD_LIBS
|
||||
UDEV_CFLAGS
|
||||
UDEV_LIBS
|
||||
PYTHON
|
||||
EDITLINE_CFLAGS
|
||||
EDITLINE_LIBS'
|
||||
PYTHON'
|
||||
|
||||
|
||||
# Initialize some variables set by options.
|
||||
@@ -1644,7 +1631,6 @@ Optional Features:
|
||||
--disable-cache_check_needs_check
|
||||
required if cache_check version is < 0.5
|
||||
--disable-readline disable readline support
|
||||
--enable-editline enable editline support
|
||||
--disable-realtime disable realtime clock support
|
||||
--enable-ocf enable Open Cluster Framework (OCF) compliant
|
||||
resource agents
|
||||
@@ -1657,8 +1643,6 @@ Optional Features:
|
||||
--enable-lvmlockd-sanlock
|
||||
enable the LVM lock daemon using sanlock
|
||||
--enable-lvmlockd-dlm enable the LVM lock daemon using dlm
|
||||
--enable-lvmlockd-dlmcontrol
|
||||
enable lvmlockd remote refresh using libdlmcontrol
|
||||
--disable-use-lvmlockd disable usage of LVM lock daemon
|
||||
--disable-use-lvmpolld disable usage of LVM Poll Daemon
|
||||
--enable-dmfilemapd enable the dmstats filemap daemon
|
||||
@@ -1685,7 +1669,6 @@ Optional Features:
|
||||
--disable-blkdeactivate disable blkdeactivate
|
||||
--enable-dmeventd enable the device-mapper event daemon
|
||||
--disable-selinux disable selinux support
|
||||
--disable-blkzeroout do not use BLKZEROOUT for device zeroing
|
||||
--enable-nls enable Native Language Support
|
||||
|
||||
Optional Packages:
|
||||
@@ -1725,15 +1708,14 @@ Optional Packages:
|
||||
--with-vdo=TYPE vdo support: internal/none [internal]
|
||||
--with-vdo-format=PATH vdoformat tool: [autodetect]
|
||||
--with-writecache=TYPE writecache support: internal/none [internal]
|
||||
--with-integrity=TYPE integrity support: internal/none [internal]
|
||||
--with-ocfdir=DIR install OCF files in
|
||||
[PREFIX/lib/ocf/resource.d/lvm2]
|
||||
--with-default-pid-dir=PID_DIR
|
||||
default directory to keep PID files in [autodetect]
|
||||
Default directory to keep PID files in. [autodetect]
|
||||
--with-default-dm-run-dir=DM_RUN_DIR
|
||||
default DM run directory [autodetect]
|
||||
Default DM run directory. [autodetect]
|
||||
--with-default-run-dir=RUN_DIR
|
||||
default LVM run directory [autodetect_run_dir/lvm]
|
||||
Default LVM run directory. [autodetect_run_dir/lvm]
|
||||
--with-cmirrord-pidfile=PATH
|
||||
cmirrord pidfile [PID_DIR/cmirrord.pid]
|
||||
--with-optimisation=OPT C optimisation flag [OPT=-O2]
|
||||
@@ -1806,10 +1788,6 @@ Some influential environment variables:
|
||||
C compiler flags for LOCKD_DLM, overriding pkg-config
|
||||
LOCKD_DLM_LIBS
|
||||
linker flags for LOCKD_DLM, overriding pkg-config
|
||||
LOCKD_DLM_CONTROL_CFLAGS
|
||||
C compiler flags for LOCKD_DLM_CONTROL, overriding pkg-config
|
||||
LOCKD_DLM_CONTROL_LIBS
|
||||
linker flags for LOCKD_DLM_CONTROL, overriding pkg-config
|
||||
NOTIFY_DBUS_CFLAGS
|
||||
C compiler flags for NOTIFY_DBUS, overriding pkg-config
|
||||
NOTIFY_DBUS_LIBS
|
||||
@@ -1824,10 +1802,6 @@ Some influential environment variables:
|
||||
UDEV_CFLAGS C compiler flags for UDEV, overriding pkg-config
|
||||
UDEV_LIBS linker flags for UDEV, overriding pkg-config
|
||||
PYTHON the Python interpreter
|
||||
EDITLINE_CFLAGS
|
||||
C compiler flags for EDITLINE, overriding pkg-config
|
||||
EDITLINE_LIBS
|
||||
linker flags for EDITLINE, overriding pkg-config
|
||||
|
||||
Use these variables to override the choices made by `configure' or to help
|
||||
it to find libraries and programs with nonstandard names/locations.
|
||||
@@ -3091,7 +3065,7 @@ if test -z "$CFLAGS"; then :
|
||||
fi
|
||||
case "$host_os" in
|
||||
linux*)
|
||||
CLDFLAGS="${CLDFLAGS-"$LDFLAGS"} -Wl,--version-script,.export.sym"
|
||||
CLDFLAGS="${CLDFLAGS:"$LDFLAGS"} -Wl,--version-script,.export.sym"
|
||||
# equivalent to -rdynamic
|
||||
ELDFLAGS="-Wl,--export-dynamic"
|
||||
# FIXME Generate list and use --dynamic-list=.dlopen.sym
|
||||
@@ -3103,7 +3077,6 @@ case "$host_os" in
|
||||
BUILD_LVMPOLLD=no
|
||||
LOCKDSANLOCK=no
|
||||
LOCKDDLM=no
|
||||
LOCKDDLM_CONTROL=no
|
||||
ODIRECT=yes
|
||||
DM_IOCTLS=yes
|
||||
SELINUX=yes
|
||||
@@ -3112,7 +3085,7 @@ case "$host_os" in
|
||||
;;
|
||||
darwin*)
|
||||
CFLAGS="$CFLAGS -no-cpp-precomp -fno-common"
|
||||
CLDFLAGS="${CLDFLAGS-"$LDFLAGS"}"
|
||||
CLDFLAGS="${CLDFLAGS:"$LDFLAGS"}"
|
||||
ELDFLAGS=
|
||||
CLDWHOLEARCHIVE="-all_load"
|
||||
CLDNOWHOLEARCHIVE=
|
||||
@@ -3125,7 +3098,7 @@ case "$host_os" in
|
||||
BLKDEACTIVATE=no
|
||||
;;
|
||||
*)
|
||||
CLDFLAGS="${CLDFLAGS-"$LDFLAGS"}"
|
||||
CLDFLAGS="${CLDFLAGS:"$LDFLAGS"}"
|
||||
;;
|
||||
esac
|
||||
|
||||
@@ -6661,7 +6634,7 @@ $as_echo "#define _REENTRANT 1" >>confdefs.h
|
||||
|
||||
################################################################################
|
||||
for ac_func in ftruncate gethostname getpagesize gettimeofday localtime_r \
|
||||
memchr memset mkdir mkfifo munmap nl_langinfo pselect realpath rmdir setenv \
|
||||
memchr memset mkdir mkfifo munmap nl_langinfo realpath rmdir setenv \
|
||||
setlocale strcasecmp strchr strcspn strdup strerror strncasecmp strndup \
|
||||
strrchr strspn strstr strtol strtoul uname
|
||||
do :
|
||||
@@ -6677,17 +6650,6 @@ else
|
||||
fi
|
||||
done
|
||||
|
||||
for ac_func in prlimit
|
||||
do :
|
||||
ac_fn_c_check_func "$LINENO" "prlimit" "ac_cv_func_prlimit"
|
||||
if test "x$ac_cv_func_prlimit" = xyes; then :
|
||||
cat >>confdefs.h <<_ACEOF
|
||||
#define HAVE_PRLIMIT 1
|
||||
_ACEOF
|
||||
|
||||
fi
|
||||
done
|
||||
|
||||
# The Ultrix 4.2 mips builtin alloca declared by alloca.h only works
|
||||
# for constant arguments. Useless!
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for working alloca.h" >&5
|
||||
@@ -9598,7 +9560,7 @@ $as_echo_n "checking whether to include vdo... " >&6; }
|
||||
if test "${with_vdo+set}" = set; then :
|
||||
withval=$with_vdo; VDO=$withval
|
||||
else
|
||||
VDO="internal"
|
||||
VDO="none"
|
||||
fi
|
||||
|
||||
|
||||
@@ -9758,7 +9720,7 @@ $as_echo_n "checking whether to include writecache... " >&6; }
|
||||
if test "${with_writecache+set}" = set; then :
|
||||
withval=$with_writecache; WRITECACHE=$withval
|
||||
else
|
||||
WRITECACHE="internal"
|
||||
WRITECACHE="none"
|
||||
fi
|
||||
|
||||
|
||||
@@ -9775,31 +9737,6 @@ $as_echo "#define WRITECACHE_INTERNAL 1" >>confdefs.h
|
||||
*) as_fn_error $? "--with-writecache parameter invalid" "$LINENO" 5 ;;
|
||||
esac
|
||||
|
||||
################################################################################
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to include integrity" >&5
|
||||
$as_echo_n "checking whether to include integrity... " >&6; }
|
||||
|
||||
# Check whether --with-integrity was given.
|
||||
if test "${with_integrity+set}" = set; then :
|
||||
withval=$with_integrity; INTEGRITY=$withval
|
||||
else
|
||||
INTEGRITY="internal"
|
||||
fi
|
||||
|
||||
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INTEGRITY" >&5
|
||||
$as_echo "$INTEGRITY" >&6; }
|
||||
|
||||
case "$INTEGRITY" in
|
||||
none) ;;
|
||||
internal)
|
||||
|
||||
$as_echo "#define INTEGRITY_INTERNAL 1" >>confdefs.h
|
||||
|
||||
;;
|
||||
*) as_fn_error $? "--with-integrity parameter invalid" "$LINENO" 5 ;;
|
||||
esac
|
||||
|
||||
################################################################################
|
||||
# Check whether --enable-readline was given.
|
||||
if test "${enable_readline+set}" = set; then :
|
||||
@@ -9809,15 +9746,6 @@ else
|
||||
fi
|
||||
|
||||
|
||||
################################################################################
|
||||
# Check whether --enable-editline was given.
|
||||
if test "${enable_editline+set}" = set; then :
|
||||
enableval=$enable_editline; EDITLINE=$enableval
|
||||
else
|
||||
EDITLINE=no
|
||||
fi
|
||||
|
||||
|
||||
################################################################################
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable realtime support" >&5
|
||||
$as_echo_n "checking whether to enable realtime support... " >&6; }
|
||||
@@ -11030,97 +10958,6 @@ $as_echo "#define LOCKDDLM_SUPPORT 1" >>confdefs.h
|
||||
BUILD_LVMLOCKD=yes
|
||||
fi
|
||||
|
||||
################################################################################
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build lvmlockddlmcontrol" >&5
|
||||
$as_echo_n "checking whether to build lvmlockddlmcontrol... " >&6; }
|
||||
# Check whether --enable-lvmlockd-dlmcontrol was given.
|
||||
if test "${enable_lvmlockd_dlmcontrol+set}" = set; then :
|
||||
enableval=$enable_lvmlockd_dlmcontrol; LOCKDDLM_CONTROL=$enableval
|
||||
fi
|
||||
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $LOCKDDLM_CONTROL" >&5
|
||||
$as_echo "$LOCKDDLM_CONTROL" >&6; }
|
||||
|
||||
BUILD_LOCKDDLM_CONTROL=$LOCKDDLM_CONTROL
|
||||
|
||||
if test "$BUILD_LOCKDDLM_CONTROL" = yes; then
|
||||
|
||||
pkg_failed=no
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for LOCKD_DLM_CONTROL" >&5
|
||||
$as_echo_n "checking for LOCKD_DLM_CONTROL... " >&6; }
|
||||
|
||||
if test -n "$LOCKD_DLM_CONTROL_CFLAGS"; then
|
||||
pkg_cv_LOCKD_DLM_CONTROL_CFLAGS="$LOCKD_DLM_CONTROL_CFLAGS"
|
||||
elif test -n "$PKG_CONFIG"; then
|
||||
if test -n "$PKG_CONFIG" && \
|
||||
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libdlmcontrol >= 3.2\""; } >&5
|
||||
($PKG_CONFIG --exists --print-errors "libdlmcontrol >= 3.2") 2>&5
|
||||
ac_status=$?
|
||||
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
|
||||
test $ac_status = 0; }; then
|
||||
pkg_cv_LOCKD_DLM_CONTROL_CFLAGS=`$PKG_CONFIG --cflags "libdlmcontrol >= 3.2" 2>/dev/null`
|
||||
test "x$?" != "x0" && pkg_failed=yes
|
||||
else
|
||||
pkg_failed=yes
|
||||
fi
|
||||
else
|
||||
pkg_failed=untried
|
||||
fi
|
||||
if test -n "$LOCKD_DLM_CONTROL_LIBS"; then
|
||||
pkg_cv_LOCKD_DLM_CONTROL_LIBS="$LOCKD_DLM_CONTROL_LIBS"
|
||||
elif test -n "$PKG_CONFIG"; then
|
||||
if test -n "$PKG_CONFIG" && \
|
||||
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libdlmcontrol >= 3.2\""; } >&5
|
||||
($PKG_CONFIG --exists --print-errors "libdlmcontrol >= 3.2") 2>&5
|
||||
ac_status=$?
|
||||
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
|
||||
test $ac_status = 0; }; then
|
||||
pkg_cv_LOCKD_DLM_CONTROL_LIBS=`$PKG_CONFIG --libs "libdlmcontrol >= 3.2" 2>/dev/null`
|
||||
test "x$?" != "x0" && pkg_failed=yes
|
||||
else
|
||||
pkg_failed=yes
|
||||
fi
|
||||
else
|
||||
pkg_failed=untried
|
||||
fi
|
||||
|
||||
|
||||
|
||||
if test $pkg_failed = yes; then
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
|
||||
$as_echo "no" >&6; }
|
||||
|
||||
if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
|
||||
_pkg_short_errors_supported=yes
|
||||
else
|
||||
_pkg_short_errors_supported=no
|
||||
fi
|
||||
if test $_pkg_short_errors_supported = yes; then
|
||||
LOCKD_DLM_CONTROL_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libdlmcontrol >= 3.2" 2>&1`
|
||||
else
|
||||
LOCKD_DLM_CONTROL_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libdlmcontrol >= 3.2" 2>&1`
|
||||
fi
|
||||
# Put the nasty error message in config.log where it belongs
|
||||
echo "$LOCKD_DLM_CONTROL_PKG_ERRORS" >&5
|
||||
|
||||
$bailout
|
||||
elif test $pkg_failed = untried; then
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
|
||||
$as_echo "no" >&6; }
|
||||
$bailout
|
||||
else
|
||||
LOCKD_DLM_CONTROL_CFLAGS=$pkg_cv_LOCKD_DLM_CONTROL_CFLAGS
|
||||
LOCKD_DLM_CONTROL_LIBS=$pkg_cv_LOCKD_DLM_CONTROL_LIBS
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
|
||||
$as_echo "yes" >&6; }
|
||||
HAVE_LOCKD_DLM_CONTROL=yes
|
||||
fi
|
||||
|
||||
$as_echo "#define LOCKDDLM_CONTROL_SUPPORT 1" >>confdefs.h
|
||||
|
||||
BUILD_LVMLOCKD=yes
|
||||
fi
|
||||
|
||||
################################################################################
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build lvmlockd" >&5
|
||||
$as_echo_n "checking whether to build lvmlockd... " >&6; }
|
||||
@@ -11845,7 +11682,6 @@ if test "$BUILD_LVMDBUSD" = yes; then
|
||||
|
||||
|
||||
|
||||
|
||||
if test -n "$PYTHON"; then
|
||||
# If the user set $PYTHON, use it and don't search something else.
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $PYTHON version is >= 3" >&5
|
||||
@@ -11881,7 +11717,7 @@ if ${am_cv_pathless_PYTHON+:} false; then :
|
||||
$as_echo_n "(cached) " >&6
|
||||
else
|
||||
|
||||
for am_cv_pathless_PYTHON in python3 python2 python python3.9 python3.8 python3.7 python3.6 python3.5 python3.4 python3.3 python3.2 python3.1 python3.0 python2.7 python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0 none; do
|
||||
for am_cv_pathless_PYTHON in python python2 python3 python3.5 python3.4 python3.3 python3.2 python3.1 python3.0 python2.7 python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0 none; do
|
||||
test "$am_cv_pathless_PYTHON" = none && break
|
||||
prog="import sys
|
||||
# split strings by '.' and convert to numeric. Append some zeros
|
||||
@@ -12710,61 +12546,6 @@ fi

fi

################################################################################

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BLKZEROOUT in sys/ioctl.h." >&5
$as_echo_n "checking for BLKZEROOUT in sys/ioctl.h.... " >&6; }
if ${ac_cv_have_blkzeroout+:} false; then :
  $as_echo_n "(cached) " >&6
else
  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
#include <sys/ioctl.h>
#include <linux/fs.h>
int bar(void) { return ioctl(0, BLKZEROOUT, 0); }

int
main ()
{

  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
  ac_cv_have_blkzeroout=yes
else
  ac_cv_have_blkzeroout=no
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_have_blkzeroout" >&5
$as_echo "$ac_cv_have_blkzeroout" >&6; }

# Check whether --enable-blkzeroout was given.
if test "${enable_blkzeroout+set}" = set; then :
  enableval=$enable_blkzeroout; BLKZEROOUT=$enableval
else
  BLKZEROOUT=yes
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use BLKZEROOUT for device zeroing" >&5
$as_echo_n "checking whether to use BLKZEROOUT for device zeroing... " >&6; }
if test "$BLKZEROOUT" = yes; then
  if test $ac_cv_have_blkzeroout = yes; then :

$as_echo "#define HAVE_BLKZEROOUT 1" >>confdefs.h

  else
    BLKZEROOUT=no
  fi
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $BLKZEROOUT" >&5
$as_echo "$BLKZEROOUT" >&6; }
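The check above only probes whether BLKZEROOUT is declared; WHATS_NEW lists "Support using BLKZEROOUT for clearing devices" as the feature it enables. A minimal sketch of how this ioctl is typically invoked (an illustrative example, not LVM2's actual zeroing code; the device path is a placeholder, and offset and length must be aligned to the device's logical block size):

```c
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* BLKZEROOUT */

/* Ask the kernel to zero 'len' bytes starting at 'start' on a block device. */
static int zero_range(const char *dev, uint64_t start, uint64_t len)
{
	uint64_t range[2] = { start, len };
	int fd = open(dev, O_WRONLY);

	if (fd < 0)
		return -1;

	if (ioctl(fd, BLKZEROOUT, range) < 0) {
		perror("BLKZEROOUT");
		close(fd);
		return -1;
	}

	return close(fd);
}

int main(void)
{
	/* /dev/loop0 is a placeholder device name for the example. */
	return zero_range("/dev/loop0", 0, 1024 * 1024) ? 1 : 0;
}
```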
################################################################################
|
||||
RT_LIBS=
|
||||
HAVE_REALTIME=no
|
||||
@@ -12885,86 +12666,6 @@ fi
|
||||
done
|
||||
|
||||
|
||||
################################################################################
|
||||
if test "$EDITLINE" == yes; then
|
||||
|
||||
pkg_failed=no
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for EDITLINE" >&5
|
||||
$as_echo_n "checking for EDITLINE... " >&6; }
|
||||
|
||||
if test -n "$EDITLINE_CFLAGS"; then
|
||||
pkg_cv_EDITLINE_CFLAGS="$EDITLINE_CFLAGS"
|
||||
elif test -n "$PKG_CONFIG"; then
|
||||
if test -n "$PKG_CONFIG" && \
|
||||
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libedit\""; } >&5
|
||||
($PKG_CONFIG --exists --print-errors "libedit") 2>&5
|
||||
ac_status=$?
|
||||
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
|
||||
test $ac_status = 0; }; then
|
||||
pkg_cv_EDITLINE_CFLAGS=`$PKG_CONFIG --cflags "libedit" 2>/dev/null`
|
||||
test "x$?" != "x0" && pkg_failed=yes
|
||||
else
|
||||
pkg_failed=yes
|
||||
fi
|
||||
else
|
||||
pkg_failed=untried
|
||||
fi
|
||||
if test -n "$EDITLINE_LIBS"; then
|
||||
pkg_cv_EDITLINE_LIBS="$EDITLINE_LIBS"
|
||||
elif test -n "$PKG_CONFIG"; then
|
||||
if test -n "$PKG_CONFIG" && \
|
||||
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libedit\""; } >&5
|
||||
($PKG_CONFIG --exists --print-errors "libedit") 2>&5
|
||||
ac_status=$?
|
||||
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
|
||||
test $ac_status = 0; }; then
|
||||
pkg_cv_EDITLINE_LIBS=`$PKG_CONFIG --libs "libedit" 2>/dev/null`
|
||||
test "x$?" != "x0" && pkg_failed=yes
|
||||
else
|
||||
pkg_failed=yes
|
||||
fi
|
||||
else
|
||||
pkg_failed=untried
|
||||
fi
|
||||
|
||||
|
||||
|
||||
if test $pkg_failed = yes; then
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
|
||||
$as_echo "no" >&6; }
|
||||
|
||||
if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
|
||||
_pkg_short_errors_supported=yes
|
||||
else
|
||||
_pkg_short_errors_supported=no
|
||||
fi
|
||||
if test $_pkg_short_errors_supported = yes; then
|
||||
EDITLINE_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libedit" 2>&1`
|
||||
else
|
||||
EDITLINE_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libedit" 2>&1`
|
||||
fi
|
||||
# Put the nasty error message in config.log where it belongs
|
||||
echo "$EDITLINE_PKG_ERRORS" >&5
|
||||
|
||||
as_fn_error $? "libedit could not be found which is required for the --enable-readline option." "$LINENO" 5
|
||||
|
||||
elif test $pkg_failed = untried; then
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
|
||||
$as_echo "no" >&6; }
|
||||
as_fn_error $? "libedit could not be found which is required for the --enable-readline option." "$LINENO" 5
|
||||
|
||||
else
|
||||
EDITLINE_CFLAGS=$pkg_cv_EDITLINE_CFLAGS
|
||||
EDITLINE_LIBS=$pkg_cv_EDITLINE_LIBS
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
|
||||
$as_echo "yes" >&6; }
|
||||
|
||||
|
||||
$as_echo "#define EDITLINE_SUPPORT 1" >>confdefs.h
|
||||
|
||||
fi
|
||||
fi
|
||||
|
||||
################################################################################
|
||||
if test "$READLINE" != no; then
|
||||
lvm_saved_libs=$LIBS
|
||||
@@ -13403,28 +13104,6 @@ $as_echo_n "checking whether to enable readline... " >&6; }
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $READLINE" >&5
|
||||
$as_echo "$READLINE" >&6; }
|
||||
|
||||
if test "$EDITLINE" = yes; then
|
||||
for ac_header in editline/readline.h editline/history.h
|
||||
do :
|
||||
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
|
||||
ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
|
||||
if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
|
||||
cat >>confdefs.h <<_ACEOF
|
||||
#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
|
||||
_ACEOF
|
||||
|
||||
else
|
||||
hard_bailout
|
||||
fi
|
||||
|
||||
done
|
||||
|
||||
fi
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable editline" >&5
|
||||
$as_echo_n "checking whether to enable editline... " >&6; }
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $EDITLINE" >&5
|
||||
$as_echo "$EDITLINE" >&6; }
|
||||
|
||||
if test "$BUILD_CMIRRORD" = yes; then
|
||||
for ac_func in atexit
|
||||
do :
|
||||
@@ -14104,8 +13783,6 @@ _ACEOF
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -15479,8 +15156,8 @@ $as_echo "$as_me: WARNING: You should install latest cache_check vsn 0.7.0 to us
|
||||
fi
|
||||
|
||||
if test -n "$VDO_CONFIGURE_WARN"; then :
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unrecognized 'vdoformat' tool is REQUIRED for VDO logical volume creation!" >&5
|
||||
$as_echo "$as_me: WARNING: Unrecognized 'vdoformat' tool is REQUIRED for VDO logical volume creation!" >&2;}
|
||||
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized 'vdoformat' tool is REQUIRED for VDO logical volume creation!" >&5
|
||||
$as_echo "$as_me: WARNING: unrecognized 'vdoformat' tool is REQUIRED for VDO logical volume creation!" >&2;}
|
||||
fi
|
||||
|
||||
|
||||
|
||||
92 configure.ac
@@ -30,7 +30,7 @@ AC_CANONICAL_TARGET([])
AS_IF([test -z "$CFLAGS"], [COPTIMISE_FLAG="-O2"])
case "$host_os" in
linux*)
CLDFLAGS="${CLDFLAGS-"$LDFLAGS"} -Wl,--version-script,.export.sym"
CLDFLAGS="${CLDFLAGS:"$LDFLAGS"} -Wl,--version-script,.export.sym"
# equivalent to -rdynamic
ELDFLAGS="-Wl,--export-dynamic"
# FIXME Generate list and use --dynamic-list=.dlopen.sym
@@ -51,7 +51,7 @@ case "$host_os" in
;;
darwin*)
CFLAGS="$CFLAGS -no-cpp-precomp -fno-common"
CLDFLAGS="${CLDFLAGS-"$LDFLAGS"}"
CLDFLAGS="${CLDFLAGS:"$LDFLAGS"}"
ELDFLAGS=
CLDWHOLEARCHIVE="-all_load"
CLDNOWHOLEARCHIVE=
@@ -64,7 +64,7 @@ case "$host_os" in
BLKDEACTIVATE=no
;;
*)
CLDFLAGS="${CLDFLAGS-"$LDFLAGS"}"
CLDFLAGS="${CLDFLAGS:"$LDFLAGS"}"
;;
esac

@@ -153,10 +153,9 @@ AC_DEFINE([_REENTRANT], 1, [Define to use re-entrant thread safe versions])
################################################################################
dnl -- Check for functions
AC_CHECK_FUNCS([ftruncate gethostname getpagesize gettimeofday localtime_r \
memchr memset mkdir mkfifo munmap nl_langinfo pselect realpath rmdir setenv \
memchr memset mkdir mkfifo munmap nl_langinfo realpath rmdir setenv \
setlocale strcasecmp strchr strcspn strdup strerror strncasecmp strndup \
strrchr strspn strstr strtol strtoul uname], , [AC_MSG_ERROR(bailing out)])
AC_CHECK_FUNCS([prlimit])
AC_FUNC_ALLOCA
AC_FUNC_CLOSEDIR_VOID
AC_FUNC_CHOWN
@@ -607,7 +606,7 @@ AC_MSG_CHECKING(whether to include vdo)
AC_ARG_WITH(vdo,
AC_HELP_STRING([--with-vdo=TYPE],
[vdo support: internal/none [internal]]),
VDO=$withval, VDO="internal")
VDO=$withval, VDO="none")

AC_MSG_RESULT($VDO)

@@ -655,7 +654,7 @@ AC_MSG_CHECKING(whether to include writecache)
AC_ARG_WITH(writecache,
AC_HELP_STRING([--with-writecache=TYPE],
[writecache support: internal/none [internal]]),
WRITECACHE=$withval, WRITECACHE="internal")
WRITECACHE=$withval, WRITECACHE="none")

AC_MSG_RESULT($WRITECACHE)

@@ -667,36 +666,12 @@ case "$WRITECACHE" in
*) AC_MSG_ERROR([--with-writecache parameter invalid]) ;;
esac

################################################################################
dnl -- integrity inclusion type
AC_MSG_CHECKING(whether to include integrity)
AC_ARG_WITH(integrity,
AC_HELP_STRING([--with-integrity=TYPE],
[integrity support: internal/none [internal]]),
INTEGRITY=$withval, INTEGRITY="internal")

AC_MSG_RESULT($INTEGRITY)

case "$INTEGRITY" in
none) ;;
internal)
AC_DEFINE([INTEGRITY_INTERNAL], 1, [Define to 1 to include built-in support for integrity.])
;;
*) AC_MSG_ERROR([--with-integrity parameter invalid]) ;;
esac

################################################################################
|
||||
dnl -- Disable readline
|
||||
AC_ARG_ENABLE([readline],
|
||||
AC_HELP_STRING([--disable-readline], [disable readline support]),
|
||||
READLINE=$enableval, READLINE=maybe)
|
||||
|
||||
################################################################################
|
||||
dnl -- Disable editline
|
||||
AC_ARG_ENABLE([editline],
|
||||
AC_HELP_STRING([--enable-editline], [enable editline support]),
|
||||
EDITLINE=$enableval, EDITLINE=no)
|
||||
|
||||
################################################################################
|
||||
dnl -- Disable realtime clock support
|
||||
AC_MSG_CHECKING(whether to enable realtime support)
|
||||
@@ -740,7 +715,7 @@ dnl -- Set up pidfile and run directory
|
||||
AH_TEMPLATE(DEFAULT_PID_DIR)
|
||||
AC_ARG_WITH(default-pid-dir,
|
||||
AC_HELP_STRING([--with-default-pid-dir=PID_DIR],
|
||||
[default directory to keep PID files in [autodetect]]),
|
||||
[Default directory to keep PID files in. [autodetect]]),
|
||||
DEFAULT_PID_DIR="$withval", DEFAULT_PID_DIR=$RUN_DIR)
|
||||
AC_DEFINE_UNQUOTED(DEFAULT_PID_DIR, ["$DEFAULT_PID_DIR"],
|
||||
[Default directory to keep PID files in.])
|
||||
@@ -748,7 +723,7 @@ AC_DEFINE_UNQUOTED(DEFAULT_PID_DIR, ["$DEFAULT_PID_DIR"],
|
||||
AH_TEMPLATE(DEFAULT_DM_RUN_DIR, [Name of default DM run directory.])
|
||||
AC_ARG_WITH(default-dm-run-dir,
|
||||
AC_HELP_STRING([--with-default-dm-run-dir=DM_RUN_DIR],
|
||||
[default DM run directory [autodetect]]),
|
||||
[ Default DM run directory. [autodetect]]),
|
||||
DEFAULT_DM_RUN_DIR="$withval", DEFAULT_DM_RUN_DIR=$RUN_DIR)
|
||||
AC_DEFINE_UNQUOTED(DEFAULT_DM_RUN_DIR, ["$DEFAULT_DM_RUN_DIR"],
|
||||
[Default DM run directory.])
|
||||
@@ -756,7 +731,7 @@ AC_DEFINE_UNQUOTED(DEFAULT_DM_RUN_DIR, ["$DEFAULT_DM_RUN_DIR"],
|
||||
AH_TEMPLATE(DEFAULT_RUN_DIR, [Name of default LVM run directory.])
|
||||
AC_ARG_WITH(default-run-dir,
|
||||
AC_HELP_STRING([--with-default-run-dir=RUN_DIR],
|
||||
[default LVM run directory [autodetect_run_dir/lvm]]),
|
||||
[Default LVM run directory. [autodetect_run_dir/lvm]]),
|
||||
DEFAULT_RUN_DIR="$withval", DEFAULT_RUN_DIR="$RUN_DIR/lvm")
|
||||
AC_DEFINE_UNQUOTED(DEFAULT_RUN_DIR, ["$DEFAULT_RUN_DIR"],
|
||||
[Default LVM run directory.])
|
||||
@@ -1224,9 +1199,6 @@ if test "$BUILD_LVMDBUSD" = yes; then
|
||||
unset am_cv_pathless_PYTHON ac_cv_path_PYTHON am_cv_python_platform
|
||||
unset am_cv_python_pythondir am_cv_python_version am_cv_python_pyexecdir
|
||||
unset ac_cv_path_PYTHON_CONFIG ac_cv_path_ac_pt_PYTHON_CONFIG
|
||||
m4_define_default([_AM_PYTHON_INTERPRETER_LIST],[ python3 python2 python dnl
|
||||
python3.9 python3.8 python3.7 python3.6 python3.5 python3.4 python3.3 python3.2 python3.1 python3.0 dnl
|
||||
python2.7 python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0 ])
|
||||
AM_PATH_PYTHON([3])
|
||||
PYTHON3=$PYTHON
|
||||
test -z "$PYTHON3" && AC_MSG_ERROR([python3 is required for --enable-python3_bindings or --enable-dbus-service but cannot be found])
|
||||
@@ -1354,33 +1326,6 @@ if test "$SELINUX" = yes; then
|
||||
HAVE_SELINUX=no ])
|
||||
fi
|
||||
|
||||
################################################################################
dnl -- Check BLKZEROOUT support

AC_CACHE_CHECK([for BLKZEROOUT in sys/ioctl.h.],
[ac_cv_have_blkzeroout],
[AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
[#include <sys/ioctl.h>
#include <linux/fs.h>
int bar(void) { return ioctl(0, BLKZEROOUT, 0); }]
)], [ac_cv_have_blkzeroout=yes], [ac_cv_have_blkzeroout=no])])

AC_ARG_ENABLE(blkzeroout,
AC_HELP_STRING([--disable-blkzeroout],
[do not use BLKZEROOUT for device zeroing]),
BLKZEROOUT=$enableval, BLKZEROOUT=yes)

AC_MSG_CHECKING(whether to use BLKZEROOUT for device zeroing)
if test "$BLKZEROOUT" = yes; then
AC_IF_YES(ac_cv_have_blkzeroout,
AC_DEFINE(HAVE_BLKZEROOUT, 1,
[Define if ioctl BLKZEROOUT can be used for device zeroing.]),
BLKZEROOUT=no)
fi
AC_MSG_RESULT($BLKZEROOUT)

################################################################################
|
||||
dnl -- Check for realtime clock support
|
||||
RT_LIBS=
|
||||
@@ -1414,16 +1359,6 @@ AC_IF_YES(ac_cv_stat_st_ctim,
|
||||
dnl -- Check for getopt
|
||||
AC_CHECK_HEADERS(getopt.h, AC_DEFINE([HAVE_GETOPTLONG], 1, [Define to 1 if getopt_long is available.]))
|
||||
|
||||
################################################################################
|
||||
dnl -- Check for editline
|
||||
if test "$EDITLINE" == yes; then
|
||||
PKG_CHECK_MODULES([EDITLINE], [libedit], [
|
||||
AC_DEFINE([EDITLINE_SUPPORT], 1,
|
||||
[Define to 1 to include the LVM editline shell.])], AC_MSG_ERROR(
|
||||
[libedit could not be found which is required for the --enable-readline option.])
|
||||
)
|
||||
fi
|
||||
|
||||
################################################################################
|
||||
dnl -- Check for readline (Shamelessly copied from parted 1.4.17)
|
||||
if test "$READLINE" != no; then
|
||||
@@ -1556,12 +1491,6 @@ fi
|
||||
AC_MSG_CHECKING(whether to enable readline)
|
||||
AC_MSG_RESULT($READLINE)
|
||||
|
||||
if test "$EDITLINE" = yes; then
|
||||
AC_CHECK_HEADERS(editline/readline.h editline/history.h,,hard_bailout)
|
||||
fi
|
||||
AC_MSG_CHECKING(whether to enable editline)
|
||||
AC_MSG_RESULT($EDITLINE)
|
||||
|
||||
if test "$BUILD_CMIRRORD" = yes; then
|
||||
AC_CHECK_FUNCS(atexit,,hard_bailout)
|
||||
fi
|
||||
@@ -1817,7 +1746,6 @@ AC_SUBST(QUORUM_CFLAGS)
|
||||
AC_SUBST(QUORUM_LIBS)
|
||||
AC_SUBST(RT_LIBS)
|
||||
AC_SUBST(READLINE_LIBS)
|
||||
AC_SUBST(EDITLINE_LIBS)
|
||||
AC_SUBST(REPLICATORS)
|
||||
AC_SUBST(SACKPT_CFLAGS)
|
||||
AC_SUBST(SACKPT_LIBS)
|
||||
@@ -1953,7 +1881,7 @@ AS_IF([test -n "$CACHE_CHECK_VERSION_WARN"],
|
||||
[AC_MSG_WARN([You should install latest cache_check vsn 0.7.0 to use lvm2 cache metadata format 2])])
|
||||
|
||||
AS_IF([test -n "$VDO_CONFIGURE_WARN"],
|
||||
[AC_MSG_WARN([Unrecognized 'vdoformat' tool is REQUIRED for VDO logical volume creation!])])
|
||||
[AC_MSG_WARN([unrecognized 'vdoformat' tool is REQUIRED for VDO logical volume creation!])])
|
||||
|
||||
|
||||
AS_IF([test "$ODIRECT" != yes],
|
||||
|
||||
@@ -46,7 +46,6 @@ const char *find_config_tree_str(struct cmd_context *cmd, int id, struct profile
|
||||
return "STRING";
|
||||
}
|
||||
|
||||
/*
|
||||
struct logical_volume *origin_from_cow(const struct logical_volume *lv)
|
||||
{
|
||||
if (lv)
|
||||
@@ -54,7 +53,6 @@ struct logical_volume *origin_from_cow(const struct logical_volume *lv)
|
||||
|
||||
__coverity_panic__();
|
||||
}
|
||||
*/
|
||||
|
||||
/* simple_memccpy() from glibc */
|
||||
void *memccpy(void *dest, const void *src, int c, size_t n)
|
||||
|
||||
@@ -12,8 +12,8 @@
|
||||
#ifndef _LVM_CLOG_CLUSTER_H
|
||||
#define _LVM_CLOG_CLUSTER_H
|
||||
|
||||
#include "libdm/libdevmapper.h"
|
||||
#include "libdm/misc/dm-log-userspace.h"
|
||||
#include "libdm/libdevmapper.h"
|
||||
|
||||
#define DM_ULOG_RESPONSE 0x1000U /* in last byte of 32-bit value */
|
||||
#define DM_ULOG_CHECKPOINT_READY 21
|
||||
|
||||
@@ -12,8 +12,7 @@
|
||||
#ifndef _LVM_CLOG_FUNCTIONS_H
|
||||
#define _LVM_CLOG_FUNCTIONS_H
|
||||
|
||||
#include "libdm/libdevmapper.h"
|
||||
#include "libdm/misc/dm-log-userspace.h"
|
||||
#include "device_mapper/misc/dm-log-userspace.h"
|
||||
#include "cluster.h"
|
||||
|
||||
#define LOG_RESUMED 1
|
||||
|
||||
@@ -752,7 +752,7 @@ static void _exit_timeout(void *unused __attribute__((unused)))
|
||||
static void *_timeout_thread(void *unused __attribute__((unused)))
|
||||
{
|
||||
struct thread_status *thread;
|
||||
struct timespec timeout, real_time;
|
||||
struct timespec timeout;
|
||||
time_t curr_time;
|
||||
int ret;
|
||||
|
||||
@@ -763,16 +763,7 @@ static void *_timeout_thread(void *unused __attribute__((unused)))
|
||||
while (!dm_list_empty(&_timeout_registry)) {
|
||||
timeout.tv_sec = 0;
|
||||
timeout.tv_nsec = 0;
|
||||
#ifndef HAVE_REALTIME
|
||||
curr_time = time(NULL);
|
||||
#else
|
||||
if (clock_gettime(CLOCK_REALTIME, &real_time)) {
|
||||
log_error("Failed to read clock_gettime().");
|
||||
break;
|
||||
}
|
||||
/* 10ms back to the future */
|
||||
curr_time = real_time.tv_sec + ((real_time.tv_nsec > (1000000000 - 10000000)) ? 1 : 0);
|
||||
#endif
|
||||
|
||||
dm_list_iterate_items_gen(thread, &_timeout_registry, timeout_list) {
|
||||
if (thread->next_time <= curr_time) {
|
||||
@@ -1494,34 +1485,37 @@ static int _client_read(struct dm_event_fifos *fifos,
t.tv_usec = 0;
ret = select(fifos->client + 1, &fds, NULL, NULL, &t);

if (!ret && bytes)
continue; /* trying to finish read */
if (!ret && !bytes) /* nothing to read */
return 0;

if (ret <= 0) /* nothing to read */
goto bad;
if (!ret) /* trying to finish read */
continue;

if (ret < 0) /* error */
return 0;

ret = read(fifos->client, buf + bytes, size - bytes);
bytes += ret > 0 ? ret : 0;
if (!msg->data && (bytes == 2 * sizeof(uint32_t))) {
if (header && (bytes == 2 * sizeof(uint32_t))) {
msg->cmd = ntohl(header[0]);
size = msg->size = ntohl(header[1]);
bytes = 0;

if (!(size = msg->size = ntohl(header[1])))
break;

if (!(buf = msg->data = malloc(msg->size)))
goto bad;
if (!size)
break; /* No data -> error */
buf = msg->data = malloc(msg->size);
if (!buf)
break; /* No mem -> error */
header = 0;
}
}

if (bytes == size)
return 1;
if (bytes != size) {
free(msg->data);
msg->data = NULL;
return 0;
}

bad:
free(msg->data);
msg->data = NULL;

return 0;
return 1;
}

/*
|
||||
@@ -1751,8 +1745,7 @@ static void _init_thread_signals(void)
|
||||
sigdelset(&my_sigset, SIGHUP);
|
||||
sigdelset(&my_sigset, SIGQUIT);
|
||||
|
||||
if (pthread_sigmask(SIG_BLOCK, &my_sigset, NULL))
|
||||
log_sys_error("pthread_sigmask", "SIG_BLOCK");
|
||||
pthread_sigmask(SIG_BLOCK, &my_sigset, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -2028,8 +2021,8 @@ static int _reinstate_registrations(struct dm_event_fifos *fifos)
|
||||
static void _restart_dmeventd(void)
|
||||
{
|
||||
struct dm_event_fifos fifos = {
|
||||
.client = -1,
|
||||
.server = -1,
|
||||
.client = -1,
|
||||
/* FIXME Make these either configurable or depend directly on dmeventd_path */
|
||||
.client_path = DM_EVENT_FIFO_CLIENT,
|
||||
.server_path = DM_EVENT_FIFO_SERVER
|
||||
@@ -2243,8 +2236,7 @@ int main(int argc, char *argv[])
|
||||
|
||||
_init_thread_signals();
|
||||
|
||||
if (pthread_mutex_init(&_global_mutex, NULL))
|
||||
exit(EXIT_FAILURE);
|
||||
pthread_mutex_init(&_global_mutex, NULL);
|
||||
|
||||
if (!_systemd_activation && !_open_fifos(&fifos))
|
||||
exit(EXIT_FIFO_FAILURE);
|
||||
|
||||
@@ -237,16 +237,16 @@ static int _daemon_read(struct dm_event_fifos *fifos,
|
||||
ret = select(fifos->server + 1, &fds, NULL, NULL, &tval);
|
||||
if (ret < 0 && errno != EINTR) {
|
||||
log_error("Unable to read from event server.");
|
||||
goto bad;
|
||||
return 0;
|
||||
}
|
||||
if ((ret == 0) && (i > 4) && !bytes) {
|
||||
log_error("No input from event server.");
|
||||
goto bad;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
if (ret < 1) {
|
||||
log_error("Unable to read from event server.");
|
||||
goto bad;
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = read(fifos->server, buf + bytes, size);
|
||||
@@ -255,32 +255,25 @@ static int _daemon_read(struct dm_event_fifos *fifos,
|
||||
continue;
|
||||
|
||||
log_error("Unable to read from event server.");
|
||||
goto bad;
|
||||
return 0;
|
||||
}
|
||||
|
||||
bytes += ret;
|
||||
if (!msg->data && (bytes == 2 * sizeof(uint32_t))) {
|
||||
if (header && (bytes == 2 * sizeof(uint32_t))) {
|
||||
msg->cmd = ntohl(header[0]);
|
||||
msg->size = ntohl(header[1]);
|
||||
buf = msg->data = malloc(msg->size);
|
||||
size = msg->size;
|
||||
bytes = 0;
|
||||
|
||||
if (!(size = msg->size = ntohl(header[1])))
|
||||
break;
|
||||
|
||||
if (!(buf = msg->data = malloc(msg->size))) {
|
||||
log_error("Unable to allocate message data.");
|
||||
return 0;
|
||||
}
|
||||
header = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (bytes == size)
|
||||
return 1;
|
||||
|
||||
bad:
|
||||
free(msg->data);
|
||||
msg->data = NULL;
|
||||
|
||||
return 0;
|
||||
if (bytes != size) {
|
||||
free(msg->data);
|
||||
msg->data = NULL;
|
||||
}
|
||||
return bytes == size;
|
||||
}
|
||||
|
||||
/* Write message to daemon. */
|
||||
@@ -615,8 +608,8 @@ static int _do_event(int cmd, char *dmeventd_path, struct dm_event_daemon_messag
|
||||
{
|
||||
int ret;
|
||||
struct dm_event_fifos fifos = {
|
||||
.client = -1,
|
||||
.server = -1,
|
||||
.client = -1,
|
||||
/* FIXME Make these either configurable or depend directly on dmeventd_path */
|
||||
.client_path = DM_EVENT_FIFO_CLIENT,
|
||||
.server_path = DM_EVENT_FIFO_SERVER
|
||||
|
||||
@@ -71,7 +71,7 @@ int dmeventd_lvm2_init(void)
|
||||
if (!_lvm_handle) {
|
||||
lvm2_log_fn(_lvm2_print_log);
|
||||
|
||||
if (!(_lvm_handle = lvm2_init_threaded()))
|
||||
if (!(_lvm_handle = lvm2_init()))
|
||||
goto out;
|
||||
|
||||
/*
|
||||
|
||||
@@ -76,17 +76,14 @@ static int _process_raid_event(struct dso_state *state, char *params, const char
|
||||
}
|
||||
|
||||
if (dead) {
|
||||
/*
|
||||
* Use the first event to run a repair ignoring any additonal ones.
|
||||
*
|
||||
* We presume lvconvert to do pre-repair
|
||||
* checks to avoid bloat in this plugin.
|
||||
*/
|
||||
if (!state->warned && status->insync_regions < status->total_regions) {
|
||||
state->warned = 1;
|
||||
log_warn("WARNING: waiting for resynchronization to finish "
|
||||
"before initiating repair on RAID device %s.", device);
|
||||
/* Fall through to allow lvconvert to run. */
|
||||
if (status->insync_regions < status->total_regions) {
|
||||
if (!state->warned) {
|
||||
state->warned = 1;
|
||||
log_warn("WARNING: waiting for resynchronization to finish "
|
||||
"before initiating repair on RAID device %s.", device);
|
||||
}
|
||||
|
||||
goto out; /* Not yet done syncing with accessible devices */
|
||||
}
|
||||
|
||||
if (state->failed)
|
||||
|
||||
@@ -16,12 +16,7 @@
|
||||
#include "daemons/dmeventd/plugins/lvm2/dmeventd_lvm.h"
|
||||
#include "daemons/dmeventd/libdevmapper-event.h"
|
||||
|
||||
/*
|
||||
* Use parser from new device_mapper library.
|
||||
* Although during compilation we can see dm_vdo_status_parse()
|
||||
* in runtime we are linked agains systems libdm 'older' library
|
||||
* which does not provide this symbol and plugin fails to load
|
||||
*/
|
||||
/* Use parser from new device_mapper library */
|
||||
#include "device_mapper/vdo/status.c"
|
||||
|
||||
#include <sys/wait.h>
|
||||
|
||||
@@ -47,11 +47,9 @@ BUS_NAME = os.getenv('LVM_DBUS_NAME', 'com.redhat.lvmdbus1')
|
||||
BASE_INTERFACE = 'com.redhat.lvmdbus1'
|
||||
PV_INTERFACE = BASE_INTERFACE + '.Pv'
|
||||
VG_INTERFACE = BASE_INTERFACE + '.Vg'
|
||||
VG_VDO_INTERFACE = BASE_INTERFACE + '.VgVdo'
|
||||
LV_INTERFACE = BASE_INTERFACE + '.Lv'
|
||||
LV_COMMON_INTERFACE = BASE_INTERFACE + '.LvCommon'
|
||||
THIN_POOL_INTERFACE = BASE_INTERFACE + '.ThinPool'
|
||||
VDO_POOL_INTERFACE = BASE_INTERFACE + '.VdoPool'
|
||||
CACHE_POOL_INTERFACE = BASE_INTERFACE + '.CachePool'
|
||||
LV_CACHED = BASE_INTERFACE + '.CachedLv'
|
||||
SNAPSHOT_INTERFACE = BASE_INTERFACE + '.Snapshot'
|
||||
@@ -63,7 +61,6 @@ PV_OBJ_PATH = BASE_OBJ_PATH + '/Pv'
|
||||
VG_OBJ_PATH = BASE_OBJ_PATH + '/Vg'
|
||||
LV_OBJ_PATH = BASE_OBJ_PATH + '/Lv'
|
||||
THIN_POOL_PATH = BASE_OBJ_PATH + "/ThinPool"
|
||||
VDO_POOL_PATH = BASE_OBJ_PATH + "/VdoPool"
|
||||
CACHE_POOL_PATH = BASE_OBJ_PATH + "/CachePool"
|
||||
HIDDEN_LV_PATH = BASE_OBJ_PATH + "/HiddenLv"
|
||||
MANAGER_OBJ_PATH = BASE_OBJ_PATH + '/Manager'
|
||||
@@ -74,7 +71,6 @@ pv_id = itertools.count()
|
||||
vg_id = itertools.count()
|
||||
lv_id = itertools.count()
|
||||
thin_id = itertools.count()
|
||||
vdo_id = itertools.count()
|
||||
cache_pool_id = itertools.count()
|
||||
job_id = itertools.count()
|
||||
hidden_lv = itertools.count()
|
||||
@@ -83,9 +79,6 @@ hidden_lv = itertools.count()
|
||||
load = None
|
||||
event = None
|
||||
|
||||
# Boolean to denote if lvm supports VDO integration
|
||||
vdo_support = False
|
||||
|
||||
# Global cached state
|
||||
db = None
|
||||
|
||||
|
||||
@@ -217,10 +217,7 @@ def options_to_cli_args(options):
else:
rc.append("--%s" % k)
if v != "":
if isinstance(v, int):
rc.append(str(int(v)))
else:
rc.append(str(v))
rc.append(str(v))
return rc

@@ -283,7 +280,7 @@ def vg_remove(vg_name, remove_options):
def vg_lv_create(vg_name, create_options, name, size_bytes, pv_dests):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--size', '%dB' % size_bytes])
cmd.extend(['--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name, '--yes'])
pv_dest_ranges(cmd, pv_dests)
return call(cmd)

@@ -295,7 +292,7 @@ def vg_lv_snapshot(vg_name, snapshot_options, name, size_bytes):
|
||||
cmd.extend(["-s"])
|
||||
|
||||
if size_bytes != 0:
|
||||
cmd.extend(['--size', '%dB' % size_bytes])
|
||||
cmd.extend(['--size', str(size_bytes) + 'B'])
|
||||
|
||||
cmd.extend(['--name', name, vg_name])
|
||||
return call(cmd)
|
||||
@@ -306,9 +303,9 @@ def _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool):
|
||||
cmd.extend(options_to_cli_args(create_options))
|
||||
|
||||
if not thin_pool:
|
||||
cmd.extend(['--size', '%dB' % size_bytes])
|
||||
cmd.extend(['--size', str(size_bytes) + 'B'])
|
||||
else:
|
||||
cmd.extend(['--thin', '--size', '%dB' % size_bytes])
|
||||
cmd.extend(['--thin', '--size', str(size_bytes) + 'B'])
|
||||
|
||||
cmd.extend(['--yes'])
|
||||
return cmd
|
||||
@@ -323,10 +320,10 @@ def vg_lv_create_linear(vg_name, create_options, name, size_bytes, thin_pool):
|
||||
def vg_lv_create_striped(vg_name, create_options, name, size_bytes,
|
||||
num_stripes, stripe_size_kb, thin_pool):
|
||||
cmd = _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool)
|
||||
cmd.extend(['--stripes', str(int(num_stripes))])
|
||||
cmd.extend(['--stripes', str(num_stripes)])
|
||||
|
||||
if stripe_size_kb != 0:
|
||||
cmd.extend(['--stripesize', str(int(stripe_size_kb))])
|
||||
cmd.extend(['--stripesize', str(stripe_size_kb)])
|
||||
|
||||
cmd.extend(['--name', name, vg_name])
|
||||
return call(cmd)
|
||||
@@ -339,13 +336,13 @@ def _vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes,
|
||||
cmd.extend(options_to_cli_args(create_options))
|
||||
|
||||
cmd.extend(['--type', raid_type])
|
||||
cmd.extend(['--size', '%dB' % size_bytes])
|
||||
cmd.extend(['--size', str(size_bytes) + 'B'])
|
||||
|
||||
if num_stripes != 0:
|
||||
cmd.extend(['--stripes', str(int(num_stripes))])
|
||||
cmd.extend(['--stripes', str(num_stripes)])
|
||||
|
||||
if stripe_size_kb != 0:
|
||||
cmd.extend(['--stripesize', str(int(stripe_size_kb))])
|
||||
cmd.extend(['--stripesize', str(stripe_size_kb)])
|
||||
|
||||
cmd.extend(['--name', name, vg_name, '--yes'])
|
||||
return call(cmd)
|
||||
@@ -366,8 +363,8 @@ def vg_lv_create_mirror(
|
||||
cmd.extend(options_to_cli_args(create_options))
|
||||
|
||||
cmd.extend(['--type', 'mirror'])
|
||||
cmd.extend(['--mirrors', str(int(num_copies))])
|
||||
cmd.extend(['--size', '%dB' % size_bytes])
|
||||
cmd.extend(['--mirrors', str(num_copies)])
|
||||
cmd.extend(['--size', str(size_bytes) + 'B'])
|
||||
cmd.extend(['--name', name, vg_name, '--yes'])
|
||||
return call(cmd)
|
||||
|
||||
@@ -388,24 +385,6 @@ def vg_create_thin_pool(md_full_name, data_full_name, create_options):
|
||||
return call(cmd)
|
||||
|
||||
|
||||
def vg_create_vdo_pool_lv_and_lv(vg_name, pool_name, lv_name, data_size,
|
||||
virtual_size, create_options):
|
||||
cmd = ['lvcreate']
|
||||
cmd.extend(options_to_cli_args(create_options))
|
||||
cmd.extend(['-y', '--type', 'vdo', '-n', lv_name,
|
||||
'-L', '%dB' % data_size, '-V', '%dB' % virtual_size,
|
||||
"%s/%s" % (vg_name, pool_name)])
|
||||
return call(cmd)
|
||||
|
||||
|
||||
def vg_create_vdo_pool(pool_full_name, lv_name, virtual_size, create_options):
|
||||
cmd = ['lvconvert']
|
||||
cmd.extend(options_to_cli_args(create_options))
|
||||
cmd.extend(['--type', 'vdo-pool', '-n', lv_name, '--force', '-y',
|
||||
'-V', '%dB' % virtual_size, pool_full_name])
|
||||
return call(cmd)
|
||||
|
||||
|
||||
def lv_remove(lv_path, remove_options):
|
||||
cmd = ['lvremove']
|
||||
cmd.extend(options_to_cli_args(remove_options))
|
||||
@@ -439,7 +418,7 @@ def lv_resize(lv_full_name, size_change, pv_dests,
|
||||
def lv_lv_create(lv_full_name, create_options, name, size_bytes):
|
||||
cmd = ['lvcreate']
|
||||
cmd.extend(options_to_cli_args(create_options))
|
||||
cmd.extend(['--virtualsize', '%dB' % size_bytes, '-T'])
|
||||
cmd.extend(['--virtualsize', str(size_bytes) + 'B', '-T'])
|
||||
cmd.extend(['--name', name, lv_full_name, '--yes'])
|
||||
return call(cmd)
|
||||
|
||||
@@ -453,15 +432,6 @@ def lv_cache_lv(cache_pool_full_name, lv_full_name, cache_options):
|
||||
return call(cmd)
|
||||
|
||||
|
||||
def lv_writecache_lv(cache_lv_full_name, lv_full_name, cache_options):
|
||||
# lvconvert --type writecache --cachevol VG/CacheLV VG/OriginLV
|
||||
cmd = ['lvconvert']
|
||||
cmd.extend(options_to_cli_args(cache_options))
|
||||
cmd.extend(['-y', '--type', 'writecache', '--cachevol',
|
||||
cache_lv_full_name, lv_full_name])
|
||||
return call(cmd)
|
||||
|
||||
|
||||
def lv_detach_cache(lv_full_name, detach_options, destroy_cache):
|
||||
cmd = ['lvconvert']
|
||||
if destroy_cache:
|
||||
@@ -477,28 +447,6 @@ def lv_detach_cache(lv_full_name, detach_options, destroy_cache):
|
||||
return call(cmd)
|
||||
|
||||
|
||||
def lv_vdo_compression(lv_path, enable, comp_options):
|
||||
cmd = ['lvchange', '--compression']
|
||||
if enable:
|
||||
cmd.append('y')
|
||||
else:
|
||||
cmd.append('n')
|
||||
cmd.extend(options_to_cli_args(comp_options))
|
||||
cmd.append(lv_path)
|
||||
return call(cmd)
|
||||
|
||||
|
||||
def lv_vdo_deduplication(lv_path, enable, dedup_options):
|
||||
cmd = ['lvchange', '--deduplication']
|
||||
if enable:
|
||||
cmd.append('y')
|
||||
else:
|
||||
cmd.append('n')
|
||||
cmd.extend(options_to_cli_args(dedup_options))
|
||||
cmd.append(lv_path)
|
||||
return call(cmd)
|
||||
|
||||
|
||||
def supports_json():
|
||||
cmd = ['help']
|
||||
rc, out, err = call(cmd)
|
||||
@@ -511,16 +459,6 @@ def supports_json():
|
||||
return False
|
||||
|
||||
|
||||
def supports_vdo():
|
||||
cmd = ['segtypes']
|
||||
rc, out, err = call(cmd)
|
||||
if rc == 0:
|
||||
if "vdo" in out:
|
||||
log_debug("We have VDO support")
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def lvm_full_report_json():
|
||||
pv_columns = ['pv_name', 'pv_uuid', 'pv_fmt', 'pv_size', 'pv_free',
|
||||
'pv_used', 'dev_size', 'pv_mda_size', 'pv_mda_free',
|
||||
@@ -548,22 +486,6 @@ def lvm_full_report_json():
|
||||
|
||||
lv_seg_columns = ['seg_pe_ranges', 'segtype', 'lv_uuid']
|
||||
|
||||
if cfg.vdo_support:
|
||||
lv_columns.extend(
|
||||
['vdo_operating_mode', 'vdo_compression_state', 'vdo_index_state',
|
||||
'vdo_used_size', 'vdo_saving_percent']
|
||||
)
|
||||
|
||||
lv_seg_columns.extend(
|
||||
['vdo_compression', 'vdo_deduplication',
|
||||
'vdo_use_metadata_hints', 'vdo_minimum_io_size',
|
||||
'vdo_block_map_cache_size', 'vdo_block_map_era_length',
|
||||
'vdo_use_sparse_index', 'vdo_index_memory_size',
|
||||
'vdo_slab_size', 'vdo_ack_threads', 'vdo_bio_threads',
|
||||
'vdo_bio_rotation', 'vdo_cpu_threads', 'vdo_hash_zone_threads',
|
||||
'vdo_logical_threads', 'vdo_physical_threads',
|
||||
'vdo_max_discard', 'vdo_write_policy', 'vdo_header_size'])
|
||||
|
||||
cmd = _dc('fullreport', [
|
||||
'-a', # Need hidden too
|
||||
'--configreport', 'pv', '-o', ','.join(pv_columns),
|
||||
@@ -634,7 +556,7 @@ def pv_resize(device, size_bytes, create_options):
|
||||
cmd.extend(options_to_cli_args(create_options))
|
||||
|
||||
if size_bytes != 0:
|
||||
cmd.extend(['--yes', '--setphysicalvolumesize', '%dB' % size_bytes])
|
||||
cmd.extend(['--yes', '--setphysicalvolumesize', str(size_bytes) + 'B'])
|
||||
|
||||
cmd.extend([device])
|
||||
return call(cmd)
|
||||
@@ -730,12 +652,12 @@ def vg_allocation_policy(vg_name, policy, policy_options):
|
||||
|
||||
|
||||
def vg_max_pv(vg_name, number, max_options):
|
||||
return _vg_value_set(vg_name, ['--maxphysicalvolumes', str(int(number))],
|
||||
return _vg_value_set(vg_name, ['--maxphysicalvolumes', str(number)],
|
||||
max_options)
|
||||
|
||||
|
||||
def vg_max_lv(vg_name, number, max_options):
|
||||
return _vg_value_set(vg_name, ['-l', str(int(number))], max_options)
|
||||
return _vg_value_set(vg_name, ['-l', str(number)], max_options)
|
||||
|
||||
|
||||
def vg_uuid_gen(vg_name, ignore, options):
|
||||
@@ -777,7 +699,6 @@ def activate_deactivate(op, name, activate, control_flags, options):
|
||||
op += 'n'
|
||||
|
||||
cmd.append(op)
|
||||
cmd.append("-y")
|
||||
cmd.append(name)
|
||||
return call(cmd)
|
||||
|
||||
|
||||
@@ -29,26 +29,11 @@ def _main_thread_load(refresh=True, emit_signal=True):
|
||||
refresh=refresh,
|
||||
emit_signal=emit_signal,
|
||||
cache_refresh=False)[1]
|
||||
|
||||
lv_changes = load_lvs(
|
||||
num_total_changes += load_lvs(
|
||||
refresh=refresh,
|
||||
emit_signal=emit_signal,
|
||||
cache_refresh=False)[1]
|
||||
|
||||
num_total_changes += lv_changes
|
||||
|
||||
# When the LVs change it can cause another change in the VGs which is
|
||||
# missed if we don't scan through the VGs again. We could achieve this
|
||||
# the other way and re-scan the LVs, but in general there are more LVs than
|
||||
# VGs, thus this should be more efficient. This happens when a LV interface
|
||||
# changes causing the dbus object representing it to be removed and
|
||||
# recreated.
|
||||
if refresh and lv_changes > 0:
|
||||
num_total_changes += load_vgs(
|
||||
refresh=refresh,
|
||||
emit_signal=emit_signal,
|
||||
cache_refresh=False)[1]
|
||||
|
||||
return num_total_changes
|
||||
|
||||
|
||||
|
||||
@@ -10,14 +10,14 @@
|
||||
from .automatedproperties import AutomatedProperties
|
||||
|
||||
from . import utils
|
||||
from .utils import vg_obj_path_generate, log_error, _handle_execute
|
||||
from .utils import vg_obj_path_generate, log_error
|
||||
import dbus
|
||||
from . import cmdhandler
|
||||
from . import cfg
|
||||
from .cfg import LV_INTERFACE, THIN_POOL_INTERFACE, SNAPSHOT_INTERFACE, \
|
||||
LV_COMMON_INTERFACE, CACHE_POOL_INTERFACE, LV_CACHED, VDO_POOL_INTERFACE
|
||||
LV_COMMON_INTERFACE, CACHE_POOL_INTERFACE, LV_CACHED
|
||||
from .request import RequestEntry
|
||||
from .utils import n, n32, d
|
||||
from .utils import n, n32
|
||||
from .loader import common
|
||||
from .state import State
|
||||
from . import background
|
||||
@@ -74,66 +74,23 @@ def lvs_state_retrieve(selection, cache_refresh=True):
|
||||
lvs = sorted(cfg.db.fetch_lvs(selection), key=get_key)
|
||||
|
||||
for l in lvs:
|
||||
if cfg.vdo_support:
|
||||
rc.append(LvStateVdo(
|
||||
l['lv_uuid'], l['lv_name'],
|
||||
l['lv_path'], n(l['lv_size']),
|
||||
l['vg_name'],
|
||||
l['vg_uuid'], l['pool_lv_uuid'],
|
||||
l['pool_lv'], l['origin_uuid'], l['origin'],
|
||||
n32(l['data_percent']), l['lv_attr'],
|
||||
l['lv_tags'], l['lv_active'], l['data_lv'],
|
||||
l['metadata_lv'], l['segtype'], l['lv_role'],
|
||||
l['lv_layout'],
|
||||
n32(l['snap_percent']),
|
||||
n32(l['metadata_percent']),
|
||||
n32(l['copy_percent']),
|
||||
n32(l['sync_percent']),
|
||||
n(l['lv_metadata_size']),
|
||||
l['move_pv'],
|
||||
l['move_pv_uuid'],
|
||||
l['vdo_operating_mode'],
|
||||
l['vdo_compression_state'],
|
||||
l['vdo_index_state'],
|
||||
n(l['vdo_used_size']),
|
||||
d(l['vdo_saving_percent']),
|
||||
l['vdo_compression'],
|
||||
l['vdo_deduplication'],
|
||||
l['vdo_use_metadata_hints'],
|
||||
n32(l['vdo_minimum_io_size']),
|
||||
n(l['vdo_block_map_cache_size']),
|
||||
n32(l['vdo_block_map_era_length']),
|
||||
l['vdo_use_sparse_index'],
|
||||
n(l['vdo_index_memory_size']),
|
||||
n(l['vdo_slab_size']),
|
||||
n32(l['vdo_ack_threads']),
|
||||
n32(l['vdo_bio_threads']),
|
||||
n32(l['vdo_bio_rotation']),
|
||||
n32(l['vdo_cpu_threads']),
|
||||
n32(l['vdo_hash_zone_threads']),
|
||||
n32(l['vdo_logical_threads']),
|
||||
n32(l['vdo_physical_threads']),
|
||||
n32(l['vdo_max_discard']),
|
||||
l['vdo_write_policy'],
|
||||
n32(l['vdo_header_size'])))
|
||||
else:
|
||||
rc.append(LvState(
|
||||
l['lv_uuid'], l['lv_name'],
|
||||
l['lv_path'], n(l['lv_size']),
|
||||
l['vg_name'],
|
||||
l['vg_uuid'], l['pool_lv_uuid'],
|
||||
l['pool_lv'], l['origin_uuid'], l['origin'],
|
||||
n32(l['data_percent']), l['lv_attr'],
|
||||
l['lv_tags'], l['lv_active'], l['data_lv'],
|
||||
l['metadata_lv'], l['segtype'], l['lv_role'],
|
||||
l['lv_layout'],
|
||||
n32(l['snap_percent']),
|
||||
n32(l['metadata_percent']),
|
||||
n32(l['copy_percent']),
|
||||
n32(l['sync_percent']),
|
||||
n(l['lv_metadata_size']),
|
||||
l['move_pv'],
|
||||
l['move_pv_uuid']))
|
||||
rc.append(LvState(
|
||||
l['lv_uuid'], l['lv_name'],
|
||||
l['lv_path'], n(l['lv_size']),
|
||||
l['vg_name'],
|
||||
l['vg_uuid'], l['pool_lv_uuid'],
|
||||
l['pool_lv'], l['origin_uuid'], l['origin'],
|
||||
n32(l['data_percent']), l['lv_attr'],
|
||||
l['lv_tags'], l['lv_active'], l['data_lv'],
|
||||
l['metadata_lv'], l['segtype'], l['lv_role'],
|
||||
l['lv_layout'],
|
||||
n32(l['snap_percent']),
|
||||
n32(l['metadata_percent']),
|
||||
n32(l['copy_percent']),
|
||||
n32(l['sync_percent']),
|
||||
n(l['lv_metadata_size']),
|
||||
l['move_pv'],
|
||||
l['move_pv_uuid']))
|
||||
return rc
|
||||
|
||||
|
||||
@@ -237,8 +194,6 @@ class LvState(State):
|
||||
def _object_type_create(self):
|
||||
if self.Attr[0] == 't':
|
||||
return LvThinPool
|
||||
elif self.Attr[0] == 'd':
|
||||
return LvVdoPool
|
||||
elif self.Attr[0] == 'C':
|
||||
if 'pool' in self.layout:
|
||||
return LvCachePool
|
||||
@@ -265,34 +220,6 @@ class LvState(State):
|
||||
return (klass, path_method)
|
||||
|
||||
|
||||
class LvStateVdo(LvState):
|
||||
|
||||
def __init__(self, Uuid, Name, Path, SizeBytes,
|
||||
vg_name, vg_uuid, pool_lv_uuid, PoolLv,
|
||||
origin_uuid, OriginLv, DataPercent, Attr, Tags, active,
|
||||
data_lv, metadata_lv, segtypes, role, layout, SnapPercent,
|
||||
MetaDataPercent, CopyPercent, SyncPercent,
|
||||
MetaDataSizeBytes, move_pv, move_pv_uuid,
|
||||
vdo_operating_mode, vdo_compression_state, vdo_index_state,
|
||||
vdo_used_size,vdo_saving_percent,vdo_compression,
|
||||
vdo_deduplication,vdo_use_metadata_hints,
|
||||
vdo_minimum_io_size,vdo_block_map_cache_size,
|
||||
vdo_block_map_era_length,vdo_use_sparse_index,
|
||||
vdo_index_memory_size,vdo_slab_size,vdo_ack_threads,
|
||||
vdo_bio_threads,vdo_bio_rotation,vdo_cpu_threads,
|
||||
vdo_hash_zone_threads,vdo_logical_threads,
|
||||
vdo_physical_threads,vdo_max_discard,
|
||||
vdo_write_policy,vdo_header_size):
|
||||
super(LvStateVdo, self).__init__(Uuid, Name, Path, SizeBytes,
|
||||
vg_name, vg_uuid, pool_lv_uuid, PoolLv,
|
||||
origin_uuid, OriginLv, DataPercent, Attr, Tags, active,
|
||||
data_lv, metadata_lv, segtypes, role, layout, SnapPercent,
|
||||
MetaDataPercent, CopyPercent, SyncPercent,
|
||||
MetaDataSizeBytes, move_pv, move_pv_uuid)
|
||||
|
||||
utils.init_class_from_arguments(self, "vdo_", snake_to_pascal=True)
|
||||
|
||||
|
||||
# noinspection PyPep8Naming
|
||||
@utils.dbus_property(LV_COMMON_INTERFACE, 'Uuid', 's')
|
||||
@utils.dbus_property(LV_COMMON_INTERFACE, 'Name', 's')
|
||||
@@ -348,7 +275,13 @@ class LvCommon(AutomatedProperties):
|
||||
|
||||
@staticmethod
|
||||
def handle_execute(rc, out, err):
|
||||
_handle_execute(rc, out, err, LV_INTERFACE)
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
else:
|
||||
# Need to work on error handling, need consistent
|
||||
raise dbus.exceptions.DBusException(
|
||||
LV_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
|
||||
@staticmethod
|
||||
def validate_dbus_object(lv_uuid, lv_name):
|
||||
@@ -388,7 +321,6 @@ class LvCommon(AutomatedProperties):
|
||||
'l': 'mirror log device', 'c': 'under conversion',
|
||||
'V': 'thin Volume', 't': 'thin pool', 'T': 'Thin pool data',
|
||||
'e': 'raid or pool metadata or pool metadata spare',
|
||||
'd': 'vdo pool', 'D': 'vdo pool data', 'g': 'integrity',
|
||||
'-': 'Unspecified'}
|
||||
return self.attr_struct(0, type_map)
|
||||
|
||||
@@ -524,7 +456,8 @@ class Lv(LvCommon):
|
||||
# Make sure we have a dbus object representing it
|
||||
LvCommon.validate_dbus_object(lv_uuid, lv_name)
|
||||
# Remove the LV, if successful then remove from the model
|
||||
LvCommon.handle_execute(*cmdhandler.lv_remove(lv_name, remove_options))
|
||||
rc, out, err = cmdhandler.lv_remove(lv_name, remove_options)
|
||||
LvCommon.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -544,8 +477,9 @@ class Lv(LvCommon):
|
||||
# Make sure we have a dbus object representing it
|
||||
LvCommon.validate_dbus_object(lv_uuid, lv_name)
|
||||
# Rename the logical volume
|
||||
LvCommon.handle_execute(*cmdhandler.lv_rename(lv_name, new_name,
|
||||
rename_options))
|
||||
rc, out, err = cmdhandler.lv_rename(lv_name, new_name,
|
||||
rename_options)
|
||||
LvCommon.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -594,11 +528,13 @@ class Lv(LvCommon):
|
||||
remainder = space % 512
|
||||
optional_size = space + 512 - remainder
|
||||
|
||||
LvCommon.handle_execute(*cmdhandler.vg_lv_snapshot(
|
||||
lv_name, snapshot_options,name, optional_size))
|
||||
rc, out, err = cmdhandler.vg_lv_snapshot(
|
||||
lv_name, snapshot_options, name, optional_size)
|
||||
LvCommon.handle_execute(rc, out, err)
|
||||
full_name = "%s/%s" % (dbo.vg_name_lookup(), name)
|
||||
return cfg.om.get_object_path_by_lvm_id(full_name)
|
||||
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=LV_INTERFACE,
|
||||
in_signature='stia{sv}',
|
||||
@@ -634,8 +570,9 @@ class Lv(LvCommon):
|
||||
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
|
||||
|
||||
size_change = new_size_bytes - dbo.SizeBytes
|
||||
LvCommon.handle_execute(*cmdhandler.lv_resize(
|
||||
dbo.lvm_id, size_change,pv_dests, resize_options))
|
||||
rc, out, err = cmdhandler.lv_resize(dbo.lvm_id, size_change,
|
||||
pv_dests, resize_options)
|
||||
LvCommon.handle_execute(rc, out, err)
|
||||
return "/"
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -670,8 +607,9 @@ class Lv(LvCommon):
|
||||
options):
|
||||
# Make sure we have a dbus object representing it
|
||||
LvCommon.validate_dbus_object(uuid, lv_name)
|
||||
LvCommon.handle_execute(*cmdhandler.activate_deactivate(
|
||||
'lvchange', lv_name, activate, control_flags, options))
|
||||
rc, out, err = cmdhandler.activate_deactivate(
|
||||
'lvchange', lv_name, activate, control_flags, options)
|
||||
LvCommon.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -705,8 +643,9 @@ class Lv(LvCommon):
|
||||
def _add_rm_tags(uuid, lv_name, tags_add, tags_del, tag_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
LvCommon.validate_dbus_object(uuid, lv_name)
|
||||
LvCommon.handle_execute(*cmdhandler.lv_tag(
|
||||
lv_name, tags_add, tags_del, tag_options))
|
||||
rc, out, err = cmdhandler.lv_tag(
|
||||
lv_name, tags_add, tags_del, tag_options)
|
||||
LvCommon.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -743,152 +682,6 @@ class Lv(LvCommon):
|
||||
cb, cbe, return_tuple=False)
|
||||
cfg.worker_q.put(r)
|
||||
|
||||
@staticmethod
|
||||
def _writecache_lv(lv_uuid, lv_name, lv_object_path, cache_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
|
||||
|
||||
# Make sure we have dbus object representing lv to cache
|
||||
lv_to_cache = cfg.om.get_object_by_path(lv_object_path)
|
||||
|
||||
if lv_to_cache:
|
||||
fcn = lv_to_cache.lv_full_name()
|
||||
rc, out, err = cmdhandler.lv_writecache_lv(
|
||||
dbo.lv_full_name(), fcn, cache_options)
|
||||
if rc == 0:
|
||||
# When we cache an LV, the cache pool and the lv that is getting
|
||||
# cached need to be removed from the object manager and
|
||||
# re-created as their interfaces have changed!
|
||||
mt_remove_dbus_objects((dbo, lv_to_cache))
|
||||
cfg.load()
|
||||
|
||||
lv_converted = cfg.om.get_object_path_by_lvm_id(fcn)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
LV_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
LV_INTERFACE, 'LV to cache with object path %s not present!' %
|
||||
lv_object_path)
|
||||
return lv_converted
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=LV_INTERFACE,
|
||||
in_signature='oia{sv}',
|
||||
out_signature='(oo)',
|
||||
async_callbacks=('cb', 'cbe'))
|
||||
def WriteCacheLv(self, lv_object, tmo, cache_options, cb, cbe):
|
||||
r = RequestEntry(
|
||||
tmo, Lv._writecache_lv,
|
||||
(self.Uuid, self.lvm_id, lv_object,
|
||||
cache_options), cb, cbe)
|
||||
cfg.worker_q.put(r)
|
||||
|
||||
|
||||
# noinspection PyPep8Naming
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'OperatingMode', 's')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'CompressionState', 's')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'IndexState', 's')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'UsedSize', 't')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'SavingPercent', 'd')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'Compression', 's')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'Deduplication', 's')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'UseMetadataHints', 's')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'MinimumIoSize', 'u')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'BlockMapCacheSize', "t")
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'BlockMapEraLength', 'u')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'UseSparseIndex', 's')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'IndexMemorySize', 't')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'SlabSize', 't')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'AckThreads', 'u')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'BioThreads', 'u')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'BioRotation', 'u')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'CpuThreads', 'u')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'HashZoneThreads', 'u')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'LogicalThreads', 'u')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'PhysicalThreads', 'u')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'MaxDiscard', 'u')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'WritePolicy', 's')
|
||||
@utils.dbus_property(VDO_POOL_INTERFACE, 'HeaderSize', 'u')
|
||||
class LvVdoPool(Lv):
|
||||
_DataLv_meta = ("o", VDO_POOL_INTERFACE)
|
||||
|
||||
def __init__(self, object_path, object_state):
|
||||
super(LvVdoPool, self).__init__(object_path, object_state)
|
||||
self.set_interface(VDO_POOL_INTERFACE)
|
||||
self._data_lv, _ = self._get_data_meta()
|
||||
|
||||
@property
|
||||
def DataLv(self):
|
||||
return dbus.ObjectPath(self._data_lv)
|
||||
|
||||
@staticmethod
|
||||
def _enable_disable_compression(pool_uuid, pool_name, enable, comp_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
LvCommon.validate_dbus_object(pool_uuid, pool_name)
|
||||
# Rename the logical volume
|
||||
LvCommon.handle_execute(*cmdhandler.lv_vdo_compression(
|
||||
pool_name, enable, comp_options))
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VDO_POOL_INTERFACE,
|
||||
in_signature='ia{sv}',
|
||||
out_signature='o',
|
||||
async_callbacks=('cb', 'cbe'))
|
||||
def EnableCompression(self, tmo, comp_options, cb, cbe):
|
||||
r = RequestEntry(
|
||||
tmo, LvVdoPool._enable_disable_compression,
|
||||
(self.Uuid, self.lvm_id, True, comp_options),
|
||||
cb, cbe, False)
|
||||
cfg.worker_q.put(r)
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VDO_POOL_INTERFACE,
|
||||
in_signature='ia{sv}',
|
||||
out_signature='o',
|
||||
async_callbacks=('cb', 'cbe'))
|
||||
def DisableCompression(self, tmo, comp_options, cb, cbe):
|
||||
r = RequestEntry(
|
||||
tmo, LvVdoPool._enable_disable_compression,
|
||||
(self.Uuid, self.lvm_id, False, comp_options),
|
||||
cb, cbe, False)
|
||||
cfg.worker_q.put(r)
|
||||
|
||||
@staticmethod
|
||||
def _enable_disable_deduplication(pool_uuid, pool_name, enable, dedup_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
LvCommon.validate_dbus_object(pool_uuid, pool_name)
|
||||
# Rename the logical volume
|
||||
LvCommon.handle_execute(*cmdhandler.lv_vdo_deduplication(
|
||||
pool_name, enable, dedup_options))
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VDO_POOL_INTERFACE,
|
||||
in_signature='ia{sv}',
|
||||
out_signature='o',
|
||||
async_callbacks=('cb', 'cbe'))
|
||||
def EnableDeduplication(self, tmo, dedup_options, cb, cbe):
|
||||
r = RequestEntry(
|
||||
tmo, LvVdoPool._enable_disable_deduplication,
|
||||
(self.Uuid, self.lvm_id, True, dedup_options),
|
||||
cb, cbe, False)
|
||||
cfg.worker_q.put(r)
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VDO_POOL_INTERFACE,
|
||||
in_signature='ia{sv}',
|
||||
out_signature='o',
|
||||
async_callbacks=('cb', 'cbe'))
|
||||
def DisableDeduplication(self, tmo, dedup_options, cb, cbe):
|
||||
r = RequestEntry(
|
||||
tmo, LvVdoPool._enable_disable_deduplication,
|
||||
(self.Uuid, self.lvm_id, False, dedup_options),
|
||||
cb, cbe, False)
|
||||
cfg.worker_q.put(r)
|
||||
|
||||
|
||||
# noinspection PyPep8Naming
|
||||
class LvThinPool(Lv):
|
||||
@@ -912,8 +705,10 @@ class LvThinPool(Lv):
|
||||
def _lv_create(lv_uuid, lv_name, name, size_bytes, create_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
|
||||
LvCommon.handle_execute(*cmdhandler.lv_lv_create(
|
||||
lv_name, create_options, name, size_bytes))
|
||||
|
||||
rc, out, err = cmdhandler.lv_lv_create(
|
||||
lv_name, create_options, name, size_bytes)
|
||||
LvCommon.handle_execute(rc, out, err)
|
||||
full_name = "%s/%s" % (dbo.vg_name_lookup(), name)
|
||||
return cfg.om.get_object_path_by_lvm_id(full_name)
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ from lvmdbusd.utils import log_debug, log_error
|
||||
|
||||
|
||||
class DataStore(object):
|
||||
def __init__(self, usejson=True, vdo_support=False):
|
||||
def __init__(self, usejson=True):
|
||||
self.pvs = {}
|
||||
self.vgs = {}
|
||||
self.lvs = {}
|
||||
@@ -43,8 +43,6 @@ class DataStore(object):
|
||||
else:
|
||||
self.json = usejson
|
||||
|
||||
self.vdo_support = vdo_support
|
||||
|
||||
@staticmethod
|
||||
def _insert_record(table, key, record, allowed_multiple):
|
||||
if key in table:
|
||||
@@ -243,7 +241,8 @@ class DataStore(object):
|
||||
|
||||
return DataStore._parse_lvs_common(c_lvs, c_lv_full_lookup)
|
||||
|
||||
def _parse_lvs_json(self, _all):
|
||||
@staticmethod
|
||||
def _parse_lvs_json(_all):
|
||||
|
||||
c_lvs = OrderedDict()
|
||||
c_lv_full_lookup = {}
|
||||
@@ -263,13 +262,8 @@ class DataStore(object):
|
||||
if 'seg' in r:
|
||||
for s in r['seg']:
|
||||
r = c_lvs[s['lv_uuid']]
|
||||
r.setdefault('seg_pe_ranges', []).\
|
||||
append(s['seg_pe_ranges'])
|
||||
r.setdefault('seg_pe_ranges', []).append(s['seg_pe_ranges'])
|
||||
r.setdefault('segtype', []).append(s['segtype'])
|
||||
if self.vdo_support:
|
||||
for seg_key, seg_val in s.items():
|
||||
if seg_key.startswith("vdo_"):
|
||||
r[seg_key] = seg_val
|
||||
|
||||
return DataStore._parse_lvs_common(c_lvs, c_lv_full_lookup)
|
||||
|
||||
|
||||
@@ -29,7 +29,7 @@ from .utils import log_debug, log_error
|
||||
import argparse
|
||||
import os
|
||||
import sys
|
||||
from .cmdhandler import LvmFlightRecorder, supports_vdo
|
||||
from .cmdhandler import LvmFlightRecorder
|
||||
from .request import RequestEntry
|
||||
|
||||
|
||||
@@ -44,10 +44,10 @@ def process_request():
|
||||
try:
|
||||
req = cfg.worker_q.get(True, 5)
|
||||
log_debug(
|
||||
"Method start: %s with args %s (callback = %s)" %
|
||||
(str(req.method), str(req.arguments), str(req.cb)))
|
||||
"Running method: %s with args %s" %
|
||||
(str(req.method), str(req.arguments)))
|
||||
req.run_cmd()
|
||||
log_debug("Method complete: %s" % str(req.method))
|
||||
log_debug("Method complete ")
|
||||
except queue.Empty:
|
||||
pass
|
||||
except Exception:
|
||||
@@ -127,14 +127,6 @@ def main():
|
||||
log_error("You cannot specify --lvmshell and --nojson")
|
||||
sys.exit(1)
|
||||
|
||||
# We will dynamically add interfaces which support vdo if it
|
||||
# exists.
|
||||
cfg.vdo_support = supports_vdo()
|
||||
|
||||
if cfg.vdo_support and not cfg.args.use_json:
|
||||
log_error("You cannot specify --nojson when lvm has VDO support")
|
||||
sys.exit(1)
|
||||
|
||||
# List of threads that we start up
|
||||
thread_list = []
|
||||
|
||||
@@ -155,12 +147,12 @@ def main():
|
||||
cfg.om = Lvm(BASE_OBJ_PATH)
|
||||
cfg.om.register_object(Manager(MANAGER_OBJ_PATH))
|
||||
|
||||
cfg.db = lvmdb.DataStore(cfg.args.use_json, cfg.vdo_support)
|
||||
cfg.db = lvmdb.DataStore(cfg.args.use_json)
|
||||
|
||||
# Using a thread to process requests, we cannot hang the dbus library
|
||||
# thread that is handling the dbus interface
|
||||
thread_list.append(
|
||||
threading.Thread(target=process_request, name='process_request'))
|
||||
thread_list.append(threading.Thread(target=process_request,
|
||||
name='process_request'))
|
||||
|
||||
# Have a single thread handling updating lvm and the dbus model so we
|
||||
# don't have multiple threads doing this as the same time
|
||||
|
||||
@@ -27,7 +27,7 @@ class Manager(AutomatedProperties):

@property
def Version(self):
return dbus.String('1.1.0')
return dbus.String('1.0.0')

@staticmethod
def handle_execute(rc, out, err):
@@ -107,10 +107,10 @@ class Manager(AutomatedProperties):
rc = cfg.load(log=False)

if rc != 0:
utils.log_debug('Manager.Refresh - exit %d %d' % (rc, lc),
utils.log_debug('Manager.Refresh - exit %d' % (rc),
'bg_black', 'fg_light_red')
else:
utils.log_debug('Manager.Refresh - exit %d %d' % (rc, lc))
utils.log_debug('Manager.Refresh - exit %d' % (rc))
return rc + lc

@dbus.service.method(

@@ -14,7 +14,7 @@ import dbus
|
||||
from .cfg import PV_INTERFACE
|
||||
from . import cmdhandler
|
||||
from .utils import vg_obj_path_generate, n, pv_obj_path_generate, \
|
||||
lv_object_path_method, _handle_execute
|
||||
lv_object_path_method
|
||||
from .loader import common
|
||||
from .request import RequestEntry
|
||||
from .state import State
|
||||
@@ -138,12 +138,19 @@ class Pv(AutomatedProperties):
|
||||
# Remove the PV, if successful then remove from the model
|
||||
# Make sure we have a dbus object representing it
|
||||
Pv.validate_dbus_object(pv_uuid, pv_name)
|
||||
Pv.handle_execute(*cmdhandler.pv_remove(pv_name, remove_options))
|
||||
rc, out, err = cmdhandler.pv_remove(pv_name, remove_options)
|
||||
Pv.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@staticmethod
|
||||
def handle_execute(rc, out, err):
|
||||
return _handle_execute(rc, out, err, PV_INTERFACE)
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
else:
|
||||
# Need to work on error handling, need consistent
|
||||
raise dbus.exceptions.DBusException(
|
||||
PV_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
|
||||
@staticmethod
|
||||
def validate_dbus_object(pv_uuid, pv_name):
|
||||
@@ -171,8 +178,10 @@ class Pv(AutomatedProperties):
|
||||
def _resize(pv_uuid, pv_name, new_size_bytes, resize_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Pv.validate_dbus_object(pv_uuid, pv_name)
|
||||
Pv.handle_execute(*cmdhandler.pv_resize(pv_name, new_size_bytes,
|
||||
resize_options))
|
||||
|
||||
rc, out, err = cmdhandler.pv_resize(pv_name, new_size_bytes,
|
||||
resize_options)
|
||||
Pv.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -191,8 +200,9 @@ class Pv(AutomatedProperties):
|
||||
def _allocation_enabled(pv_uuid, pv_name, yes_no, allocation_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Pv.validate_dbus_object(pv_uuid, pv_name)
|
||||
Pv.handle_execute(*cmdhandler.pv_allocatable(pv_name, yes_no,
|
||||
allocation_options))
|
||||
rc, out, err = cmdhandler.pv_allocatable(
|
||||
pv_name, yes_no, allocation_options)
|
||||
Pv.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
|
||||
@@ -26,15 +26,6 @@ import signal
STDOUT_TTY = os.isatty(sys.stdout.fileno())

def _handle_execute(rc, out, err, interface):
if rc == 0:
cfg.load()
else:
# Need to work on error handling, need consistent
raise dbus.exceptions.DBusException(
interface, 'Exit code %s, stderr = %s' % (str(rc), err))

def rtype(dbus_type):
"""
Decorator making sure that the decorated function returns a value of
@@ -66,20 +57,8 @@ def n32(v):
return int(float(v))

@rtype(dbus.Double)
def d(v):
if not v:
return 0.0
return float(v)

def _snake_to_pascal(s):
return ''.join(x.title() for x in s.split('_'))

# noinspection PyProtectedMember
|
||||
def init_class_from_arguments(
|
||||
obj_instance, begin_suffix=None, snake_to_pascal=False):
|
||||
def init_class_from_arguments(obj_instance):
|
||||
for k, v in list(sys._getframe(1).f_locals.items()):
|
||||
if k != 'self':
|
||||
nt = k
|
||||
@@ -90,17 +69,8 @@ def init_class_from_arguments(
|
||||
cur = getattr(obj_instance, nt, v)
|
||||
|
||||
# print 'Init class %s = %s' % (nt, str(v))
|
||||
if not (cur and len(str(cur)) and (v is None or len(str(v))) == 0)\
|
||||
and (begin_suffix is None or nt.startswith(begin_suffix)):
|
||||
|
||||
if begin_suffix and nt.startswith(begin_suffix):
|
||||
name = nt[len(begin_suffix):]
|
||||
if snake_to_pascal:
|
||||
name = _snake_to_pascal(name)
|
||||
|
||||
setattr(obj_instance, name, v)
|
||||
else:
|
||||
setattr(obj_instance, nt, v)
|
||||
if not (cur and len(str(cur)) and (v is None or len(str(v))) == 0):
|
||||
setattr(obj_instance, nt, v)
|
||||
|
||||
|
||||
def get_properties(f):
|
||||
@@ -368,8 +338,6 @@ def lv_object_path_method(name, meta):
|
||||
return _hidden_lv_obj_path_generate
|
||||
elif meta[0][0] == 't':
|
||||
return _thin_pool_obj_path_generate
|
||||
elif meta[0][0] == 'd':
|
||||
return _vdo_pool_object_path_generate
|
||||
elif meta[0][0] == 'C' and 'pool' in meta[1]:
|
||||
return _cache_pool_obj_path_generate
|
||||
|
||||
@@ -387,10 +355,6 @@ def _thin_pool_obj_path_generate():
|
||||
return cfg.THIN_POOL_PATH + "/%d" % next(cfg.thin_id)
|
||||
|
||||
|
||||
def _vdo_pool_object_path_generate():
|
||||
return cfg.VDO_POOL_PATH + "/%d" % next(cfg.vdo_id)
|
||||
|
||||
|
||||
def _cache_pool_obj_path_generate():
|
||||
return cfg.CACHE_POOL_PATH + "/%d" % next(cfg.cache_pool_id)
|
||||
|
||||
@@ -482,7 +446,7 @@ _ALLOWABLE_CH_SET = set(_ALLOWABLE_CH)
|
||||
_ALLOWABLE_VG_LV_CH = string.ascii_letters + string.digits + '.-_+'
|
||||
_ALLOWABLE_VG_LV_CH_SET = set(_ALLOWABLE_VG_LV_CH)
|
||||
_LV_NAME_RESERVED = ("_cdata", "_cmeta", "_corig", "_mimage", "_mlog",
|
||||
"_pmspare", "_rimage", "_rmeta", "_tdata", "_tmeta", "_vorigin", "_vdata")
|
||||
"_pmspare", "_rimage", "_rmeta", "_tdata", "_tmeta", "_vorigin")
|
||||
|
||||
# Tags can have the characters, based on the code
|
||||
# a-zA-Z0-9._-+/=!:&#
|
||||
|
||||
@@ -10,11 +10,10 @@
from .automatedproperties import AutomatedProperties

from . import utils
from .utils import pv_obj_path_generate, vg_obj_path_generate, n, \
	_handle_execute
from .utils import pv_obj_path_generate, vg_obj_path_generate, n
import dbus
from . import cfg
from .cfg import VG_INTERFACE, VG_VDO_INTERFACE
from .cfg import VG_INTERFACE
from . import cmdhandler
from .request import RequestEntry
from .loader import common
@@ -47,7 +46,7 @@ def vgs_state_retrieve(selection, cache_refresh=True):

def load_vgs(vg_specific=None, object_path=None, refresh=False,
		emit_signal=False, cache_refresh=True):
	return common(vgs_state_retrieve, (Vg, VgVdo, ), vg_specific, object_path, refresh,
	return common(vgs_state_retrieve, (Vg,), vg_specific, object_path, refresh,
		emit_signal, cache_refresh)


@@ -99,11 +98,7 @@ class VgState(State):
		if not path:
			path = cfg.om.get_object_path_by_uuid_lvm_id(
				self.Uuid, self.internal_name, vg_obj_path_generate)

		if cfg.vdo_support:
			return VgVdo(path, self)
		else:
			return Vg(path, self)
		return Vg(path, self)

# noinspection PyMethodMayBeStatic
|
||||
def creation_signature(self):
|
||||
@@ -159,7 +154,13 @@ class Vg(AutomatedProperties):
|
||||
|
||||
@staticmethod
|
||||
def handle_execute(rc, out, err):
|
||||
return _handle_execute(rc, out, err, VG_INTERFACE)
|
||||
if rc == 0:
|
||||
cfg.load()
|
||||
else:
|
||||
# Need to work on error handling, need consistent
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE,
|
||||
'Exit code %s, stderr = %s' % (str(rc), err))
|
||||
|
||||
@staticmethod
|
||||
def validate_dbus_object(vg_uuid, vg_name):
|
||||
@@ -175,8 +176,9 @@ class Vg(AutomatedProperties):
|
||||
def _rename(uuid, vg_name, new_name, rename_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
Vg.handle_execute(*cmdhandler.vg_rename(
|
||||
uuid, new_name, rename_options))
|
||||
rc, out, err = cmdhandler.vg_rename(
|
||||
uuid, new_name, rename_options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -195,7 +197,8 @@ class Vg(AutomatedProperties):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
# Remove the VG, if successful then remove from the model
|
||||
Vg.handle_execute(*cmdhandler.vg_remove(vg_name, remove_options))
|
||||
rc, out, err = cmdhandler.vg_remove(vg_name, remove_options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -211,7 +214,8 @@ class Vg(AutomatedProperties):
|
||||
@staticmethod
|
||||
def _change(uuid, vg_name, change_options):
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
Vg.handle_execute(*cmdhandler.vg_change(change_options, vg_name))
|
||||
rc, out, err = cmdhandler.vg_change(change_options, vg_name)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
# TODO: This should be broken into a number of different methods
|
||||
@@ -247,8 +251,9 @@ class Vg(AutomatedProperties):
|
||||
VG_INTERFACE,
|
||||
'PV Object path not found = %s!' % pv_op)
|
||||
|
||||
Vg.handle_execute(*cmdhandler.vg_reduce(
|
||||
vg_name, missing, pv_devices, reduce_options))
|
||||
rc, out, err = cmdhandler.vg_reduce(vg_name, missing, pv_devices,
|
||||
reduce_options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -278,8 +283,9 @@ class Vg(AutomatedProperties):
|
||||
VG_INTERFACE, 'PV Object path not found = %s!' % i)
|
||||
|
||||
if len(extend_devices):
|
||||
Vg.handle_execute(*cmdhandler.vg_extend(
|
||||
vg_name, extend_devices, extend_options))
|
||||
rc, out, err = cmdhandler.vg_extend(vg_name, extend_devices,
|
||||
extend_options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
else:
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE, 'No pv_object_paths provided!')
|
||||
@@ -333,8 +339,10 @@ class Vg(AutomatedProperties):
|
||||
|
||||
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
|
||||
|
||||
Vg.handle_execute(*cmdhandler.vg_lv_create(
|
||||
vg_name, create_options, name, size_bytes, pv_dests))
|
||||
rc, out, err = cmdhandler.vg_lv_create(
|
||||
vg_name, create_options, name, size_bytes, pv_dests)
|
||||
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -372,8 +380,11 @@ class Vg(AutomatedProperties):
|
||||
thin_pool, create_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
Vg.handle_execute(*cmdhandler.vg_lv_create_linear(
|
||||
vg_name, create_options, name, size_bytes, thin_pool))
|
||||
|
||||
rc, out, err = cmdhandler.vg_lv_create_linear(
|
||||
vg_name, create_options, name, size_bytes, thin_pool)
|
||||
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -395,9 +406,10 @@ class Vg(AutomatedProperties):
|
||||
stripe_size_kb, thin_pool, create_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
Vg.handle_execute(*cmdhandler.vg_lv_create_striped(
|
||||
rc, out, err = cmdhandler.vg_lv_create_striped(
|
||||
vg_name, create_options, name, size_bytes,
|
||||
num_stripes, stripe_size_kb, thin_pool))
|
||||
num_stripes, stripe_size_kb, thin_pool)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -422,8 +434,9 @@ class Vg(AutomatedProperties):
|
||||
num_copies, create_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
Vg.handle_execute(*cmdhandler.vg_lv_create_mirror(
|
||||
vg_name, create_options, name, size_bytes, num_copies))
|
||||
rc, out, err = cmdhandler.vg_lv_create_mirror(
|
||||
vg_name, create_options, name, size_bytes, num_copies)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -446,9 +459,10 @@ class Vg(AutomatedProperties):
|
||||
num_stripes, stripe_size_kb, create_options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
Vg.handle_execute(*cmdhandler.vg_lv_create_raid(
|
||||
rc, out, err = cmdhandler.vg_lv_create_raid(
|
||||
vg_name, create_options, name, raid_type, size_bytes,
|
||||
num_stripes, stripe_size_kb))
|
||||
num_stripes, stripe_size_kb)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return Vg.fetch_new_lv(vg_name, name)
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -546,8 +560,9 @@ class Vg(AutomatedProperties):
|
||||
raise dbus.exceptions.DBusException(
|
||||
VG_INTERFACE, 'PV object path = %s not found' % p)
|
||||
|
||||
Vg.handle_execute(*cmdhandler.pv_tag(
|
||||
pv_devices, tags_add, tags_del, tag_options))
|
||||
rc, out, err = cmdhandler.pv_tag(
|
||||
pv_devices, tags_add, tags_del, tag_options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -588,8 +603,9 @@ class Vg(AutomatedProperties):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
|
||||
Vg.handle_execute(*cmdhandler.vg_tag(
|
||||
vg_name, tags_add, tags_del, tag_options))
|
||||
rc, out, err = cmdhandler.vg_tag(
|
||||
vg_name, tags_add, tags_del, tag_options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -628,7 +644,8 @@ class Vg(AutomatedProperties):
|
||||
def _vg_change_set(uuid, vg_name, method, value, options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
Vg.handle_execute(*method(vg_name, value, options))
|
||||
rc, out, err = method(vg_name, value, options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -688,8 +705,9 @@ class Vg(AutomatedProperties):
|
||||
options):
|
||||
# Make sure we have a dbus object representing it
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
Vg.handle_execute(*cmdhandler.activate_deactivate(
|
||||
'vgchange', vg_name, activate, control_flags, options))
|
||||
rc, out, err = cmdhandler.activate_deactivate(
|
||||
'vgchange', vg_name, activate, control_flags, options)
|
||||
Vg.handle_execute(rc, out, err)
|
||||
return '/'
|
||||
|
||||
@dbus.service.method(
|
||||
@@ -777,71 +795,3 @@ class Vg(AutomatedProperties):
|
||||
@property
|
||||
def Clustered(self):
|
||||
return self._attribute(5, 'c')
|
||||
|
||||
|
||||
class VgVdo(Vg):
|
||||
|
||||
# noinspection PyUnusedLocal,PyPep8Naming
|
||||
def __init__(self, object_path, object_state):
|
||||
super(VgVdo, self).__init__(object_path, vgs_state_retrieve)
|
||||
self.set_interface(VG_VDO_INTERFACE)
|
||||
self._object_path = object_path
|
||||
self.state = object_state
|
||||
|
||||
@staticmethod
|
||||
def _lv_vdo_pool_create_with_lv(uuid, vg_name, pool_name, lv_name,
|
||||
data_size, virtual_size, create_options):
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
Vg.handle_execute(*cmdhandler.vg_create_vdo_pool_lv_and_lv(
|
||||
vg_name, pool_name, lv_name, data_size, virtual_size,
|
||||
create_options))
|
||||
return Vg.fetch_new_lv(vg_name, pool_name)
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_VDO_INTERFACE,
|
||||
in_signature='ssttia{sv}',
|
||||
out_signature='(oo)',
|
||||
async_callbacks=('cb', 'cbe'))
|
||||
def CreateVdoPoolandLv(self, pool_name, lv_name, data_size, virtual_size,
|
||||
tmo, create_options, cb, cbe):
|
||||
utils.validate_lv_name(VG_VDO_INTERFACE, self.Name, pool_name)
|
||||
utils.validate_lv_name(VG_VDO_INTERFACE, self.Name, lv_name)
|
||||
|
||||
r = RequestEntry(tmo, VgVdo._lv_vdo_pool_create_with_lv,
|
||||
(self.state.Uuid, self.state.lvm_id,
|
||||
pool_name, lv_name, round_size(data_size),
|
||||
round_size(virtual_size),
|
||||
create_options), cb, cbe)
|
||||
cfg.worker_q.put(r)
|
||||
|
||||
@staticmethod
|
||||
def _vdo_pool_create(uuid, vg_name, pool_lv, name, virtual_size, create_options):
|
||||
Vg.validate_dbus_object(uuid, vg_name)
|
||||
|
||||
# Retrieve the full name of the pool lv
|
||||
pool = cfg.om.get_object_by_path(pool_lv)
|
||||
if not pool:
|
||||
msg = 'LV with object path %s not present!' % \
|
||||
(pool_lv)
|
||||
raise dbus.exceptions.DBusException(VG_VDO_INTERFACE, msg)
|
||||
|
||||
Vg.handle_execute(*cmdhandler.vg_create_vdo_pool(
|
||||
pool.lv_full_name(), name, virtual_size,
|
||||
create_options))
|
||||
return Vg.fetch_new_lv(vg_name, pool.Name)
|
||||
|
||||
@dbus.service.method(
|
||||
dbus_interface=VG_VDO_INTERFACE,
|
||||
in_signature='ostia{sv}',
|
||||
out_signature='(oo)',
|
||||
async_callbacks=('cb', 'cbe'))
|
||||
def CreateVdoPool(self, pool_lv, name, virtual_size,
|
||||
tmo, create_options, cb, cbe):
|
||||
utils.validate_lv_name(VG_VDO_INTERFACE, self.Name, name)
|
||||
|
||||
r = RequestEntry(tmo, VgVdo._vdo_pool_create,
|
||||
(self.state.Uuid, self.state.lvm_id,
|
||||
pool_lv, name,
|
||||
round_size(virtual_size),
|
||||
create_options), cb, cbe)
|
||||
cfg.worker_q.put(r)
|
||||
|
||||
@@ -38,25 +38,18 @@ TARGETS = lvmlockd lvmlockctl

include $(top_builddir)/make.tmpl

CFLAGS += $(EXTRA_EXEC_CFLAGS)
CFLAGS += $(EXTRA_EXEC_CFLAGS) $(SYSTEMD_CFLAGS)
INCLUDES += -I$(top_srcdir)/libdaemon/server
LDFLAGS += -L$(top_builddir)/libdaemon/server $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS)
LIBS += $(RT_LIBS) $(DAEMON_LIBS) $(PTHREAD_LIBS)
LDFLAGS += $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS)
LIBS += $(PTHREAD_LIBS) $(SYSTEMD_LIBS)


ifeq ($(USE_SD_NOTIFY),yes)
	CFLAGS += $(shell pkg-config --cflags libsystemd) -DUSE_SD_NOTIFY
	LIBS += $(shell pkg-config --libs libsystemd)
endif

lvmlockd: $(OBJECTS) $(top_builddir)/libdaemon/client/libdaemonclient.a \
	$(top_builddir)/libdaemon/server/libdaemonserver.a
lvmlockd: $(OBJECTS) $(top_builddir)/libdaemon/server/libdaemonserver.a $(INTERNAL_LIBS)
	@echo " [CC] $@"
	$(Q) $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJECTS) $(LOCK_LIBS) -ldaemonserver $(INTERNAL_LIBS) $(LIBS)
	$(Q) $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $+ $(LOCK_LIBS) $(LIBS)

lvmlockctl: lvmlockctl.o $(top_builddir)/libdaemon/client/libdaemonclient.a
lvmlockctl: lvmlockctl.o $(INTERNAL_LIBS)
	@echo " [CC] $@"
	$(Q) $(CC) $(CFLAGS) $(LDFLAGS) -o $@ lvmlockctl.o $(INTERNAL_LIBS) $(LIBS)
	$(Q) $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $+ $(LIBS)

install_lvmlockd: lvmlockd
	@echo " [INSTALL] $<"
@@ -280,12 +280,13 @@ static void format_info_line(char *line, char *r_name, char *r_type)
|
||||
|
||||
static void format_info(void)
|
||||
{
|
||||
char line[MAX_LINE] = { 0 };
|
||||
char r_name[MAX_NAME+1] = { 0 };
|
||||
char r_type[MAX_NAME+1] = { 0 };
|
||||
char line[MAX_LINE];
|
||||
char r_name[MAX_NAME+1];
|
||||
char r_type[MAX_NAME+1];
|
||||
int i, j;
|
||||
|
||||
j = 0;
|
||||
memset(line, 0, sizeof(line));
|
||||
|
||||
for (i = 0; i < dump_len; i++) {
|
||||
line[j++] = dump_buf[i];
|
||||
@@ -325,8 +326,6 @@ static int _lvmlockd_result(daemon_reply reply, int *result)
|
||||
{
|
||||
int reply_result;
|
||||
|
||||
*result = NO_LOCKD_RESULT;
|
||||
|
||||
if (reply.error) {
|
||||
log_error("lvmlockd_result reply error %d", reply.error);
|
||||
return 0;
|
||||
@@ -338,7 +337,7 @@ static int _lvmlockd_result(daemon_reply reply, int *result)
|
||||
}
|
||||
|
||||
reply_result = daemon_reply_int(reply, "op_result", NO_LOCKD_RESULT);
|
||||
if (reply_result == NO_LOCKD_RESULT) {
|
||||
if (reply_result == -1000) {
|
||||
log_error("lvmlockd_result no op_result");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -14,7 +14,6 @@
#include "libdaemon/client/daemon-client.h"

#define LVMLOCKD_SOCKET DEFAULT_RUN_DIR "/lvmlockd.socket"
#define LVMLOCKD_ADOPT_FILE DEFAULT_RUN_DIR "/lvmlockd.adopt"

/* Wrappers to open/close connection */

@@ -23,9 +22,9 @@ static inline daemon_handle lvmlockd_open(const char *sock)
	daemon_info lvmlockd_info = {
		.path = "lvmlockd",
		.socket = sock ?: LVMLOCKD_SOCKET,
		.autostart = 0,
		.protocol = "lvmlockd",
		.protocol_version = 1,
		.autostart = 0
	};

	return daemon_open(lvmlockd_info);
@@ -33,7 +32,7 @@ static inline daemon_handle lvmlockd_open(const char *sock)

static inline void lvmlockd_close(daemon_handle h)
{
	daemon_close(h);
	return daemon_close(h);
}
||||
|
||||
/*
|
||||
|
||||
@@ -31,15 +31,13 @@
|
||||
#include <sys/utsname.h>
|
||||
#include <sys/un.h>
|
||||
|
||||
#ifdef USE_SD_NOTIFY
|
||||
#ifdef NOTIFYDBUS_SUPPORT
|
||||
#include <systemd/sd-daemon.h>
|
||||
#endif
|
||||
|
||||
#define EXTERN
|
||||
#include "lvmlockd-internal.h"
|
||||
|
||||
static int str_to_mode(const char *str);
|
||||
|
||||
/*
|
||||
* Basic operation of lvmlockd
|
||||
*
|
||||
@@ -144,8 +142,6 @@ static const char *lvmlockd_protocol = "lvmlockd";
|
||||
static const int lvmlockd_protocol_version = 1;
|
||||
static int daemon_quit;
|
||||
static int adopt_opt;
|
||||
static uint32_t adopt_update_count;
|
||||
static const char *adopt_file;
|
||||
|
||||
/*
|
||||
* We use a separate socket for dumping daemon info.
|
||||
@@ -815,144 +811,6 @@ int version_from_args(char *args, unsigned int *major, unsigned int *minor, unsi
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Write new info when a command exits if that command has acquired a new LV
|
||||
* lock. If the command has released an LV lock we don't bother updating the
|
||||
* info. When adopting, we eliminate any LV lock adoptions if there is no dm
|
||||
* device for that LV. If lvmlockd is terminated after acquiring but before
|
||||
* writing this file, those LV locks would not be adopted on restart.
|
||||
*/
|
||||
|
||||
#define ADOPT_VERSION_MAJOR 1
|
||||
#define ADOPT_VERSION_MINOR 0
|
||||
|
||||
static void write_adopt_file(void)
|
||||
{
|
||||
struct lockspace *ls;
|
||||
struct resource *r;
|
||||
struct lock *lk;
|
||||
time_t t;
|
||||
FILE *fp;
|
||||
|
||||
if (!(fp = fopen(adopt_file, "w")))
|
||||
return;
|
||||
|
||||
adopt_update_count++;
|
||||
|
||||
t = time(NULL);
|
||||
fprintf(fp, "lvmlockd adopt_version %u.%u pid %d updates %u %s",
|
||||
ADOPT_VERSION_MAJOR, ADOPT_VERSION_MINOR, getpid(), adopt_update_count, ctime(&t));
|
||||
|
||||
pthread_mutex_lock(&lockspaces_mutex);
|
||||
list_for_each_entry(ls, &lockspaces, list) {
|
||||
if (ls->lm_type == LD_LM_DLM && !strcmp(ls->name, gl_lsname_dlm))
|
||||
continue;
|
||||
fprintf(fp, "VG: %38s %s %s %s\n",
|
||||
ls->vg_uuid, ls->vg_name, lm_str(ls->lm_type), ls->vg_args);
|
||||
list_for_each_entry(r, &ls->resources, list) {
|
||||
if (r->type != LD_RT_LV)
|
||||
continue;
|
||||
if ((r->mode != LD_LK_EX) && (r->mode != LD_LK_SH))
|
||||
continue;
|
||||
list_for_each_entry(lk, &r->locks, list) {
|
||||
fprintf(fp, "LV: %38s %s %s %s %u\n",
|
||||
ls->vg_uuid, r->name, r->lv_args, mode_str(r->mode), r->version);
|
||||
}
|
||||
}
|
||||
}
|
||||
pthread_mutex_unlock(&lockspaces_mutex);
|
||||
|
||||
fflush(fp);
|
||||
fclose(fp);
|
||||
}
|
||||
|
||||
static int read_adopt_file(struct list_head *vg_lockd)
|
||||
{
|
||||
char adopt_line[512];
|
||||
char vg_uuid[72];
|
||||
char lm_type_str[16];
|
||||
char mode[8];
|
||||
struct lockspace *ls, *ls2;
|
||||
struct resource *r;
|
||||
FILE *fp;
|
||||
|
||||
if (MAX_ARGS != 64 || MAX_NAME != 64)
|
||||
return -1;
|
||||
|
||||
if (!(fp = fopen(adopt_file, "r")))
|
||||
return 0;
|
||||
|
||||
while (fgets(adopt_line, sizeof(adopt_line), fp)) {
|
||||
if (adopt_line[0] == '#')
|
||||
continue;
|
||||
else if (!strncmp(adopt_line, "lvmlockd", 8)) {
|
||||
unsigned int v_major = 0, v_minor = 0;
|
||||
if ((sscanf(adopt_line, "lvmlockd adopt_version %u.%u", &v_major, &v_minor) != 2) ||
|
||||
(v_major != ADOPT_VERSION_MAJOR))
|
||||
goto fail;
|
||||
|
||||
} else if (!strncmp(adopt_line, "VG:", 3)) {
|
||||
if (!(ls = alloc_lockspace()))
|
||||
goto fail;
|
||||
|
||||
memset(vg_uuid, 0, sizeof(vg_uuid));
|
||||
|
||||
if (sscanf(adopt_line, "VG: %63s %64s %16s %64s",
|
||||
vg_uuid, ls->vg_name, lm_type_str, ls->vg_args) != 4) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
memcpy(ls->vg_uuid, vg_uuid, 64);
|
||||
|
||||
if ((ls->lm_type = str_to_lm(lm_type_str)) < 0)
|
||||
goto fail;
|
||||
|
||||
list_add(&ls->list, vg_lockd);
|
||||
|
||||
} else if (!strncmp(adopt_line, "LV:", 3)) {
|
||||
if (!(r = alloc_resource()))
|
||||
goto fail;
|
||||
|
||||
r->type = LD_RT_LV;
|
||||
|
||||
memset(vg_uuid, 0, sizeof(vg_uuid));
|
||||
|
||||
if (sscanf(adopt_line, "LV: %64s %64s %s %8s %u",
|
||||
vg_uuid, r->name, r->lv_args, mode, &r->version) != 5) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if ((r->adopt_mode = str_to_mode(mode)) == LD_LK_IV)
|
||||
goto fail;
|
||||
|
||||
if (ls && !memcmp(ls->vg_uuid, vg_uuid, 64)) {
|
||||
list_add(&r->list, &ls->resources);
|
||||
r = NULL;
|
||||
} else {
|
||||
list_for_each_entry(ls2, vg_lockd, list) {
|
||||
if (memcmp(ls2->vg_uuid, vg_uuid, 64))
|
||||
continue;
|
||||
list_add(&r->list, &ls2->resources);
|
||||
r = NULL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (r) {
|
||||
log_error("No lockspace found for resource %s vg_uuid %s", r->name, vg_uuid);
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fclose(fp);
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
fclose(fp);
|
||||
return -1;
|
||||
}
|
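As a rough illustration of the adopt-file record layout handled by write_adopt_file() and read_adopt_file() above, the sketch below writes and re-parses one "VG:" line with the same field order (uuid, VG name, lock manager type, lock args). Buffer sizes and the sample values are assumptions for the sketch, not lvmlockd code.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512], vg_uuid[72], vg_name[65], lm_type[16], vg_args[65];

	/* One "VG:" record as written by the adopt file writer. */
	snprintf(line, sizeof(line), "VG: %38s %s %s %s\n",
		 "aaaaaa-1111-bbbb-2222-cccc-3333-dddddd", "vg0", "sanlock", "1.0.0:lvmlock");

	memset(vg_uuid, 0, sizeof(vg_uuid));

	/* Parse it back; field widths stay below the buffer sizes above. */
	if (sscanf(line, "VG: %63s %64s %15s %64s",
		   vg_uuid, vg_name, lm_type, vg_args) != 4) {
		fprintf(stderr, "unparsable adopt line: %s", line);
		return 1;
	}

	printf("adopt VG %s name %s type %s args %s\n", vg_uuid, vg_name, lm_type, vg_args);
	return 0;
}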
||||
|
||||
/*
|
||||
* These are few enough that arrays of function pointers can
|
||||
* be avoided.
|
||||
@@ -2372,7 +2230,7 @@ static void *lockspace_thread_main(void *arg_in)
|
||||
struct action *act_op_free = NULL;
|
||||
struct list_head tmp_act;
|
||||
struct list_head act_close;
|
||||
char tmp_name[MAX_NAME+5];
|
||||
char tmp_name[MAX_NAME+1];
|
||||
int free_vg = 0;
|
||||
int drop_vg = 0;
|
||||
int error = 0;
|
||||
@@ -2766,10 +2624,8 @@ out_act:
|
||||
* blank or fill it with garbage, but instead set it to REM:<name>
|
||||
* to make it easier to follow progress of freeing is via log_debug.
|
||||
*/
|
||||
memset(tmp_name, 0, sizeof(tmp_name));
|
||||
memcpy(tmp_name, "REM:", 4);
|
||||
strncpy(tmp_name+4, ls->name, sizeof(tmp_name)-4);
|
||||
memcpy(ls->name, tmp_name, sizeof(ls->name));
|
||||
dm_strncpy(tmp_name, ls->name, sizeof(tmp_name));
|
||||
snprintf(ls->name, sizeof(ls->name), "REM:%s", tmp_name);
|
||||
pthread_mutex_unlock(&lockspaces_mutex);
|
||||
|
||||
/* worker_thread will join this thread, and free the ls */
|
||||
@@ -4831,7 +4687,6 @@ static void *client_thread_main(void *arg_in)
|
||||
struct client *cl;
|
||||
struct action *act;
|
||||
struct action *act_un;
|
||||
uint32_t lock_acquire_count = 0, lock_acquire_written = 0;
|
||||
int rv;
|
||||
|
||||
while (1) {
|
||||
@@ -4863,9 +4718,6 @@ static void *client_thread_main(void *arg_in)
|
||||
rv = -1;
|
||||
}
|
||||
|
||||
if (act->flags & LD_AF_LV_LOCK)
|
||||
lock_acquire_count++;
|
||||
|
||||
/*
|
||||
* The client failed after we acquired an LV lock for
|
||||
* it, but before getting this reply saying it's done.
|
||||
@@ -4887,11 +4739,6 @@ static void *client_thread_main(void *arg_in)
|
||||
continue;
|
||||
}
|
||||
|
||||
if (adopt_opt && (lock_acquire_count > lock_acquire_written)) {
|
||||
lock_acquire_written = lock_acquire_count;
|
||||
write_adopt_file();
|
||||
}
|
||||
|
||||
/*
|
||||
* Queue incoming actions for lockspace threads
|
||||
*/
|
||||
@@ -4965,8 +4812,6 @@ static void *client_thread_main(void *arg_in)
|
||||
pthread_mutex_unlock(&client_mutex);
|
||||
}
|
||||
out:
|
||||
if (adopt_opt && lock_acquire_written)
|
||||
unlink(adopt_file);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -4999,6 +4844,180 @@ static void close_client_thread(void)
|
||||
log_error("pthread_join client_thread error %d", perrno);
|
||||
}
|
||||
|
||||
/*
|
||||
* Get a list of all VGs with a lockd type (sanlock|dlm).
|
||||
* We'll match this list against a list of existing lockspaces that are
|
||||
* found in the lock manager.
|
||||
*
|
||||
* For each of these VGs, also create a struct resource on ls->resources to
|
||||
* represent each LV in the VG that uses a lock. For each of these LVs
|
||||
* that are active, we'll attempt to adopt a lock.
|
||||
*/
|
||||
|
||||
static int get_lockd_vgs(struct list_head *vg_lockd)
|
||||
{
|
||||
/* FIXME: get VGs some other way */
|
||||
return -1;
|
||||
#if 0
|
||||
struct list_head update_vgs;
|
||||
daemon_reply reply;
|
||||
struct dm_config_node *cn;
|
||||
struct dm_config_node *metadata;
|
||||
struct dm_config_node *md_cn;
|
||||
struct dm_config_node *lv_cn;
|
||||
struct lockspace *ls, *safe;
|
||||
struct resource *r;
|
||||
const char *vg_name;
|
||||
const char *vg_uuid;
|
||||
const char *lv_uuid;
|
||||
const char *lock_type;
|
||||
const char *lock_args;
|
||||
char find_str_path[PATH_MAX];
|
||||
int rv = 0;
|
||||
|
||||
INIT_LIST_HEAD(&update_vgs);
|
||||
|
||||
reply = send_lvmetad("vg_list", "token = %s", "skip", NULL);
|
||||
|
||||
if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK")) {
|
||||
log_error("vg_list from lvmetad failed %d", reply.error);
|
||||
rv = -EINVAL;
|
||||
goto destroy;
|
||||
}
|
||||
|
||||
if (!(cn = dm_config_find_node(reply.cft->root, "volume_groups"))) {
|
||||
log_error("get_lockd_vgs no vgs");
|
||||
rv = -EINVAL;
|
||||
goto destroy;
|
||||
}
|
||||
|
||||
/* create an update_vgs list of all vg uuids */
|
||||
|
||||
for (cn = cn->child; cn; cn = cn->sib) {
|
||||
vg_uuid = cn->key;
|
||||
|
||||
if (!(ls = alloc_lockspace())) {
|
||||
rv = -ENOMEM;
|
||||
break;
|
||||
}
|
||||
|
||||
strncpy(ls->vg_uuid, vg_uuid, 64);
|
||||
list_add_tail(&ls->list, &update_vgs);
|
||||
log_debug("get_lockd_vgs %s", vg_uuid);
|
||||
}
|
||||
destroy:
|
||||
daemon_reply_destroy(reply);
|
||||
|
||||
if (rv < 0)
|
||||
goto out;
|
||||
|
||||
/* get vg_name and lock_type for each vg uuid entry in update_vgs */
|
||||
|
||||
list_for_each_entry(ls, &update_vgs, list) {
|
||||
reply = send_lvmetad("vg_lookup",
|
||||
"token = %s", "skip",
|
||||
"uuid = %s", ls->vg_uuid,
|
||||
NULL);
|
||||
|
||||
if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK")) {
|
||||
log_error("vg_lookup from lvmetad failed %d", reply.error);
|
||||
rv = -EINVAL;
|
||||
goto next;
|
||||
}
|
||||
|
||||
vg_name = daemon_reply_str(reply, "name", NULL);
|
||||
if (!vg_name) {
|
||||
log_error("get_lockd_vgs %s no name", ls->vg_uuid);
|
||||
rv = -EINVAL;
|
||||
goto next;
|
||||
}
|
||||
|
||||
strncpy(ls->vg_name, vg_name, MAX_NAME);
|
||||
|
||||
metadata = dm_config_find_node(reply.cft->root, "metadata");
|
||||
if (!metadata) {
|
||||
log_error("get_lockd_vgs %s name %s no metadata",
|
||||
ls->vg_uuid, ls->vg_name);
|
||||
rv = -EINVAL;
|
||||
goto next;
|
||||
}
|
||||
|
||||
lock_type = dm_config_find_str(metadata, "metadata/lock_type", NULL);
|
||||
ls->lm_type = str_to_lm(lock_type);
|
||||
|
||||
if ((ls->lm_type != LD_LM_SANLOCK) && (ls->lm_type != LD_LM_DLM)) {
|
||||
log_debug("get_lockd_vgs %s not lockd type", ls->vg_name);
|
||||
continue;
|
||||
}
|
||||
|
||||
lock_args = dm_config_find_str(metadata, "metadata/lock_args", NULL);
|
||||
if (lock_args)
|
||||
strncpy(ls->vg_args, lock_args, MAX_ARGS);
|
||||
|
||||
log_debug("get_lockd_vgs %s lock_type %s lock_args %s",
|
||||
ls->vg_name, lock_type, lock_args ?: "none");
|
||||
|
||||
/*
|
||||
* Make a record (struct resource) of each lv that uses a lock.
|
||||
* For any lv that uses a lock, we'll check if the lv is active
|
||||
* and if so try to adopt a lock for it.
|
||||
*/
|
||||
|
||||
for (md_cn = metadata->child; md_cn; md_cn = md_cn->sib) {
|
||||
if (strcmp(md_cn->key, "logical_volumes"))
|
||||
continue;
|
||||
|
||||
for (lv_cn = md_cn->child; lv_cn; lv_cn = lv_cn->sib) {
|
||||
snprintf(find_str_path, PATH_MAX, "%s/lock_args", lv_cn->key);
|
||||
lock_args = dm_config_find_str(lv_cn, find_str_path, NULL);
|
||||
if (!lock_args)
|
||||
continue;
|
||||
|
||||
snprintf(find_str_path, PATH_MAX, "%s/id", lv_cn->key);
|
||||
lv_uuid = dm_config_find_str(lv_cn, find_str_path, NULL);
|
||||
|
||||
if (!lv_uuid) {
|
||||
log_error("get_lock_vgs no lv id for name %s", lv_cn->key);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!(r = alloc_resource())) {
|
||||
rv = -ENOMEM;
|
||||
goto next;
|
||||
}
|
||||
|
||||
r->use_vb = 0;
|
||||
r->type = LD_RT_LV;
|
||||
strncpy(r->name, lv_uuid, MAX_NAME);
|
||||
if (lock_args)
|
||||
strncpy(r->lv_args, lock_args, MAX_ARGS);
|
||||
list_add_tail(&r->list, &ls->resources);
|
||||
log_debug("get_lockd_vgs %s lv %s %s (name %s)",
|
||||
ls->vg_name, r->name, lock_args ? lock_args : "", lv_cn->key);
|
||||
}
|
||||
}
|
||||
next:
|
||||
daemon_reply_destroy(reply);
|
||||
|
||||
if (rv < 0)
|
||||
break;
|
||||
}
|
||||
out:
|
||||
/* Return lockd VG's on the vg_lockd list. */
|
||||
|
||||
list_for_each_entry_safe(ls, safe, &update_vgs, list) {
|
||||
list_del(&ls->list);
|
||||
|
||||
if ((ls->lm_type == LD_LM_SANLOCK) || (ls->lm_type == LD_LM_DLM))
|
||||
list_add_tail(&ls->list, vg_lockd);
|
||||
else
|
||||
free(ls);
|
||||
}
|
||||
|
||||
return rv;
|
||||
#endif
|
||||
}
|
||||
|
||||
static char _dm_uuid[DM_UUID_LEN];
|
||||
|
||||
static char *get_dm_uuid(char *dm_name)
|
||||
@@ -5215,9 +5234,9 @@ static void adopt_locks(void)
|
||||
INIT_LIST_HEAD(&to_unlock);
|
||||
|
||||
/*
|
||||
* Get list of lockspaces from currently running lock managers.
|
||||
* Get list of shared VGs from file written by prior lvmlockd.
|
||||
* Get list of active LVs (in the shared VGs) from the file.
|
||||
* Get list of lockspaces from lock managers.
|
||||
* Get list of VGs from lvmetad with a lockd type.
|
||||
* Get list of active lockd type LVs from /dev.
|
||||
*/
|
||||
|
||||
if (lm_support_dlm() && lm_is_running_dlm()) {
|
||||
@@ -5241,17 +5260,12 @@ static void adopt_locks(void)
|
||||
* Adds a struct lockspace to vg_lockd for each lockd VG.
|
||||
* Adds a struct resource to ls->resources for each LV.
|
||||
*/
|
||||
rv = read_adopt_file(&vg_lockd);
|
||||
rv = get_lockd_vgs(&vg_lockd);
|
||||
if (rv < 0) {
|
||||
log_error("adopt_locks read_adopt_file failed");
|
||||
log_error("adopt_locks get_lockd_vgs failed");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (list_empty(&vg_lockd)) {
|
||||
log_debug("No lockspaces in adopt file");
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* For each resource on each lockspace, check if the
|
||||
* corresponding LV is active. If so, leave the
|
||||
@@ -5490,7 +5504,7 @@ static void adopt_locks(void)
|
||||
goto fail;
|
||||
act->op = LD_OP_LOCK;
|
||||
act->rt = LD_RT_LV;
|
||||
act->mode = r->adopt_mode;
|
||||
act->mode = LD_LK_EX;
|
||||
act->flags = (LD_AF_ADOPT | LD_AF_PERSISTENT);
|
||||
act->client_id = INTERNAL_CLIENT_ID;
|
||||
act->lm_type = ls->lm_type;
|
||||
@@ -5588,9 +5602,8 @@ static void adopt_locks(void)
|
||||
* Adopt failed because the orphan has a different mode
|
||||
* than initially requested. Repeat the lock-adopt operation
|
||||
* with the other mode. N.B. this logic depends on first
|
||||
* trying sh then ex for GL/VG locks; for LV locks the mode
|
||||
* from the adopt file is tried first, the alternate
|
||||
* (if the mode in adopt file was wrong somehow.)
|
||||
* trying sh then ex for GL/VG locks, and ex then sh for
|
||||
* LV locks.
|
||||
*/
|
||||
|
||||
if ((act->rt != LD_RT_LV) && (act->mode == LD_LK_SH)) {
|
||||
@@ -5598,12 +5611,9 @@ static void adopt_locks(void)
|
||||
act->mode = LD_LK_EX;
|
||||
rv = add_lock_action(act);
|
||||
|
||||
} else if (act->rt == LD_RT_LV) {
|
||||
/* LV locks: attempt to adopt the other mode. */
|
||||
if (act->mode == LD_LK_EX)
|
||||
act->mode = LD_LK_SH;
|
||||
else if (act->mode == LD_LK_SH)
|
||||
act->mode = LD_LK_EX;
|
||||
} else if ((act->rt == LD_RT_LV) && (act->mode == LD_LK_EX)) {
|
||||
/* LV locks: attempt to adopt sh after ex failed. */
|
||||
act->mode = LD_LK_SH;
|
||||
rv = add_lock_action(act);
|
||||
|
||||
} else {
|
||||
@@ -5738,13 +5748,10 @@ static void adopt_locks(void)
|
||||
if (count_start_fail || count_adopt_fail)
|
||||
goto fail;
|
||||
|
||||
unlink(adopt_file);
|
||||
write_adopt_file();
|
||||
log_debug("adopt_locks done");
|
||||
return;
|
||||
|
||||
fail:
|
||||
unlink(adopt_file);
|
||||
log_error("adopt_locks failed, reset host");
|
||||
}
|
||||
|
||||
@@ -6019,8 +6026,6 @@ static void usage(char *prog, FILE *file)
|
||||
fprintf(file, " Set path to the pid file. [%s]\n", LVMLOCKD_PIDFILE);
|
||||
fprintf(file, " --socket-path | -s <path>\n");
|
||||
fprintf(file, " Set path to the socket to listen on. [%s]\n", LVMLOCKD_SOCKET);
|
||||
fprintf(file, " --adopt-file <path>\n");
|
||||
fprintf(file, " Set path to the adopt file. [%s]\n", LVMLOCKD_ADOPT_FILE);
|
||||
fprintf(file, " --syslog-priority | -S err|warning|debug\n");
|
||||
fprintf(file, " Write log messages from this level up to syslog. [%s]\n", _syslog_num_to_name(LOG_SYSLOG_PRIO));
|
||||
fprintf(file, " --gl-type | -g <str>\n");
|
||||
@@ -6038,14 +6043,14 @@ static void usage(char *prog, FILE *file)
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
daemon_state ds = {
|
||||
.name = "lvmlockd",
|
||||
.daemon_main = main_loop,
|
||||
.daemon_init = NULL,
|
||||
.daemon_fini = NULL,
|
||||
.pidfile = getenv("LVM_LVMLOCKD_PIDFILE"),
|
||||
.socket_path = getenv("LVM_LVMLOCKD_SOCKET"),
|
||||
.protocol = lvmlockd_protocol,
|
||||
.protocol_version = lvmlockd_protocol_version,
|
||||
.daemon_init = NULL,
|
||||
.daemon_fini = NULL,
|
||||
.daemon_main = main_loop,
|
||||
.name = "lvmlockd",
|
||||
};
|
||||
|
||||
static struct option long_options[] = {
|
||||
@@ -6056,7 +6061,6 @@ int main(int argc, char *argv[])
|
||||
{"daemon-debug", no_argument, 0, 'D' },
|
||||
{"pid-file", required_argument, 0, 'p' },
|
||||
{"socket-path", required_argument, 0, 's' },
|
||||
{"adopt-file", required_argument, 0, 128 },
|
||||
{"gl-type", required_argument, 0, 'g' },
|
||||
{"host-id", required_argument, 0, 'i' },
|
||||
{"host-id-file", required_argument, 0, 'F' },
|
||||
@@ -6079,9 +6083,6 @@ int main(int argc, char *argv[])
|
||||
switch (c) {
|
||||
case '0':
|
||||
break;
|
||||
case 128:
|
||||
adopt_file = strdup(optarg);
|
||||
break;
|
||||
case 'h':
|
||||
usage(argv[0], stdout);
|
||||
exit(EXIT_SUCCESS);
|
||||
@@ -6143,9 +6144,6 @@ int main(int argc, char *argv[])
|
||||
if (!ds.socket_path)
|
||||
ds.socket_path = LVMLOCKD_SOCKET;
|
||||
|
||||
if (!adopt_file)
|
||||
adopt_file = LVMLOCKD_ADOPT_FILE;
|
||||
|
||||
/* runs daemon_main/main_loop */
|
||||
daemon_start(ds);
|
||||
|
||||
|
||||
@@ -128,18 +128,16 @@ static int read_cluster_name(char *clustername)
	return 0;
}

#define MAX_VERSION 16

int lm_init_vg_dlm(char *ls_name, char *vg_name, uint32_t flags, char *vg_args)
{
	char clustername[MAX_ARGS+1];
	char lock_args_version[MAX_VERSION+1];
	char lock_args_version[MAX_ARGS+1];
	int rv;

	memset(clustername, 0, sizeof(clustername));
	memset(lock_args_version, 0, sizeof(lock_args_version));

	snprintf(lock_args_version, MAX_VERSION, "%u.%u.%u",
	snprintf(lock_args_version, MAX_ARGS, "%u.%u.%u",
		 VG_LOCK_ARGS_MAJOR, VG_LOCK_ARGS_MINOR, VG_LOCK_ARGS_PATCH);

	rv = read_cluster_name(clustername);
@@ -151,9 +149,7 @@ int lm_init_vg_dlm(char *ls_name, char *vg_name, uint32_t flags, char *vg_args)
		return -EARGS;
	}

	rv = snprintf(vg_args, MAX_ARGS, "%s:%s", lock_args_version, clustername);
	if (rv >= MAX_ARGS)
		log_debug("init_vg_dlm vg_args may be too long %d %s", rv, vg_args);
	snprintf(vg_args, MAX_ARGS, "%s:%s", lock_args_version, clustername);
	rv = 0;

	log_debug("init_vg_dlm done %s vg_args %s", ls_name, vg_args);
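The new init_vg_dlm code keeps the snprintf() return value and logs when it reaches the buffer size. A minimal standalone sketch of that truncation check follows; the helper name, buffer and sample strings are illustrative, not taken from lvmlockd.

#include <stdio.h>

/* snprintf() returns the length it would have written, so a result equal to
 * or larger than the buffer size means the output was cut short. */
static int build_vg_args(char *vg_args, size_t len,
			 const char *version, const char *clustername)
{
	int rv = snprintf(vg_args, len, "%s:%s", version, clustername);

	if (rv < 0 || (size_t)rv >= len) {
		fprintf(stderr, "vg_args may be too long %d %s\n", rv, vg_args);
		return -1;
	}
	return 0;
}

int main(void)
{
	char vg_args[16];

	return build_vg_args(vg_args, sizeof(vg_args), "1.0.0", "mycluster") ? 1 : 0;
}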
||||
@@ -398,18 +394,12 @@ static int lm_adopt_dlm(struct lockspace *ls, struct resource *r, int ld_mode,
|
||||
(void *)1, (void *)1, (void *)1,
|
||||
NULL, NULL);
|
||||
|
||||
if (rv == -1 && (errno == EAGAIN)) {
|
||||
if (rv == -1 && errno == -EAGAIN) {
|
||||
log_debug("S %s R %s adopt_dlm adopt mode %d try other mode",
|
||||
ls->name, r->name, ld_mode);
|
||||
rv = -EUCLEAN;
|
||||
goto fail;
|
||||
}
|
||||
if (rv == -1 && (errno == ENOENT)) {
|
||||
log_debug("S %s R %s adopt_dlm adopt mode %d no lock",
|
||||
ls->name, r->name, ld_mode);
|
||||
rv = -ENOENT;
|
||||
goto fail;
|
||||
}
|
||||
if (rv < 0) {
|
||||
log_debug("S %s R %s adopt_dlm mode %d flags %x error %d errno %d",
|
||||
ls->name, r->name, mode, flags, rv, errno);
|
||||
|
||||
@@ -11,8 +11,6 @@
|
||||
#ifndef _LVM_LVMLOCKD_INTERNAL_H
|
||||
#define _LVM_LVMLOCKD_INTERNAL_H
|
||||
|
||||
#include "base/memory/container_of.h"
|
||||
|
||||
#define MAX_NAME 64
|
||||
#define MAX_ARGS 64
|
||||
|
||||
@@ -147,7 +145,6 @@ struct resource {
|
||||
char name[MAX_NAME+1]; /* vg name or lv name */
|
||||
int8_t type; /* resource type LD_RT_ */
|
||||
int8_t mode;
|
||||
int8_t adopt_mode;
|
||||
unsigned int sh_count; /* number of sh locks on locks list */
|
||||
uint32_t version;
|
||||
uint32_t last_client_id; /* last client_id to lock or unlock resource */
|
||||
@@ -158,7 +155,7 @@ struct resource {
|
||||
struct list_head locks;
|
||||
struct list_head actions;
|
||||
char lv_args[MAX_ARGS+1];
|
||||
char lm_data[]; /* lock manager specific data */
|
||||
char lm_data[0]; /* lock manager specific data */
|
||||
};
|
||||
|
||||
#define LD_LF_PERSISTENT 0x00000001
|
||||
@@ -219,6 +216,10 @@ struct val_blk {
|
||||
/* lm_unlock flags */
|
||||
#define LMUF_FREE_VG 0x00000001
|
||||
|
||||
#define container_of(ptr, type, member) ({ \
|
||||
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
|
||||
(type *)( (char *)__mptr - offsetof(type,member) );})
|
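The hunk above drops the local container_of macro in favour of the shared base/memory/container_of.h copy. A brief standalone sketch of what the macro does follows; it relies on the same GCC extensions (typeof, statement expressions) as the original, and the demo struct and field names are invented for illustration.

#include <stddef.h>
#include <stdio.h>

/* Same definition as the removed local macro: recover the address of the
 * enclosing struct from a pointer to one of its embedded members. */
#define container_of(ptr, type, member) ({ \
	const typeof( ((type *)0)->member ) *__mptr = (ptr); \
	(type *)( (char *)__mptr - offsetof(type, member) );})

struct list_head {
	struct list_head *next, *prev;
};

struct lockspace_demo {
	int id;
	struct list_head list;
};

int main(void)
{
	struct lockspace_demo ls = { .id = 42 };
	struct list_head *node = &ls.list;

	/* Walk back from the embedded list node to the containing struct. */
	struct lockspace_demo *owner = container_of(node, struct lockspace_demo, list);

	printf("owner id %d\n", owner->id);
	return 0;
}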
||||
|
||||
static inline void INIT_LIST_HEAD(struct list_head *list)
|
||||
{
|
||||
list->next = list;
|
||||
|
||||
@@ -500,15 +500,13 @@ static int get_sizes_lockspace(char *path, int *sector_size, int *align_size)
|
||||
* version and lv name, and returns the real lock_args in vg_args.
|
||||
*/
|
||||
|
||||
#define MAX_VERSION 16
|
||||
|
||||
int lm_init_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_args)
|
||||
{
|
||||
struct sanlk_lockspace ss;
|
||||
struct sanlk_resourced rd;
|
||||
struct sanlk_disk disk;
|
||||
char lock_lv_name[MAX_ARGS+1];
|
||||
char lock_args_version[MAX_VERSION+1];
|
||||
char lock_args_version[MAX_ARGS+1];
|
||||
const char *gl_name = NULL;
|
||||
uint32_t daemon_version;
|
||||
uint32_t daemon_proto;
|
||||
@@ -528,7 +526,7 @@ int lm_init_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_ar
|
||||
return -EARGS;
|
||||
}
|
||||
|
||||
snprintf(lock_args_version, MAX_VERSION, "%u.%u.%u",
|
||||
snprintf(lock_args_version, MAX_ARGS, "%u.%u.%u",
|
||||
VG_LOCK_ARGS_MAJOR, VG_LOCK_ARGS_MINOR, VG_LOCK_ARGS_PATCH);
|
||||
|
||||
/* see comment above about input vg_args being only lock_lv_name */
|
||||
@@ -545,9 +543,7 @@ int lm_init_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_ar
|
||||
if (daemon_test) {
|
||||
if (!gl_lsname_sanlock[0])
|
||||
strncpy(gl_lsname_sanlock, ls_name, MAX_NAME);
|
||||
rv = snprintf(vg_args, MAX_ARGS, "%s:%s", lock_args_version, lock_lv_name);
|
||||
if (rv >= MAX_ARGS)
|
||||
log_debug("init_vg_san vg_args may be too long %d %s", rv, vg_args);
|
||||
snprintf(vg_args, MAX_ARGS, "%s:%s", lock_args_version, lock_lv_name);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -639,9 +635,7 @@ int lm_init_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_ar
|
||||
if (!strcmp(gl_name, R_NAME_GL))
|
||||
strncpy(gl_lsname_sanlock, ls_name, MAX_NAME);
|
||||
|
||||
rv = snprintf(vg_args, MAX_ARGS, "%s:%s", lock_args_version, lock_lv_name);
|
||||
if (rv >= MAX_ARGS)
|
||||
log_debug("init_vg_san vg_args may be too long %d %s", rv, vg_args);
|
||||
snprintf(vg_args, MAX_ARGS, "%s:%s", lock_args_version, lock_lv_name);
|
||||
|
||||
log_debug("S %s init_vg_san done vg_args %s", ls_name, vg_args);
|
||||
|
||||
@@ -698,7 +692,7 @@ int lm_init_lv_sanlock(char *ls_name, char *vg_name, char *lv_name,
|
||||
{
|
||||
struct sanlk_resourced rd;
|
||||
char lock_lv_name[MAX_ARGS+1];
|
||||
char lock_args_version[MAX_VERSION+1];
|
||||
char lock_args_version[MAX_ARGS+1];
|
||||
uint64_t offset;
|
||||
int rv;
|
||||
|
||||
@@ -713,7 +707,7 @@ int lm_init_lv_sanlock(char *ls_name, char *vg_name, char *lv_name,
|
||||
return rv;
|
||||
}
|
||||
|
||||
snprintf(lock_args_version, MAX_VERSION, "%u.%u.%u",
|
||||
snprintf(lock_args_version, MAX_ARGS, "%u.%u.%u",
|
||||
LV_LOCK_ARGS_MAJOR, LV_LOCK_ARGS_MINOR, LV_LOCK_ARGS_PATCH);
|
||||
|
||||
if (daemon_test) {
|
||||
|
||||
@@ -915,7 +915,7 @@ int main(int argc, char *argv[])
|
||||
int option_index = 0;
|
||||
int client = 0, server = 0;
|
||||
unsigned action = ACTION_MAX;
|
||||
struct timespec timeout;
|
||||
struct timeval timeout;
|
||||
daemon_idle di = { .ptimeout = &timeout };
|
||||
struct lvmpolld_state ls = { .log_config = "" };
|
||||
daemon_state s = {
|
||||
|
||||
@@ -121,9 +121,7 @@ enum {
|
||||
|
||||
DM_DEVICE_SET_GEOMETRY,
|
||||
|
||||
DM_DEVICE_ARM_POLL,
|
||||
|
||||
DM_DEVICE_GET_TARGET_VERSION
|
||||
DM_DEVICE_ARM_POLL
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -164,20 +162,20 @@ struct dm_info {
struct dm_deps {
	uint32_t count;
	uint32_t filler;
	uint64_t device[];
	uint64_t device[0];
};

struct dm_names {
	uint64_t dev;
	uint32_t next;		/* Offset to next struct from start of this struct */
	char name[];
	char name[0];
};

struct dm_versions {
	uint32_t next;		/* Offset to next struct from start of this struct */
	uint32_t version[3];

	char name[];
	char name[0];
};
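The header change above replaces the old zero-length array idiom (device[0], name[0]) with C99 flexible array members. A small hedged sketch, independent of libdevmapper, of how such a struct is allocated and walked:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Same layout idea as struct dm_deps: a fixed header followed by a
 * flexible array member whose storage is allocated together with it. */
struct deps {
	uint32_t count;
	uint32_t filler;
	uint64_t device[];	/* C99 flexible array member */
};

int main(void)
{
	uint32_t i, n = 3;
	struct deps *d = malloc(sizeof(*d) + n * sizeof(d->device[0]));

	if (!d)
		return 1;

	d->count = n;
	d->filler = 0;
	for (i = 0; i < n; i++)
		d->device[i] = 0x700000000ull + i;	/* made-up device numbers */

	for (i = 0; i < d->count; i++)
		printf("dep %u: %llu\n", i, (unsigned long long)d->device[i]);

	free(d);
	return 0;
}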
||||
|
||||
int dm_get_library_version(char *version, size_t size);
|
||||
@@ -234,7 +232,6 @@ int dm_task_suppress_identical_reload(struct dm_task *dmt);
|
||||
int dm_task_secure_data(struct dm_task *dmt);
|
||||
int dm_task_retry_remove(struct dm_task *dmt);
|
||||
int dm_task_deferred_remove(struct dm_task *dmt);
|
||||
void dm_task_skip_reload_params_compare(struct dm_task *dmt);
|
||||
|
||||
/*
|
||||
* Record timestamp immediately after the ioctl returns.
|
||||
@@ -384,7 +381,7 @@ int dm_get_status_cache(struct dm_pool *mem, const char *params,
|
||||
struct dm_status_cache **status);
|
||||
|
||||
struct dm_status_writecache {
|
||||
uint64_t error;
|
||||
uint32_t error;
|
||||
uint64_t total_blocks;
|
||||
uint64_t free_blocks;
|
||||
uint64_t writeback_blocks;
|
||||
@@ -393,15 +390,6 @@ struct dm_status_writecache {
|
||||
int dm_get_status_writecache(struct dm_pool *mem, const char *params,
|
||||
struct dm_status_writecache **status);
|
||||
|
||||
struct dm_status_integrity {
|
||||
uint64_t number_of_mismatches;
|
||||
uint64_t provided_data_sectors;
|
||||
uint64_t recalc_sector;
|
||||
};
|
||||
|
||||
int dm_get_status_integrity(struct dm_pool *mem, const char *params,
|
||||
struct dm_status_integrity **status);
|
||||
|
||||
/*
|
||||
* Parse params from STATUS call for snapshot target
|
||||
*
|
||||
@@ -915,7 +903,6 @@ int dm_tree_node_add_raid_target_with_params_v2(struct dm_tree_node *node,
|
||||
#define DM_CACHE_FEATURE_WRITETHROUGH 0x00000002
|
||||
#define DM_CACHE_FEATURE_PASSTHROUGH 0x00000004
|
||||
#define DM_CACHE_FEATURE_METADATA2 0x00000008 /* cache v1.10 */
|
||||
#define DM_CACHE_FEATURE_NO_DISCARD_PASSDOWN 0x00000010
|
||||
|
||||
struct dm_config_node;
|
||||
/*
|
||||
@@ -951,8 +938,6 @@ struct writecache_settings {
|
||||
uint64_t autocommit_time; /* in milliseconds */
|
||||
uint32_t fua;
|
||||
uint32_t nofua;
|
||||
uint32_t cleaner;
|
||||
uint32_t max_age;
|
||||
|
||||
/*
|
||||
* Allow an unrecognized key and its val to be passed to the kernel for
|
||||
@@ -972,8 +957,6 @@ struct writecache_settings {
|
||||
unsigned autocommit_time_set:1;
|
||||
unsigned fua_set:1;
|
||||
unsigned nofua_set:1;
|
||||
unsigned cleaner_set:1;
|
||||
unsigned max_age_set:1;
|
||||
};
|
||||
|
||||
int dm_tree_node_add_writecache_target(struct dm_tree_node *node,
|
||||
@@ -984,42 +967,12 @@ int dm_tree_node_add_writecache_target(struct dm_tree_node *node,
|
||||
uint32_t writecache_block_size,
|
||||
struct writecache_settings *settings);
|
||||
|
||||
struct integrity_settings {
|
||||
char mode[8];
|
||||
uint32_t tag_size;
|
||||
uint32_t block_size; /* optional table param always set by lvm */
|
||||
const char *internal_hash; /* optional table param always set by lvm */
|
||||
|
||||
uint32_t journal_sectors;
|
||||
uint32_t interleave_sectors;
|
||||
uint32_t buffer_sectors;
|
||||
uint32_t journal_watermark;
|
||||
uint32_t commit_time;
|
||||
uint32_t bitmap_flush_interval;
|
||||
uint64_t sectors_per_bit;
|
||||
|
||||
unsigned journal_sectors_set:1;
|
||||
unsigned interleave_sectors_set:1;
|
||||
unsigned buffer_sectors_set:1;
|
||||
unsigned journal_watermark_set:1;
|
||||
unsigned commit_time_set:1;
|
||||
unsigned bitmap_flush_interval_set:1;
|
||||
unsigned sectors_per_bit_set:1;
|
||||
};
|
||||
|
||||
int dm_tree_node_add_integrity_target(struct dm_tree_node *node,
|
||||
uint64_t size,
|
||||
const char *origin_uuid,
|
||||
const char *meta_uuid,
|
||||
struct integrity_settings *settings,
|
||||
int recalculate);
|
||||
|
||||
/*
|
||||
* VDO target
|
||||
*/
|
||||
int dm_tree_node_add_vdo_target(struct dm_tree_node *node,
|
||||
uint64_t size,
|
||||
const char *vdo_pool_name,
|
||||
const char *data_uuid,
|
||||
uint64_t data_size,
|
||||
const struct dm_vdo_target_params *param);
|
||||
@@ -1318,7 +1271,7 @@ int dm_bit_get_next(dm_bitset_t bs, int last_bit);
|
||||
int dm_bit_get_last(dm_bitset_t bs);
|
||||
int dm_bit_get_prev(dm_bitset_t bs, int last_bit);
|
||||
|
||||
#define DM_BITS_PER_INT ((unsigned)sizeof(int) * CHAR_BIT)
|
||||
#define DM_BITS_PER_INT (sizeof(int) * CHAR_BIT)
|
||||
|
||||
#define dm_bit(bs, i) \
|
||||
((bs)[((i) / DM_BITS_PER_INT) + 1] & (0x1 << ((i) & (DM_BITS_PER_INT - 1))))
|
||||
|
||||
@@ -119,9 +119,6 @@ static struct cmd_data _cmd_data_v4[] = {
|
||||
#ifdef DM_DEV_ARM_POLL
|
||||
{"armpoll", DM_DEV_ARM_POLL, {4, 36, 0}},
|
||||
#endif
|
||||
#ifdef DM_GET_TARGET_VERSION
|
||||
{"target-version", DM_GET_TARGET_VERSION, {4, 41, 0}},
|
||||
#endif
|
||||
};
|
||||
/* *INDENT-ON* */
|
||||
|
||||
@@ -205,7 +202,7 @@ static int _get_proc_number(const char *file, const char *name,
|
||||
}
|
||||
|
||||
while (getline(&line, &len, fl) != -1) {
|
||||
if (sscanf(line, "%u %255s\n", &num, &nm[0]) == 2) {
|
||||
if (sscanf(line, "%d %255s\n", &num, &nm[0]) == 2) {
|
||||
if (!strcmp(name, nm)) {
|
||||
if (number) {
|
||||
*number = num;
|
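The _get_proc_number() change above switches the sscanf conversion to %u so it matches the unsigned variable. A self-contained sketch of that parsing loop over /proc/devices, assuming glibc getline(); the helper name is illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Scan a /proc/devices-style file for a driver name and return its number. */
static int get_proc_number(const char *file, const char *name, unsigned *number)
{
	FILE *fl;
	char *line = NULL;
	size_t len = 0;
	char nm[256];
	unsigned num;
	int found = 0;

	if (!(fl = fopen(file, "r")))
		return 0;

	while (getline(&line, &len, fl) != -1) {
		if (sscanf(line, "%u %255s\n", &num, &nm[0]) == 2 &&
		    !strcmp(name, nm)) {
			*number = num;
			found = 1;
			break;
		}
	}

	free(line);
	fclose(fl);
	return found;
}

int main(void)
{
	unsigned major;

	if (get_proc_number("/proc/devices", "device-mapper", &major))
		printf("device-mapper major %u\n", major);
	return 0;
}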
||||
@@ -805,11 +802,6 @@ int dm_task_suppress_identical_reload(struct dm_task *dmt)
|
||||
return 1;
|
||||
}
|
||||
|
||||
void dm_task_skip_reload_params_compare(struct dm_task *dmt)
|
||||
{
|
||||
dmt->skip_reload_params_compare = 1;
|
||||
}
|
||||
|
||||
int dm_task_set_add_node(struct dm_task *dmt, dm_add_node_t add_node)
|
||||
{
|
||||
switch (add_node) {
|
||||
@@ -1580,29 +1572,11 @@ static int _reload_with_suppression_v4(struct dm_task *dmt)
|
||||
len = strlen(t2->params);
|
||||
while (len-- > 0 && t2->params[len] == ' ')
|
||||
t2->params[len] = '\0';
|
||||
|
||||
if (t1->start != t2->start) {
|
||||
log_debug("reload %u:%u start diff", task->major, task->minor);
|
||||
if ((t1->start != t2->start) ||
|
||||
(t1->length != t2->length) ||
|
||||
(strcmp(t1->type, t2->type)) ||
|
||||
(strcmp(t1->params, t2->params)))
|
||||
goto no_match;
|
||||
}
|
||||
if (t1->length != t2->length) {
|
||||
log_debug("reload %u:%u length diff", task->major, task->minor);
|
||||
goto no_match;
|
||||
}
|
||||
if (strcmp(t1->type, t2->type)) {
|
||||
log_debug("reload %u:%u type diff %s %s", task->major, task->minor, t1->type, t2->type);
|
||||
goto no_match;
|
||||
}
|
||||
if (strcmp(t1->params, t2->params)) {
|
||||
if (dmt->skip_reload_params_compare)
|
||||
log_debug("reload %u:%u skip params ignore %s %s",
|
||||
task->major, task->minor, t1->params, t2->params);
|
||||
else {
|
||||
log_debug("reload %u:%u params diff", task->major, task->minor);
|
||||
goto no_match;
|
||||
}
|
||||
}
|
||||
|
||||
t1 = t1->next;
|
||||
t2 = t2->next;
|
||||
}
|
||||
|
||||
@@ -59,7 +59,6 @@ struct dm_task {
|
||||
int skip_lockfs;
|
||||
int query_inactive_table;
|
||||
int suppress_identical_reload;
|
||||
int skip_reload_params_compare;
|
||||
dm_add_node_t add_node;
|
||||
uint64_t existing_table_size;
|
||||
int cookie_set;
|
||||
|
||||
@@ -512,7 +512,7 @@ int unmangle_string(const char *str, const char *str_name, size_t len,
|
||||
int strict = mode != DM_STRING_MANGLING_NONE;
|
||||
char str_rest[DM_NAME_LEN];
|
||||
size_t i, j;
|
||||
unsigned int code;
|
||||
int code;
|
||||
int r = 0;
|
||||
|
||||
if (!str || !buf)
|
||||
@@ -1445,7 +1445,7 @@ struct node_op_parms {
|
||||
char *old_name;
|
||||
int warn_if_udev_failed;
|
||||
unsigned rely_on_udev;
|
||||
char names[];
|
||||
char names[0];
|
||||
};
|
||||
|
||||
static void _store_str(char **pos, char **ptr, const char *str)
|
||||
@@ -1874,120 +1874,6 @@ bad:
|
||||
return r;
|
||||
}
|
||||
|
||||
static int _sysfs_get_dev_major_minor(const char *path, uint32_t major, uint32_t minor)
|
||||
{
|
||||
FILE *fp;
|
||||
uint32_t ma, mi;
|
||||
int r;
|
||||
|
||||
if (!(fp = fopen(path, "r")))
|
||||
return 0;
|
||||
|
||||
r = (fscanf(fp, "%" PRIu32 ":%" PRIu32 , &ma, &mi) == 2) &&
|
||||
(ma == major) && (mi == minor);
|
||||
// log_debug("Checking %s %u:%u -> %d", path, ma, mi, r);
|
||||
|
||||
if (fclose(fp))
|
||||
log_sys_error("fclose", path);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static int _sysfs_find_kernel_name(uint32_t major, uint32_t minor, char *buf, size_t buf_size)
|
||||
{
|
||||
const char *name, *name_dev;
|
||||
char path[PATH_MAX];
|
||||
struct dirent *dirent, *dirent_dev;
|
||||
DIR *d, *d_dev;
|
||||
struct stat st;
|
||||
int r = 0, sz;
|
||||
|
||||
if (!*_sysfs_dir ||
|
||||
dm_snprintf(path, sizeof(path), "%s/block/", _sysfs_dir) < 0) {
|
||||
log_error("Failed to build sysfs_path.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!(d = opendir(path))) {
|
||||
log_sys_error("opendir", path);
|
||||
return 0;
|
||||
}
|
||||
|
||||
while (!r && (dirent = readdir(d))) {
|
||||
name = dirent->d_name;
|
||||
|
||||
if (!strcmp(name, ".") || !strcmp(name, ".."))
|
||||
continue;
|
||||
|
||||
if ((sz = dm_snprintf(path, sizeof(path), "%sblock/%s/dev",
|
||||
_sysfs_dir, name)) == -1) {
|
||||
log_warn("Couldn't create path for %s.", name);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (_sysfs_get_dev_major_minor(path, major, minor)) {
|
||||
r = dm_strncpy(buf, name, buf_size);
|
||||
break; /* found */
|
||||
}
|
||||
|
||||
path[sz - 4] = 0; /* strip /dev from end of path string */
|
||||
if (stat(path, &st))
|
||||
continue;
|
||||
|
||||
if (S_ISDIR(st.st_mode)) {
|
||||
|
||||
/* let's assume there is no tree-complex device in past systems */
|
||||
if (!(d_dev = opendir(path))) {
|
||||
log_sys_debug("opendir", path);
|
||||
continue;
|
||||
}
|
||||
|
||||
while ((dirent_dev = readdir(d_dev))) {
|
||||
name_dev = dirent_dev->d_name;
|
||||
|
||||
/* skip known ignorable paths */
|
||||
if (!strcmp(name_dev, ".") || !strcmp(name_dev, "..") ||
|
||||
!strcmp(name_dev, "bdi") ||
|
||||
!strcmp(name_dev, "dev") ||
|
||||
!strcmp(name_dev, "device") ||
|
||||
!strcmp(name_dev, "holders") ||
|
||||
!strcmp(name_dev, "integrity") ||
|
||||
!strcmp(name_dev, "loop") ||
|
||||
!strcmp(name_dev, "queueu") ||
|
||||
!strcmp(name_dev, "md") ||
|
||||
!strcmp(name_dev, "mq") ||
|
||||
!strcmp(name_dev, "power") ||
|
||||
!strcmp(name_dev, "removable") ||
|
||||
!strcmp(name_dev, "slave") ||
|
||||
!strcmp(name_dev, "slaves") ||
|
||||
!strcmp(name_dev, "subsystem") ||
|
||||
!strcmp(name_dev, "trace") ||
|
||||
!strcmp(name_dev, "uevent"))
|
||||
continue;
|
||||
|
||||
if (dm_snprintf(path, sizeof(path), "%sblock/%s/%s/dev",
|
||||
_sysfs_dir, name, name_dev) == -1) {
|
||||
log_warn("Couldn't create path for %s/%s.", name, name_dev);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (_sysfs_get_dev_major_minor(path, major, minor)) {
|
||||
r = dm_strncpy(buf, name_dev, buf_size);
|
||||
break; /* found */
|
||||
}
|
||||
}
|
||||
|
||||
if (closedir(d_dev))
|
||||
log_sys_debug("closedir", name);
|
||||
}
|
||||
}
|
||||
|
||||
if (closedir(d))
|
||||
log_sys_debug("closedir", path);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static int _sysfs_get_kernel_name(uint32_t major, uint32_t minor, char *buf, size_t buf_size)
|
||||
{
|
||||
char *name, *sysfs_path, *temp_buf = NULL;
|
||||
@@ -2010,11 +1896,8 @@ static int _sysfs_get_kernel_name(uint32_t major, uint32_t minor, char *buf, siz
|
||||
if ((size = readlink(sysfs_path, temp_buf, PATH_MAX - 1)) < 0) {
|
||||
if (errno != ENOENT)
|
||||
log_sys_error("readlink", sysfs_path);
|
||||
else {
|
||||
else
|
||||
log_sys_debug("readlink", sysfs_path);
|
||||
r = _sysfs_find_kernel_name(major, minor, buf, buf_size);
|
||||
goto out;
|
||||
}
|
||||
goto bad;
|
||||
}
|
||||
temp_buf[size] = '\0';
|
||||
@@ -2034,7 +1917,6 @@ static int _sysfs_get_kernel_name(uint32_t major, uint32_t minor, char *buf, siz
|
||||
strcpy(buf, name);
|
||||
r = 1;
|
||||
bad:
|
||||
out:
|
||||
free(temp_buf);
|
||||
free(sysfs_path);
|
||||
|
||||
|
||||
@@ -51,8 +51,6 @@ struct parser {
|
||||
|
||||
struct dm_pool *mem;
|
||||
int no_dup_node_check; /* whether to disable dup node checking */
|
||||
const char *key; /* last obtained key */
|
||||
unsigned ignored_creation_time;
|
||||
};
|
||||
|
||||
struct config_output {
|
||||
@@ -178,7 +176,7 @@ static int _do_dm_config_parse(struct dm_config_tree *cft, const char *start, co
|
||||
/* TODO? if (start == end) return 1; */
|
||||
|
||||
struct parser *p;
|
||||
if (!(p = dm_pool_zalloc(cft->mem, sizeof(*p))))
|
||||
if (!(p = dm_pool_alloc(cft->mem, sizeof(*p))))
|
||||
return_0;
|
||||
|
||||
p->mem = cft->mem;
|
||||
@@ -617,7 +615,6 @@ static struct dm_config_node *_section(struct parser *p, struct dm_config_node *
|
||||
match(TOK_SECTION_E);
|
||||
} else {
|
||||
match(TOK_EQ);
|
||||
p->key = root->key;
|
||||
if (!(value = _value(p)))
|
||||
return_NULL;
|
||||
if (root->v)
|
||||
@@ -685,17 +682,8 @@ static struct dm_config_value *_type(struct parser *p)
|
||||
errno = 0;
|
||||
v->v.i = strtoll(p->tb, NULL, 0); /* FIXME: check error */
|
||||
if (errno) {
|
||||
if (errno == ERANGE && p->key &&
|
||||
strcmp("creation_time", p->key) == 0) {
|
||||
/* Due to a bug in some older 32bit builds (<2.02.169),
|
||||
* lvm was able to produce invalid creation_time string */
|
||||
v->v.i = 1527120000; /* Pick 2018-05-24 day instead */
|
||||
if (!p->ignored_creation_time++)
|
||||
log_warn("WARNING: Invalid creation_time found in metadata (repaired with next metadata update).");
|
||||
} else {
|
||||
log_error("Failed to read int token.");
|
||||
return NULL;
|
||||
}
|
||||
log_error("Failed to read int token.");
|
||||
return NULL;
|
||||
}
|
||||
match(TOK_INT);
|
||||
break;
|
||||
|
||||
@@ -38,7 +38,6 @@ enum {
|
||||
SEG_STRIPED,
|
||||
SEG_ZERO,
|
||||
SEG_WRITECACHE,
|
||||
SEG_INTEGRITY,
|
||||
SEG_THIN_POOL,
|
||||
SEG_THIN,
|
||||
SEG_VDO,
|
||||
@@ -79,7 +78,6 @@ static const struct {
|
||||
{ SEG_STRIPED, "striped" },
|
||||
{ SEG_ZERO, "zero"},
|
||||
{ SEG_WRITECACHE, "writecache"},
|
||||
{ SEG_INTEGRITY, "integrity"},
|
||||
{ SEG_THIN_POOL, "thin-pool"},
|
||||
{ SEG_THIN, "thin"},
|
||||
{ SEG_VDO, "vdo" },
|
||||
@@ -223,11 +221,6 @@ struct load_segment {
|
||||
int writecache_pmem; /* writecache, 1 if pmem, 0 if ssd */
|
||||
uint32_t writecache_block_size; /* writecache, in bytes */
|
||||
struct writecache_settings writecache_settings; /* writecache */
|
||||
|
||||
uint64_t integrity_data_sectors; /* integrity (provided_data_sectors) */
|
||||
struct dm_tree_node *integrity_meta_node; /* integrity */
|
||||
struct integrity_settings integrity_settings; /* integrity */
|
||||
int integrity_recalculate; /* integrity */
|
||||
};
|
||||
|
||||
/* Per-device properties */
|
||||
@@ -274,16 +267,6 @@ struct load_properties {
|
||||
*/
|
||||
unsigned delay_resume_if_extended;
|
||||
|
||||
/*
|
||||
* When comparing table lines to decide if a reload is
|
||||
* needed, ignore any differences betwen the lvm device
|
||||
* params and the kernel-reported device params.
|
||||
* dm-integrity reports many internal parameters on the
|
||||
* table line when lvm does not explicitly set them,
|
||||
* causing lvm and the kernel to have differing params.
|
||||
*/
|
||||
unsigned skip_reload_params_compare;
|
||||
|
||||
/*
|
||||
* Call node_send_messages(), set to 2 if there are messages
|
||||
* When != 0, it validates matching transaction id, thus thin-pools
|
||||
@@ -1589,37 +1572,8 @@ static int _thin_pool_node_message(struct dm_tree_node *dnode, struct thin_messa
|
||||
}
|
||||
|
||||
if (!_node_message(dnode->info.major, dnode->info.minor,
|
||||
tm->expected_errno, buf)) {
|
||||
switch (m->type) {
|
||||
case DM_THIN_MESSAGE_CREATE_SNAP:
|
||||
case DM_THIN_MESSAGE_CREATE_THIN:
|
||||
if (errno == EEXIST) {
|
||||
/*
|
||||
* ATM errno from ioctl() is preserved through code error path chain
|
||||
* If this would ever change, another way need to be used to
|
||||
* obtain result from failed DM message
|
||||
*/
|
||||
log_error("Thin pool %s already contain thin device with device_id %u.",
|
||||
_node_name(dnode), m->u.m_create_snap.device_id);
|
||||
/*
|
||||
* TODO:
|
||||
*
|
||||
* Give some useful advice how to solve this problem,
|
||||
* until lvconvert --repair can handle this automatically
|
||||
*/
|
||||
log_error("Manual intervention may be required to remove device dev_id=%u in thin pool metadata.",
|
||||
m->u.m_create_snap.device_id);
|
||||
log_error("Optionally new thin volume with device_id=%u can be manually added into a volume group.",
|
||||
m->u.m_create_snap.device_id);
|
||||
log_warn("WARNING: When uncertain how to do this, contact support!");
|
||||
return 0;
|
||||
}
|
||||
/* fall through */
|
||||
default:
|
||||
return_0;
|
||||
}
|
||||
|
||||
}
|
||||
tm->expected_errno, buf))
|
||||
return_0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
@@ -1666,15 +1620,6 @@ static int _thin_pool_node_send_messages(struct dm_tree_node *dnode,
|
||||
if (!have_messages || !send)
|
||||
return 1; /* transaction_id is matching */
|
||||
|
||||
if (stp.fail || stp.read_only || stp.needs_check) {
|
||||
log_error("Cannot send messages to thin pool %s%s%s%s.",
|
||||
_node_name(dnode),
|
||||
stp.fail ? " in failed state" : "",
|
||||
stp.read_only ? " with read only metadata" : "",
|
||||
stp.needs_check ? " which needs check first" : "");
|
||||
return 0;
|
||||
}
|
||||
|
||||
dm_list_iterate_items(tmsg, &seg->thin_messages) {
|
||||
if (!(_thin_pool_node_message(dnode, tmsg)))
|
||||
return_0;
|
||||
@@ -2136,7 +2081,7 @@ int dm_tree_activate_children(struct dm_tree_node *dnode,
|
||||
return r;
|
||||
}
|
||||
|
||||
static int _create_node(struct dm_tree_node *dnode, struct dm_tree_node *parent)
|
||||
static int _create_node(struct dm_tree_node *dnode)
|
||||
{
|
||||
int r = 0;
|
||||
struct dm_task *dmt;
|
||||
@@ -2185,15 +2130,38 @@ static int _create_node(struct dm_tree_node *dnode, struct dm_tree_node *parent)
|
||||
"Unable to get DM task info for %s.",
|
||||
dnode->name);
|
||||
}
|
||||
|
||||
if (r)
|
||||
dm_list_add_h(&parent->activated, &dnode->activated_list);
|
||||
out:
|
||||
dm_task_destroy(dmt);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/*
|
||||
* _remove_node
|
||||
*
|
||||
* This function is only used to remove a DM device that has failed
|
||||
* to load any table.
|
||||
*/
|
||||
static int _remove_node(struct dm_tree_node *dnode)
|
||||
{
|
||||
if (!dnode->info.exists)
|
||||
return 1;
|
||||
|
||||
if (dnode->info.live_table || dnode->info.inactive_table) {
|
||||
log_error(INTERNAL_ERROR
|
||||
"_remove_node called on device with loaded table(s).");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!_deactivate_node(dnode->name, dnode->info.major, dnode->info.minor,
|
||||
&dnode->dtree->cookie, dnode->udev_flags, 0)) {
|
||||
log_error("Failed to clean-up device with no table: %s.",
|
||||
_node_name(dnode));
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *node)
|
||||
{
|
||||
if (!dm_format_dev(devbuf, bufsize, node->info.major, node->info.minor)) {
|
||||
@@ -2685,10 +2653,6 @@ static int _writecache_emit_segment_line(struct dm_task *dmt,
|
||||
count += 1;
|
||||
if (seg->writecache_settings.nofua_set)
|
||||
count += 1;
|
||||
if (seg->writecache_settings.cleaner_set && seg->writecache_settings.cleaner)
|
||||
count += 1;
|
||||
if (seg->writecache_settings.max_age_set)
|
||||
count += 2;
|
||||
if (seg->writecache_settings.new_key)
|
||||
count += 2;
|
||||
|
||||
@@ -2732,14 +2696,6 @@ static int _writecache_emit_segment_line(struct dm_task *dmt,
|
||||
EMIT_PARAMS(pos, " nofua");
|
||||
}
|
||||
|
||||
if (seg->writecache_settings.cleaner_set && seg->writecache_settings.cleaner) {
|
||||
EMIT_PARAMS(pos, " cleaner");
|
||||
}
|
||||
|
||||
if (seg->writecache_settings.max_age_set) {
|
||||
EMIT_PARAMS(pos, " max_age %u", seg->writecache_settings.max_age);
|
||||
}
|
||||
|
||||
if (seg->writecache_settings.new_key) {
|
||||
EMIT_PARAMS(pos, " %s %s",
|
||||
seg->writecache_settings.new_key,
|
||||
@@ -2749,84 +2705,6 @@ static int _writecache_emit_segment_line(struct dm_task *dmt,
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int _integrity_emit_segment_line(struct dm_task *dmt,
|
||||
struct load_segment *seg,
|
||||
char *params, size_t paramsize)
|
||||
{
|
||||
struct integrity_settings *set = &seg->integrity_settings;
|
||||
int pos = 0;
|
||||
int count;
|
||||
char origin_dev[DM_FORMAT_DEV_BUFSIZE];
|
||||
char meta_dev[DM_FORMAT_DEV_BUFSIZE];
|
||||
|
||||
if (!_build_dev_string(origin_dev, sizeof(origin_dev), seg->origin))
|
||||
return_0;
|
||||
|
||||
if (seg->integrity_meta_node &&
|
||||
!_build_dev_string(meta_dev, sizeof(meta_dev), seg->integrity_meta_node))
|
||||
return_0;
|
||||
|
||||
count = 3; /* block_size, internal_hash, fix_padding options are always passed */
|
||||
|
||||
if (seg->integrity_meta_node)
|
||||
count++;
|
||||
|
||||
if (seg->integrity_recalculate)
|
||||
count++;
|
||||
|
||||
if (set->journal_sectors_set)
|
||||
count++;
|
||||
if (set->interleave_sectors_set)
|
||||
count++;
|
||||
if (set->buffer_sectors_set)
|
||||
count++;
|
||||
if (set->journal_watermark_set)
|
||||
count++;
|
||||
if (set->commit_time_set)
|
||||
count++;
|
||||
if (set->bitmap_flush_interval_set)
|
||||
count++;
|
||||
if (set->sectors_per_bit_set)
|
||||
count++;
|
||||
|
||||
EMIT_PARAMS(pos, "%s 0 %u %s %d fix_padding block_size:%u internal_hash:%s",
|
||||
origin_dev,
|
||||
set->tag_size,
|
||||
set->mode,
|
||||
count,
|
||||
set->block_size,
|
||||
set->internal_hash);
|
||||
|
||||
if (seg->integrity_meta_node)
|
||||
EMIT_PARAMS(pos, " meta_device:%s", meta_dev);
|
||||
|
||||
if (seg->integrity_recalculate)
|
||||
EMIT_PARAMS(pos, " recalculate");
|
||||
|
||||
if (set->journal_sectors_set)
|
||||
EMIT_PARAMS(pos, " journal_sectors:%u", set->journal_sectors);
|
||||
|
||||
if (set->interleave_sectors_set)
|
||||
EMIT_PARAMS(pos, " interleave_sectors:%u", set->interleave_sectors);
|
||||
|
||||
if (set->buffer_sectors_set)
|
||||
EMIT_PARAMS(pos, " buffer_sectors:%u", set->buffer_sectors);
|
||||
|
||||
if (set->journal_watermark_set)
|
||||
EMIT_PARAMS(pos, " journal_watermark:%u", set->journal_watermark);
|
||||
|
||||
if (set->commit_time_set)
|
||||
EMIT_PARAMS(pos, " commit_time:%u", set->commit_time);
|
||||
|
||||
if (set->bitmap_flush_interval_set)
|
||||
EMIT_PARAMS(pos, " bitmap_flush_interval:%u", set->bitmap_flush_interval);
|
||||
|
||||
if (set->sectors_per_bit_set)
|
||||
EMIT_PARAMS(pos, " sectors_per_bit:%llu", (unsigned long long)set->sectors_per_bit);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int _thin_pool_emit_segment_line(struct dm_task *dmt,
|
||||
struct load_segment *seg,
|
||||
char *params, size_t paramsize)
|
||||
@@ -2877,7 +2755,7 @@ static int _vdo_emit_segment_line(struct dm_task *dmt,
|
||||
"maxDiscard %u ack %u bio %u bioRotationInterval %u cpu %u hash %u logical %u physical %u",
|
||||
data_dev,
|
||||
seg->vdo_data_size / 8, // this parameter is in 4K units
|
||||
seg->vdo_params.minimum_io_size * UINT32_C(512), // sector to byte units
|
||||
seg->vdo_params.minimum_io_size,
|
||||
seg->vdo_params.block_map_cache_size_mb * UINT64_C(256), // 1MiB -> 4KiB units
|
||||
seg->vdo_params.block_map_era_length,
|
||||
seg->vdo_params.use_metadata_hints ? "on" : "off" ,
|
||||
@@ -3011,10 +2889,6 @@ static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
|
||||
if (!_writecache_emit_segment_line(dmt, seg, params, paramsize))
|
||||
return_0;
|
||||
break;
|
||||
case SEG_INTEGRITY:
|
||||
if (!_integrity_emit_segment_line(dmt, seg, params, paramsize))
|
||||
return_0;
|
||||
break;
|
||||
}
|
||||
|
||||
switch(seg->type) {
|
||||
@@ -3027,7 +2901,6 @@ static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
|
||||
case SEG_THIN:
|
||||
case SEG_CACHE:
|
||||
case SEG_WRITECACHE:
|
||||
case SEG_INTEGRITY:
|
||||
break;
|
||||
case SEG_CRYPT:
|
||||
case SEG_LINEAR:
|
||||
@@ -3132,9 +3005,6 @@ static int _load_node(struct dm_tree_node *dnode)
|
||||
if (!dm_task_suppress_identical_reload(dmt))
|
||||
log_warn("WARNING: Failed to suppress reload of identical tables.");
|
||||
|
||||
if (dnode->props.skip_reload_params_compare)
|
||||
dm_task_skip_reload_params_compare(dmt);
|
||||
|
||||
if ((r = dm_task_run(dmt))) {
|
||||
r = dm_task_get_info(dmt, &dnode->info);
|
||||
if (r && !dnode->info.inactive_table)
|
||||
@@ -3153,8 +3023,8 @@ static int _load_node(struct dm_tree_node *dnode)
|
||||
if (!existing_table_size && dnode->props.delay_resume_if_new)
|
||||
dnode->props.size_changed = 0;
|
||||
|
||||
log_debug_activation("Table size changed from %" PRIu64 " to %" PRIu64 " for %s.%s",
|
||||
existing_table_size,
|
||||
log_debug_activation("Table size changed from %" PRIu64 " to %"
|
||||
PRIu64 " for %s.%s", existing_table_size,
|
||||
seg_start, _node_name(dnode),
|
||||
dnode->props.size_changed ? "" : " (Ignoring.)");
|
||||
|
||||
@@ -3206,16 +3076,6 @@ static int _dm_tree_revert_activated(struct dm_tree_node *parent)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int _dm_tree_wait_and_revert_activated(struct dm_tree_node *dnode)
|
||||
{
|
||||
if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
|
||||
stack;
|
||||
|
||||
dm_tree_set_cookie(dnode, 0);
|
||||
|
||||
return _dm_tree_revert_activated(dnode);
|
||||
}
|
||||
|
||||
int dm_tree_preload_children(struct dm_tree_node *dnode,
|
||||
const char *uuid_prefix,
|
||||
size_t uuid_prefix_len)
|
||||
@@ -3245,7 +3105,7 @@ int dm_tree_preload_children(struct dm_tree_node *dnode,
|
||||
return_0;
|
||||
|
||||
/* FIXME Cope if name exists with no uuid? */
|
||||
if (!child->info.exists && !(node_created = _create_node(child, dnode)))
|
||||
if (!child->info.exists && !(node_created = _create_node(child)))
|
||||
return_0;
|
||||
|
||||
/* Propagate delayed resume from extended child node */
|
||||
@@ -3255,22 +3115,28 @@ int dm_tree_preload_children(struct dm_tree_node *dnode,
|
||||
if (!child->info.inactive_table &&
|
||||
child->props.segment_count &&
|
||||
!_load_node(child)) {
|
||||
stack;
|
||||
/*
|
||||
* If the table load fails, try to remove the device in the kernel
|
||||
* together with other created and preloaded devices.
|
||||
* If the table load does not succeed, we remove the
|
||||
* device in the kernel that would otherwise have an
|
||||
* empty table. This makes the create + load of the
|
||||
* device atomic. However, if other dependencies have
|
||||
* already been created and loaded; this code is
|
||||
* insufficient to remove those - only the node
|
||||
* encountering the table load failure is removed.
|
||||
*/
|
||||
if (!_dm_tree_wait_and_revert_activated(dnode))
|
||||
stack;
|
||||
r = 0;
|
||||
continue;
|
||||
if (node_created) {
|
||||
if (!_remove_node(child))
|
||||
return_0;
|
||||
if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
|
||||
stack;
|
||||
dm_tree_set_cookie(dnode, 0);
|
||||
(void) _dm_tree_revert_activated(child);
|
||||
}
|
||||
return_0;
|
||||
}
|
||||
|
||||
/* No resume for a device without parents or with unchanged or smaller size */
|
||||
if (!dm_tree_node_num_children(child, 1))
|
||||
continue;
|
||||
|
||||
if (child->props.size_changed <= 0)
|
||||
if (!dm_tree_node_num_children(child, 1) || (child->props.size_changed <= 0))
|
||||
continue;
|
||||
|
||||
if (!child->info.inactive_table && !child->info.suspended)
|
||||
@@ -3281,19 +3147,28 @@ int dm_tree_preload_children(struct dm_tree_node *dnode,
|
||||
&child->info, &child->dtree->cookie, child->udev_flags,
|
||||
child->info.suspended)) {
|
||||
log_error("Unable to resume %s.", _node_name(child));
|
||||
if (!_dm_tree_wait_and_revert_activated(dnode))
|
||||
stack;
|
||||
/* If the device was not previously active, we might as well remove this node. */
|
||||
if (!child->info.live_table &&
|
||||
!_deactivate_node(child->name, child->info.major, child->info.minor,
|
||||
&child->dtree->cookie, child->udev_flags, 0))
|
||||
log_error("Unable to deactivate %s.", _node_name(child));
|
||||
r = 0;
|
||||
/* Each child is handled independently */
|
||||
continue;
|
||||
}
|
||||
|
||||
if (node_created) {
|
||||
/* Collect newly introduced devices for revert */
|
||||
dm_list_add_h(&dnode->activated, &child->activated_list);
|
||||
|
||||
/* When creating new node also check transaction_id. */
|
||||
if (child->props.send_messages &&
|
||||
!_node_send_messages(child, uuid_prefix, uuid_prefix_len, 0)) {
|
||||
stack;
|
||||
if (!_dm_tree_wait_and_revert_activated(dnode))
|
||||
if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
|
||||
stack;
|
||||
dm_tree_set_cookie(dnode, 0);
|
||||
(void) _dm_tree_revert_activated(dnode);
|
||||
r = 0;
|
||||
continue;
|
||||
}
|
||||
@@ -3863,48 +3738,6 @@ int dm_tree_node_add_writecache_target(struct dm_tree_node *node,
|
||||
return 1;
|
||||
}
|
||||
|
||||
int dm_tree_node_add_integrity_target(struct dm_tree_node *node,
|
||||
uint64_t size,
|
||||
const char *origin_uuid,
|
||||
const char *meta_uuid,
|
||||
struct integrity_settings *settings,
|
||||
int recalculate)
|
||||
{
|
||||
struct load_segment *seg;
|
||||
|
||||
if (!(seg = _add_segment(node, SEG_INTEGRITY, size)))
|
||||
return_0;
|
||||
|
||||
if (!meta_uuid) {
|
||||
log_error("No integrity meta uuid.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!(seg->integrity_meta_node = dm_tree_find_node_by_uuid(node->dtree, meta_uuid))) {
|
||||
log_error("Missing integrity's meta uuid %s.", meta_uuid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!_link_tree_nodes(node, seg->integrity_meta_node))
|
||||
return_0;
|
||||
|
||||
if (!(seg->origin = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
|
||||
log_error("Missing integrity's origin uuid %s.", origin_uuid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!_link_tree_nodes(node, seg->origin))
|
||||
return_0;
|
||||
|
||||
memcpy(&seg->integrity_settings, settings, sizeof(struct integrity_settings));
|
||||
|
||||
seg->integrity_recalculate = recalculate;
|
||||
|
||||
node->props.skip_reload_params_compare = 1;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
|
||||
uint64_t size,
|
||||
const char *rlog_uuid,
|
||||
@@ -4367,7 +4200,6 @@ int dm_tree_node_add_cache_target_base(struct dm_tree_node *node,
|
||||
|
||||
int dm_tree_node_add_vdo_target(struct dm_tree_node *node,
|
||||
uint64_t size,
|
||||
const char *vdo_pool_name,
|
||||
const char *data_uuid,
|
||||
uint64_t data_size,
|
||||
const struct dm_vdo_target_params *vtp)
|
||||
@@ -4389,7 +4221,7 @@ int dm_tree_node_add_vdo_target(struct dm_tree_node *node,
|
||||
return_0;
|
||||
|
||||
seg->vdo_params = *vtp;
|
||||
seg->vdo_name = vdo_pool_name;
|
||||
seg->vdo_name = node->name;
|
||||
seg->vdo_data_size = data_size;
|
||||
|
||||
node->props.send_messages = 2;
|
||||
|
||||
@@ -749,11 +749,10 @@ static void _display_fields_more(struct dm_report *rh,
|
||||
id_len = strlen(type->prefix) + 3;
|
||||
|
||||
for (f = 0; fields[f].report_fn; f++) {
|
||||
if (!(type = _find_type(rh, fields[f].type))) {
|
||||
log_debug(INTERNAL_ERROR "Field type undefined.");
|
||||
continue;
|
||||
}
|
||||
desc = (type->desc) ? : " ";
|
||||
if ((type = _find_type(rh, fields[f].type)) && type->desc)
|
||||
desc = type->desc;
|
||||
else
|
||||
desc = " ";
|
||||
if (desc != last_desc) {
|
||||
if (*last_desc)
|
||||
log_warn(" ");
|
||||
|
||||
@@ -296,8 +296,6 @@ int dm_get_status_cache(struct dm_pool *mem, const char *params,
|
||||
s->feature_flags |= DM_CACHE_FEATURE_PASSTHROUGH;
|
||||
else if (!strncmp(p, "metadata2 ", 10))
|
||||
s->feature_flags |= DM_CACHE_FEATURE_METADATA2;
|
||||
else if (!strncmp(p, "no_discard_passdown ", 20))
|
||||
s->feature_flags |= DM_CACHE_FEATURE_NO_DISCARD_PASSDOWN;
|
||||
else
|
||||
log_error("Unknown feature in status: %s", params);
|
||||
|
||||
@@ -366,8 +364,8 @@ int dm_get_status_writecache(struct dm_pool *mem, const char *params,
|
||||
if (!(s = dm_pool_zalloc(mem, sizeof(struct dm_status_writecache))))
|
||||
return_0;
|
||||
|
||||
if (sscanf(params, "%llu %llu %llu %llu",
|
||||
(unsigned long long *)&s->error,
|
||||
if (sscanf(params, "%u %llu %llu %llu",
|
||||
&s->error,
|
||||
(unsigned long long *)&s->total_blocks,
|
||||
(unsigned long long *)&s->free_blocks,
|
||||
(unsigned long long *)&s->writeback_blocks) != 4) {
|
||||
@@ -380,33 +378,6 @@ int dm_get_status_writecache(struct dm_pool *mem, const char *params,
|
||||
return 1;
|
||||
}
|
||||
|
||||
int dm_get_status_integrity(struct dm_pool *mem, const char *params,
|
||||
struct dm_status_integrity **status)
|
||||
{
|
||||
struct dm_status_integrity *s;
|
||||
char recalc_str[16] = "\0";
|
||||
|
||||
if (!(s = dm_pool_zalloc(mem, sizeof(*s))))
|
||||
return_0;
|
||||
|
||||
if (sscanf(params, "%llu %llu %s",
|
||||
(unsigned long long *)&s->number_of_mismatches,
|
||||
(unsigned long long *)&s->provided_data_sectors,
|
||||
recalc_str) != 3) {
|
||||
log_error("Failed to parse integrity params: %s.", params);
|
||||
dm_pool_free(mem, s);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (recalc_str[0] == '-')
|
||||
s->recalc_sector = 0;
|
||||
else
|
||||
s->recalc_sector = strtoull(recalc_str, NULL, 0);
|
||||
|
||||
*status = s;
|
||||
return 1;
|
||||
}
|
||||
|
||||
int parse_thin_pool_status(const char *params, struct dm_status_thin_pool *s)
|
||||
{
|
||||
int pos;
|
||||
|
||||
@@ -183,7 +183,7 @@ struct dm_target_spec {
|
||||
struct dm_target_deps {
|
||||
uint32_t count; /* Array size */
|
||||
uint32_t padding; /* unused */
|
||||
uint64_t dev[]; /* out */
|
||||
uint64_t dev[0]; /* out */
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -193,7 +193,7 @@ struct dm_name_list {
|
||||
uint64_t dev;
|
||||
uint32_t next; /* offset to the next record from
|
||||
the _start_ of this */
|
||||
char name[];
|
||||
char name[0];
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -203,7 +203,7 @@ struct dm_target_versions {
|
||||
uint32_t next;
|
||||
uint32_t version[3];
|
||||
|
||||
char name[];
|
||||
char name[0];
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -212,7 +212,7 @@ struct dm_target_versions {
|
||||
struct dm_target_msg {
|
||||
uint64_t sector; /* Device sector */
|
||||
|
||||
char message[];
|
||||
char message[0];
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -244,7 +244,6 @@ enum {
|
||||
DM_TARGET_MSG_CMD,
|
||||
DM_DEV_SET_GEOMETRY_CMD,
|
||||
DM_DEV_ARM_POLL_CMD,
|
||||
DM_GET_TARGET_VERSION_CMD,
|
||||
};
|
||||
|
||||
#define DM_IOCTL 0xfd
|
||||
@@ -271,8 +270,6 @@ enum {
|
||||
#define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl)
|
||||
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
|
||||
|
||||
#define DM_GET_TARGET_VERSION _IOWR(DM_IOCTL, DM_GET_TARGET_VERSION_CMD, struct dm_ioctl)
|
||||
|
||||
#define DM_VERSION_MAJOR 4
|
||||
#define DM_VERSION_MINOR 36
|
||||
#define DM_VERSION_PATCHLEVEL 0
|
||||
|
||||
@@ -98,7 +98,7 @@ void dm_pools_check_leaks(void)
|
||||
p->orig_pool,
|
||||
p->name, p->stats.bytes);
|
||||
#else
|
||||
log_error(" [%p] %s", (void *)p, p->name);
|
||||
log_error(" [%p] %s", p, p->name);
|
||||
#endif
|
||||
}
|
||||
pthread_mutex_unlock(&_dm_pools_mutex);
|
||||
|
||||
@@ -74,7 +74,7 @@ enum dm_vdo_write_policy {
|
||||
|
||||
// FIXME: review whether we should use the createParams from the userlib
|
||||
struct dm_vdo_target_params {
|
||||
uint32_t minimum_io_size; // in sectors
|
||||
uint32_t minimum_io_size;
|
||||
uint32_t block_map_cache_size_mb;
|
||||
uint32_t block_map_era_length; // format period
|
||||
|
||||
|
||||
@@ -23,9 +23,8 @@ bool dm_vdo_validate_target_params(const struct dm_vdo_target_params *vtp,
|
||||
{
|
||||
bool valid = true;
|
||||
|
||||
/* 512 or 4096 bytes only ATM */
|
||||
if ((vtp->minimum_io_size != 1) &&
|
||||
(vtp->minimum_io_size != 8)) {
|
||||
if ((vtp->minimum_io_size != 512) &&
|
||||
(vtp->minimum_io_size != 4096)) {
|
||||
log_error("VDO minimum io size %u is unsupported.",
|
||||
vtp->minimum_io_size);
|
||||
valid = false;
|
||||
|
||||
@@ -126,9 +126,6 @@
|
||||
/* Library version */
|
||||
#undef DM_LIB_VERSION
|
||||
|
||||
/* Define to 1 to include the LVM editline shell. */
|
||||
#undef EDITLINE_SUPPORT
|
||||
|
||||
/* Path to fsadm binary. */
|
||||
#undef FSADM_PATH
|
||||
|
||||
@@ -154,9 +151,6 @@
|
||||
/* Define to 1 if you have the `atexit' function. */
|
||||
#undef HAVE_ATEXIT
|
||||
|
||||
/* Define if ioctl BLKZEROOUT can be used for device zeroing. */
|
||||
#undef HAVE_BLKZEROOUT
|
||||
|
||||
/* Define to 1 if canonicalize_file_name is available. */
|
||||
#undef HAVE_CANONICALIZE_FILE_NAME
|
||||
|
||||
@@ -182,12 +176,6 @@
|
||||
/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */
|
||||
#undef HAVE_DOPRNT
|
||||
|
||||
/* Define to 1 if you have the <editline/history.h> header file. */
|
||||
#undef HAVE_EDITLINE_HISTORY_H
|
||||
|
||||
/* Define to 1 if you have the <editline/readline.h> header file. */
|
||||
#undef HAVE_EDITLINE_READLINE_H
|
||||
|
||||
/* Define to 1 if you have the <errno.h> header file. */
|
||||
#undef HAVE_ERRNO_H
|
||||
|
||||
@@ -304,12 +292,6 @@
|
||||
/* Define to 1 if you have the <paths.h> header file. */
|
||||
#undef HAVE_PATHS_H
|
||||
|
||||
/* Define to 1 if you have the `prlimit' function. */
|
||||
#undef HAVE_PRLIMIT
|
||||
|
||||
/* Define to 1 if you have the `pselect' function. */
|
||||
#undef HAVE_PSELECT
|
||||
|
||||
/* Define to 1 if the system has the type `ptrdiff_t'. */
|
||||
#undef HAVE_PTRDIFF_T
|
||||
|
||||
@@ -543,18 +525,12 @@
|
||||
/* Define to 1 if the system has the `__builtin_clzll' built-in function */
|
||||
#undef HAVE___BUILTIN_CLZLL
|
||||
|
||||
/* Define to 1 to include built-in support for integrity. */
|
||||
#undef INTEGRITY_INTERNAL
|
||||
|
||||
/* Internationalization package */
|
||||
#undef INTL_PACKAGE
|
||||
|
||||
/* Locale-dependent data */
|
||||
#undef LOCALEDIR
|
||||
|
||||
/* Define to 1 to include code that uses lvmlockd dlm control option. */
|
||||
#undef LOCKDDLM_CONTROL_SUPPORT
|
||||
|
||||
/* Define to 1 to include code that uses lvmlockd dlm option. */
|
||||
#undef LOCKDDLM_SUPPORT
|
||||
|
||||
|
||||
@@ -20,7 +20,6 @@ SOURCES =\
|
||||
activate/activate.c \
|
||||
cache/lvmcache.c \
|
||||
writecache/writecache.c \
|
||||
integrity/integrity.c \
|
||||
cache_segtype/cache.c \
|
||||
commands/toolcontext.c \
|
||||
config/config.c \
|
||||
@@ -67,16 +66,14 @@ SOURCES =\
|
||||
locking/locking.c \
|
||||
log/log.c \
|
||||
metadata/cache_manip.c \
|
||||
metadata/writecache_manip.c \
|
||||
metadata/integrity_manip.c \
|
||||
metadata/lv.c \
|
||||
metadata/lv_manip.c \
|
||||
metadata/merge.c \
|
||||
metadata/metadata.c \
|
||||
metadata/read.c \
|
||||
metadata/mirror.c \
|
||||
metadata/pool_manip.c \
|
||||
metadata/pv.c \
|
||||
metadata/pv_list.c \
|
||||
metadata/pv_manip.c \
|
||||
metadata/pv_map.c \
|
||||
metadata/raid_manip.c \
|
||||
|
||||
@@ -185,8 +185,8 @@ void set_activation(int act, int silent)
|
||||
if (warned || !act)
|
||||
return;
|
||||
|
||||
log_warn("WARNING: Compiled without libdevmapper support. "
|
||||
"Can't enable activation.");
|
||||
log_error("Compiled without libdevmapper support. "
|
||||
"Can't enable activation.");
|
||||
|
||||
warned = 1;
|
||||
}
|
||||
@@ -221,13 +221,23 @@ int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_la
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_info_with_seg_status(struct cmd_context *cmd,
|
||||
const struct lv_segment *lv_seg,
|
||||
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer,
|
||||
struct lvinfo *info, int with_open_count, int with_read_ahead)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_info_with_seg_status(struct cmd_context *cmd, const struct logical_volume *lv,
|
||||
const struct lv_segment *lv_seg, int use_layer,
|
||||
struct lv_with_info_and_seg_status *status,
|
||||
int with_open_count, int with_read_ahead)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_status(struct cmd_context *cmd, const struct lv_segment *lv_seg,
|
||||
int use_layer, struct lv_seg_status *lv_seg_status)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_cache_status(const struct logical_volume *cache_lv,
|
||||
struct lv_status_cache **status)
|
||||
{
|
||||
@@ -274,17 +284,18 @@ int lv_raid_message(const struct logical_volume *lv, const char *msg)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_writecache_message(const struct logical_volume *lv, const char *msg)
|
||||
int lv_thin_pool_percent(const struct logical_volume *lv, int metadata,
|
||||
dm_percent_t *percent)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_thin_pool_status(const struct logical_volume *lv, int flush,
|
||||
struct lv_status_thin_pool **thin_pool_status)
|
||||
int lv_thin_percent(const struct logical_volume *lv, int mapped,
|
||||
dm_percent_t *percent)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_thin_status(const struct logical_volume *lv, int flush,
|
||||
struct lv_status_thin **thin_status)
|
||||
int lv_thin_pool_transaction_id(const struct logical_volume *lv,
|
||||
uint64_t *transaction_id)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
@@ -292,15 +303,6 @@ int lv_thin_device_id(const struct logical_volume *lv, uint32_t *device_id)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_vdo_pool_status(const struct logical_volume *lv, int flush,
|
||||
struct lv_status_vdo **vdo_status)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_vdo_pool_percent(const struct logical_volume *lv, dm_percent_t *percent)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lvs_in_vg_activated(const struct volume_group *vg)
|
||||
{
|
||||
return 0;
|
||||
@@ -618,7 +620,7 @@ static int _lv_info(struct cmd_context *cmd, const struct logical_volume *lv,
|
||||
int use_layer, struct lvinfo *info,
|
||||
const struct lv_segment *seg,
|
||||
struct lv_seg_status *seg_status,
|
||||
int with_open_count, int with_read_ahead, int with_name_check)
|
||||
int with_open_count, int with_read_ahead)
|
||||
{
|
||||
struct dm_info dminfo;
|
||||
|
||||
@@ -636,7 +638,7 @@ static int _lv_info(struct cmd_context *cmd, const struct logical_volume *lv,
|
||||
/* New thin-pool has no layer, but -tpool suffix needs to be queried */
|
||||
if (!use_layer && lv_is_new_thin_pool(lv)) {
|
||||
/* Check if there isn't existing old thin pool mapping in the table */
|
||||
if (!dev_manager_info(cmd, lv, NULL, 0, 0, 0, &dminfo, NULL, NULL))
|
||||
if (!dev_manager_info(cmd, lv, NULL, 0, 0, &dminfo, NULL, NULL))
|
||||
return_0;
|
||||
if (!dminfo.exists)
|
||||
use_layer = 1;
|
||||
@@ -649,9 +651,8 @@ static int _lv_info(struct cmd_context *cmd, const struct logical_volume *lv,
|
||||
|
||||
if (!dev_manager_info(cmd, lv,
|
||||
(use_layer) ? lv_layer(lv) : NULL,
|
||||
with_open_count, with_read_ahead, with_name_check,
|
||||
&dminfo,
|
||||
(info) ? &info->read_ahead : NULL,
|
||||
with_open_count, with_read_ahead,
|
||||
&dminfo, (info) ? &info->read_ahead : NULL,
|
||||
seg_status))
|
||||
return_0;
|
||||
|
||||
@@ -680,16 +681,7 @@ int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_la
|
||||
if (!activation())
|
||||
return 0;
|
||||
|
||||
return _lv_info(cmd, lv, use_layer, info, NULL, NULL, with_open_count, with_read_ahead, 0);
|
||||
}
|
||||
|
||||
int lv_info_with_name_check(struct cmd_context *cmd, const struct logical_volume *lv,
|
||||
int use_layer, struct lvinfo *info)
|
||||
{
|
||||
if (!activation())
|
||||
return 0;
|
||||
|
||||
return _lv_info(cmd, lv, use_layer, info, NULL, NULL, 0, 0, 1);
|
||||
return _lv_info(cmd, lv, use_layer, info, NULL, NULL, with_open_count, with_read_ahead);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -719,16 +711,16 @@ int lv_info_with_seg_status(struct cmd_context *cmd,
|
||||
* STATUS is collected from cache LV */
|
||||
if (!(lv_seg = get_only_segment_using_this_lv(lv)))
|
||||
return_0;
|
||||
(void) _lv_info(cmd, lv_seg->lv, 1, NULL, lv_seg, &status->seg_status, 0, 0, 0);
|
||||
(void) _lv_info(cmd, lv_seg->lv, 1, NULL, lv_seg, &status->seg_status, 0, 0);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (lv_is_thin_pool(lv)) {
|
||||
/* Always collect status for '-tpool' */
|
||||
if (_lv_info(cmd, lv, 1, &status->info, lv_seg, &status->seg_status, 0, 0, 0) &&
|
||||
if (_lv_info(cmd, lv, 1, &status->info, lv_seg, &status->seg_status, 0, 0) &&
|
||||
(status->seg_status.type == SEG_STATUS_THIN_POOL)) {
|
||||
/* There is -tpool device, but query 'active' state of 'fake' thin-pool */
|
||||
if (!_lv_info(cmd, lv, 0, NULL, NULL, NULL, 0, 0, 0) &&
|
||||
if (!_lv_info(cmd, lv, 0, NULL, NULL, NULL, 0, 0) &&
|
||||
!status->seg_status.thin_pool->needs_check)
|
||||
status->info.exists = 0; /* So pool LV is not active */
|
||||
}
|
||||
@@ -737,10 +729,10 @@ int lv_info_with_seg_status(struct cmd_context *cmd,
|
||||
|
||||
if (lv_is_external_origin(lv)) {
|
||||
if (!_lv_info(cmd, lv, 0, &status->info, NULL, NULL,
|
||||
with_open_count, with_read_ahead, 0))
|
||||
with_open_count, with_read_ahead))
|
||||
return_0;
|
||||
|
||||
(void) _lv_info(cmd, lv, 1, NULL, lv_seg, &status->seg_status, 0, 0, 0);
|
||||
(void) _lv_info(cmd, lv, 1, NULL, lv_seg, &status->seg_status, 0, 0);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -753,13 +745,13 @@ int lv_info_with_seg_status(struct cmd_context *cmd,
|
||||
/* Show INFO for actual origin and grab status for merging origin */
|
||||
if (!_lv_info(cmd, lv, 0, &status->info, lv_seg,
|
||||
lv_is_merging_origin(lv) ? &status->seg_status : NULL,
|
||||
with_open_count, with_read_ahead, 0))
|
||||
with_open_count, with_read_ahead))
|
||||
return_0;
|
||||
|
||||
if (status->info.exists &&
|
||||
(status->seg_status.type != SEG_STATUS_SNAPSHOT)) /* Not merging */
|
||||
/* Grab STATUS from layered -real */
|
||||
(void) _lv_info(cmd, lv, 1, NULL, lv_seg, &status->seg_status, 0, 0, 0);
|
||||
(void) _lv_info(cmd, lv, 1, NULL, lv_seg, &status->seg_status, 0, 0);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -768,11 +760,10 @@ int lv_info_with_seg_status(struct cmd_context *cmd,
|
||||
olv = origin_from_cow(lv);
|
||||
|
||||
if (!_lv_info(cmd, olv, 0, &status->info, first_seg(olv), &status->seg_status,
|
||||
with_open_count, with_read_ahead, 0))
|
||||
with_open_count, with_read_ahead))
|
||||
return_0;
|
||||
|
||||
if (status->seg_status.type == SEG_STATUS_SNAPSHOT ||
|
||||
(lv_is_thin_volume(olv) && (status->seg_status.type == SEG_STATUS_THIN))) {
|
||||
if (status->seg_status.type == SEG_STATUS_SNAPSHOT) {
|
||||
log_debug_activation("Snapshot merge is in progress, querying status of %s instead.",
|
||||
display_lvname(lv));
|
||||
/*
|
||||
@@ -790,33 +781,21 @@ int lv_info_with_seg_status(struct cmd_context *cmd,
|
||||
|
||||
if (lv_is_vdo(lv)) {
|
||||
if (!_lv_info(cmd, lv, 0, &status->info, NULL, NULL,
|
||||
with_open_count, with_read_ahead, 0))
|
||||
with_open_count, with_read_ahead))
|
||||
return_0;
|
||||
if (status->info.exists) {
|
||||
/* Status for VDO pool */
|
||||
(void) _lv_info(cmd, seg_lv(lv_seg, 0), 1, NULL,
|
||||
first_seg(seg_lv(lv_seg, 0)),
|
||||
&status->seg_status, 0, 0, 0);
|
||||
&status->seg_status, 0, 0);
|
||||
/* Use VDO pool segtype result for VDO segtype */
|
||||
status->seg_status.seg = lv_seg;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (lv_is_vdo_pool(lv)) {
|
||||
/* Always collect status for '-vpool' */
|
||||
if (_lv_info(cmd, lv, 1, &status->info, lv_seg, &status->seg_status, 0, 0, 0) &&
|
||||
(status->seg_status.type == SEG_STATUS_VDO_POOL)) {
|
||||
/* There is -tpool device, but query 'active' state of 'fake' vdo-pool */
|
||||
if (!_lv_info(cmd, lv, 0, NULL, NULL, NULL, 0, 0, 0))
|
||||
status->info.exists = 0; /* So VDO pool LV is not active */
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
return _lv_info(cmd, lv, 0, &status->info, lv_seg, &status->seg_status,
|
||||
with_open_count, with_read_ahead, 0);
|
||||
with_open_count, with_read_ahead);
|
||||
}
|
||||
|
||||
#define OPEN_COUNT_CHECK_RETRIES 25
|
||||
@@ -1246,52 +1225,86 @@ int lv_cache_status(const struct logical_volume *cache_lv,
|
||||
return 1;
|
||||
}
|
||||
|
||||
int lv_thin_pool_status(const struct logical_volume *lv, int flush,
|
||||
struct lv_status_thin_pool **thin_pool_status)
|
||||
/*
|
||||
* Returns data or metadata percent usage, depending on metadata 0/1.
|
||||
* Returns 1 if percent set, else 0 on failure.
|
||||
*/
|
||||
int lv_thin_pool_percent(const struct logical_volume *lv, int metadata,
|
||||
dm_percent_t *percent)
|
||||
{
|
||||
int r;
|
||||
struct dev_manager *dm;
|
||||
|
||||
if (!lv_info(lv->vg->cmd, lv, 1, NULL, 0, 0))
|
||||
return 0;
|
||||
|
||||
log_debug_activation("Checking thin pool status for LV %s.",
|
||||
display_lvname(lv));
|
||||
log_debug_activation("Checking thin %sdata percent for LV %s.",
|
||||
(metadata) ? "meta" : "", display_lvname(lv));
|
||||
|
||||
if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
|
||||
return_0;
|
||||
|
||||
if (!dev_manager_thin_pool_status(dm, lv, flush, thin_pool_status)) {
|
||||
dev_manager_destroy(dm);
|
||||
return_0;
|
||||
}
|
||||
if (!(r = dev_manager_thin_pool_percent(dm, lv, metadata, percent)))
|
||||
stack;
|
||||
|
||||
/* User has to call dm_pool_destroy(thin_pool_status->mem)! */
|
||||
dev_manager_destroy(dm);
|
||||
|
||||
return 1;
|
||||
return r;
|
||||
}
|
||||
|
||||
int lv_thin_status(const struct logical_volume *lv, int flush,
|
||||
struct lv_status_thin **thin_status)
|
||||
/*
|
||||
* Returns 1 if percent set, else 0 on failure.
|
||||
*/
|
||||
int lv_thin_percent(const struct logical_volume *lv,
|
||||
int mapped, dm_percent_t *percent)
|
||||
{
|
||||
int r;
|
||||
struct dev_manager *dm;
|
||||
|
||||
if (!lv_info(lv->vg->cmd, lv, 0, NULL, 0, 0))
|
||||
return 0;
|
||||
|
||||
log_debug_activation("Checking thin status for LV %s.",
|
||||
log_debug_activation("Checking thin percent for LV %s.",
|
||||
display_lvname(lv));
|
||||
|
||||
if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
|
||||
return_0;
|
||||
|
||||
if (!dev_manager_thin_status(dm, lv, flush, thin_status)) {
|
||||
dev_manager_destroy(dm);
|
||||
if (!(r = dev_manager_thin_percent(dm, lv, mapped, percent)))
|
||||
stack;
|
||||
|
||||
dev_manager_destroy(dm);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns 1 if transaction_id set, else 0 on failure.
|
||||
*/
|
||||
int lv_thin_pool_transaction_id(const struct logical_volume *lv,
|
||||
uint64_t *transaction_id)
|
||||
{
|
||||
int r;
|
||||
struct dev_manager *dm;
|
||||
struct dm_status_thin_pool *status;
|
||||
|
||||
if (!lv_info(lv->vg->cmd, lv, 1, NULL, 0, 0))
|
||||
return 0;
|
||||
|
||||
log_debug_activation("Checking thin-pool transaction id for LV %s.",
|
||||
display_lvname(lv));
|
||||
|
||||
if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
|
||||
return_0;
|
||||
}
|
||||
|
||||
/* User has to call dm_pool_destroy(thin_status->mem)! */
|
||||
if (!(r = dev_manager_thin_pool_status(dm, lv, &status, 0)))
|
||||
stack;
|
||||
else
|
||||
*transaction_id = status->transaction_id;
|
||||
|
||||
return 1;
|
||||
dev_manager_destroy(dm);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
int lv_thin_device_id(const struct logical_volume *lv, uint32_t *device_id)
|
||||
@@ -1329,7 +1342,7 @@ int lv_vdo_pool_status(const struct logical_volume *lv, int flush,
|
||||
int r = 0;
|
||||
struct dev_manager *dm;
|
||||
|
||||
if (!lv_info(lv->vg->cmd, lv, 1, NULL, 0, 0))
|
||||
if (!lv_info(lv->vg->cmd, lv, 0, NULL, 0, 0))
|
||||
return 0;
|
||||
|
||||
log_debug_activation("Checking VDO pool status for LV %s.",
|
||||
@@ -1579,8 +1592,6 @@ static char *_build_target_uuid(struct cmd_context *cmd, const struct logical_vo
|
||||
|
||||
if (lv_is_thin_pool(lv))
|
||||
layer = "tpool"; /* Monitor "tpool" for the "thin pool". */
|
||||
else if (lv_is_vdo_pool(lv))
|
||||
layer = "vpool"; /* Monitor "vpool" for the "VDO pool". */
|
||||
else if (lv_is_origin(lv) || lv_is_external_origin(lv))
|
||||
layer = "real"; /* Monitor "real" for "snapshot-origin". */
|
||||
else
|
||||
@@ -2161,6 +2172,8 @@ static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
|
||||
if (laopts->origin_only && lv_is_thin_volume(lv) && lv_is_thin_volume(lv_pre))
|
||||
lockfs = 1;
|
||||
|
||||
critical_section_inc(cmd, "suspending");
|
||||
|
||||
if (!lv_is_locked(lv) && lv_is_locked(lv_pre) &&
|
||||
(pvmove_lv = find_pvmove_lv_in_lv(lv_pre))) {
|
||||
/*
|
||||
@@ -2202,23 +2215,16 @@ static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
|
||||
}
|
||||
dm_list_add(&suspend_lvs, &lvl->list);
|
||||
}
|
||||
|
||||
critical_section_inc(cmd, "suspending");
|
||||
|
||||
dm_list_iterate_items(lvl, &suspend_lvs)
|
||||
if (!_lv_suspend_lv(lvl->lv, laopts, lockfs, 1)) {
|
||||
critical_section_dec(cmd, "failed suspend");
|
||||
goto_out; /* FIXME: resume on recovery path? */
|
||||
}
|
||||
|
||||
} else { /* Standard suspend */
|
||||
critical_section_inc(cmd, "suspending");
|
||||
|
||||
} else /* Standard suspend */
|
||||
if (!_lv_suspend_lv(lv, laopts, lockfs, flush_required)) {
|
||||
critical_section_dec(cmd, "failed suspend");
|
||||
goto_out;
|
||||
}
|
||||
}
|
||||
|
||||
r = 1;
|
||||
out:
|
||||
@@ -2238,8 +2244,8 @@ int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned o
|
||||
const struct logical_volume *lv, const struct logical_volume *lv_pre)
|
||||
{
|
||||
struct lv_activate_opts laopts = {
|
||||
.exclusive = exclusive,
|
||||
.origin_only = origin_only
|
||||
.origin_only = origin_only,
|
||||
.exclusive = exclusive
|
||||
};
|
||||
|
||||
return _lv_suspend(cmd, lvid_s, &laopts, 0, lv, lv_pre);
|
||||
@@ -2291,9 +2297,6 @@ static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
|
||||
lv_is_thin_volume(lv) ? " thin only" : " without snapshots") : "",
|
||||
laopts->revert ? " (reverting)" : "");
|
||||
|
||||
if (laopts->revert)
|
||||
goto needs_resume;
|
||||
|
||||
if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
|
||||
goto_out;
|
||||
|
||||
@@ -2353,8 +2356,8 @@ int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
|
||||
unsigned revert, const struct logical_volume *lv)
|
||||
{
|
||||
struct lv_activate_opts laopts = {
|
||||
.exclusive = exclusive,
|
||||
.origin_only = origin_only,
|
||||
.exclusive = exclusive,
|
||||
.revert = revert
|
||||
};
|
||||
|
||||
@@ -2419,17 +2422,6 @@ int lv_deactivate(struct cmd_context *cmd, const char *lvid_s, const struct logi
|
||||
}
|
||||
}
|
||||
|
||||
if (lv_is_vdo_pool(lv)) {
|
||||
/* If someone has removed the 'linear' mapping over the VDO device
|
||||
* we may still be able to deactivate the rest of the tree
|
||||
* i.e. in test-suite we simulate this via 'dmsetup remove' */
|
||||
if (!lv_info(cmd, lv, 1, &info, 1, 0))
|
||||
goto_out;
|
||||
|
||||
if (info.exists && !info.open_count)
|
||||
r = 0; /* Unused VDO device left in table? */
|
||||
}
|
||||
|
||||
if (r)
|
||||
goto out;
|
||||
}
|
||||
@@ -2507,13 +2499,6 @@ static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if ((cmd->partial_activation || cmd->degraded_activation) &&
|
||||
lv_is_partial(lv) && lv_is_raid(lv) && lv_raid_has_integrity((struct logical_volume *)lv)) {
|
||||
cmd->partial_activation = 0;
|
||||
cmd->degraded_activation = 0;
|
||||
log_print("No degraded or partial activation for raid with integrity.");
|
||||
}
|
||||
|
||||
if ((!lv->vg->cmd->partial_activation) && lv_is_partial(lv)) {
|
||||
if (!lv_is_raid_type(lv) || !partial_raid_lv_supports_degraded_activation(lv)) {
|
||||
log_error("Refusing activation of partial LV %s. "
|
||||
@@ -2530,14 +2515,6 @@ static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
|
||||
}
|
||||
}
|
||||
|
||||
if ((cmd->partial_activation || cmd->degraded_activation) && lv_is_writecache(lv)) {
|
||||
struct logical_volume *lv_fast = first_seg(lv)->writecache;
|
||||
if (lv_is_partial(lv) || (lv_fast && lv_is_partial(lv_fast))) {
|
||||
log_error("Cannot use partial or degraded activation with writecache.");
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
if (lv_has_unknown_segments(lv)) {
|
||||
log_error("Refusing activation of LV %s containing "
|
||||
"an unrecognised segment.", display_lvname(lv));
|
||||
@@ -2570,7 +2547,7 @@ static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
|
||||
laopts->noscan ? " noscan" : "",
|
||||
laopts->temporary ? " temporary" : "");
|
||||
|
||||
if (!lv_info_with_name_check(cmd, lv, 0, &info))
|
||||
if (!lv_info(cmd, lv, 0, &info, 0, 0))
|
||||
goto_out;
|
||||
|
||||
/*
|
||||
@@ -2942,7 +2919,8 @@ int revert_lv(struct cmd_context *cmd, const struct logical_volume *lv)
|
||||
|
||||
ret = lv_resume_if_active(cmd, NULL, 0, 0, 1, lv_committed(lv));
|
||||
|
||||
critical_section_dec(cmd, "unlocking on revert");
|
||||
critical_section_dec(cmd, "unlocking on resume");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -39,7 +39,6 @@ typedef enum {
|
||||
SEG_STATUS_THIN_POOL,
|
||||
SEG_STATUS_VDO_POOL,
|
||||
SEG_STATUS_WRITECACHE,
|
||||
SEG_STATUS_INTEGRITY,
|
||||
SEG_STATUS_UNKNOWN
|
||||
} lv_seg_status_type_t;
|
||||
|
||||
@@ -54,7 +53,6 @@ struct lv_seg_status {
|
||||
struct dm_status_thin *thin;
|
||||
struct dm_status_thin_pool *thin_pool;
|
||||
struct dm_status_writecache *writecache;
|
||||
struct dm_status_integrity *integrity;
|
||||
struct lv_status_vdo vdo_pool;
|
||||
};
|
||||
};
|
||||
@@ -146,8 +144,8 @@ int revert_lv(struct cmd_context *cmd, const struct logical_volume *lv);
|
||||
*/
|
||||
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_layer,
|
||||
struct lvinfo *info, int with_open_count, int with_read_ahead);
|
||||
int lv_info_with_name_check(struct cmd_context *cmd, const struct logical_volume *lv,
|
||||
int use_layer, struct lvinfo *info);
|
||||
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer,
|
||||
struct lvinfo *info, int with_open_count, int with_read_ahead);
|
||||
|
||||
/*
|
||||
* Returns 1 if lv_info_and_seg_status structure has been populated,
|
||||
@@ -191,11 +189,13 @@ int lv_raid_message(const struct logical_volume *lv, const char *msg);
|
||||
int lv_writecache_message(const struct logical_volume *lv, const char *msg);
|
||||
int lv_cache_status(const struct logical_volume *cache_lv,
|
||||
struct lv_status_cache **status);
|
||||
int lv_thin_pool_percent(const struct logical_volume *lv, int metadata,
|
||||
dm_percent_t *percent);
|
||||
int lv_thin_percent(const struct logical_volume *lv, int mapped,
|
||||
dm_percent_t *percent);
|
||||
int lv_thin_pool_transaction_id(const struct logical_volume *lv,
|
||||
uint64_t *transaction_id);
|
||||
int lv_thin_device_id(const struct logical_volume *lv, uint32_t *device_id);
|
||||
int lv_thin_status(const struct logical_volume *lv, int flush,
|
||||
struct lv_status_thin **status);
|
||||
int lv_thin_pool_status(const struct logical_volume *lv, int flush,
|
||||
struct lv_status_thin_pool **status);
|
||||
int lv_vdo_pool_status(const struct logical_volume *lv, int flush,
|
||||
struct lv_status_vdo **status);
|
||||
int lv_vdo_pool_percent(const struct logical_volume *lv, dm_percent_t *percent);
|
||||
@@ -260,7 +260,6 @@ void fs_unlock(void);
|
||||
|
||||
#define TARGET_NAME_CACHE "cache"
|
||||
#define TARGET_NAME_WRITECACHE "writecache"
|
||||
#define TARGET_NAME_INTEGRITY "integrity"
|
||||
#define TARGET_NAME_ERROR "error"
|
||||
#define TARGET_NAME_ERROR_OLD "erro" /* Truncated in older kernels */
|
||||
#define TARGET_NAME_LINEAR "linear"
|
||||
@@ -278,7 +277,6 @@ void fs_unlock(void);
|
||||
#define MODULE_NAME_CLUSTERED_MIRROR "clog"
|
||||
#define MODULE_NAME_CACHE TARGET_NAME_CACHE
|
||||
#define MODULE_NAME_WRITECACHE TARGET_NAME_WRITECACHE
|
||||
#define MODULE_NAME_INTEGRITY TARGET_NAME_INTEGRITY
|
||||
#define MODULE_NAME_ERROR TARGET_NAME_ERROR
|
||||
#define MODULE_NAME_LOG_CLUSTERED "log-clustered"
|
||||
#define MODULE_NAME_LOG_USERSPACE "log-userspace"
|
||||
|
||||
@@ -33,7 +33,6 @@
|
||||
#define MAX_TARGET_PARAMSIZE 50000
|
||||
#define LVM_UDEV_NOSCAN_FLAG DM_SUBSYSTEM_UDEV_FLAG0
|
||||
#define CRYPT_TEMP "CRYPT-TEMP"
|
||||
#define CRYPT_SUBDEV "CRYPT-SUBDEV"
|
||||
#define STRATIS "stratis-"
|
||||
|
||||
typedef enum {
|
||||
@@ -46,7 +45,7 @@ typedef enum {
|
||||
} action_t;
|
||||
|
||||
/* This list must match lib/misc/lvm-string.c:build_dm_uuid(). */
|
||||
const char *uuid_suffix_list[] = { "pool", "cdata", "cmeta", "cvol", "tdata", "tmeta", "vdata", "vpool", "imeta", NULL};
|
||||
const char *uuid_suffix_list[] = { "pool", "cdata", "cmeta", "tdata", "tmeta", "vdata", "vpool", NULL};
|
||||
|
||||
struct dlid_list {
|
||||
struct dm_list list;
|
||||
@@ -65,6 +64,7 @@ struct dev_manager {
|
||||
int activation; /* building activation tree */
|
||||
int suspend; /* building suspend tree */
|
||||
unsigned track_external_lv_deps;
|
||||
struct dm_list pending_delete; /* str_list of dlid(s) with pending delete */
|
||||
unsigned track_pending_delete;
|
||||
unsigned track_pvmove_deps;
|
||||
|
||||
@@ -85,11 +85,6 @@ int read_only_lv(const struct logical_volume *lv, const struct lv_activate_opts
|
||||
if (lv_is_raid_image(lv) || lv_is_raid_metadata(lv))
|
||||
return 0; /* Keep RAID SubLvs writable */
|
||||
|
||||
if (!layer) {
|
||||
if (lv_is_thin_pool(lv) || lv_is_vdo_pool(lv))
|
||||
return 1;
|
||||
}
|
||||
|
||||
return (laopts->read_only || !(lv->status & LVM_WRITE));
|
||||
}
|
||||
|
||||
@@ -222,10 +217,6 @@ static int _get_segment_status_from_target_params(const char *target_name,
|
||||
if (!dm_get_status_writecache(seg_status->mem, params, &(seg_status->writecache)))
|
||||
return_0;
|
||||
seg_status->type = SEG_STATUS_WRITECACHE;
|
||||
} else if (segtype_is_integrity(segtype)) {
|
||||
if (!dm_get_status_integrity(seg_status->mem, params, &(seg_status->integrity)))
|
||||
return_0;
|
||||
seg_status->type = SEG_STATUS_INTEGRITY;
|
||||
} else
|
||||
/*
|
||||
* TODO: Add support for other segment types too!
|
||||
@@ -252,7 +243,6 @@ static uint32_t _seg_len(const struct lv_segment *seg)
|
||||
static int _info_run(const char *dlid, struct dm_info *dminfo,
|
||||
uint32_t *read_ahead,
|
||||
struct lv_seg_status *seg_status,
|
||||
const char *name_check,
|
||||
int with_open_count, int with_read_ahead,
|
||||
uint32_t major, uint32_t minor)
|
||||
{
|
||||
@@ -263,7 +253,6 @@ static int _info_run(const char *dlid, struct dm_info *dminfo,
|
||||
void *target = NULL;
|
||||
uint64_t target_start, target_length, start, length;
|
||||
char *target_name, *target_params;
|
||||
const char *devname;
|
||||
|
||||
if (seg_status) {
|
||||
dmtask = DM_DEVICE_STATUS;
|
||||
@@ -277,12 +266,7 @@ static int _info_run(const char *dlid, struct dm_info *dminfo,
|
||||
with_open_count, with_flush, 0)))
|
||||
return_0;
|
||||
|
||||
if (name_check && dminfo->exists &&
|
||||
(devname = dm_task_get_name(dmt)) &&
|
||||
(strcmp(name_check, devname) != 0))
|
||||
dminfo->exists = 0; /* mismatching name -> device does not exist */
|
||||
|
||||
if (with_read_ahead && read_ahead && dminfo->exists) {
|
||||
if (with_read_ahead && dminfo->exists) {
|
||||
if (!dm_task_get_read_ahead(dmt, read_ahead))
|
||||
goto_out;
|
||||
} else if (read_ahead)
|
||||
@@ -303,9 +287,6 @@ static int _info_run(const char *dlid, struct dm_info *dminfo,
|
||||
if (lv_is_vdo_pool(seg_status->seg->lv))
|
||||
length = get_vdo_pool_virtual_size(seg_status->seg);
|
||||
|
||||
if (lv_is_integrity(seg_status->seg->lv))
|
||||
length = seg_status->seg->integrity_data_sectors;
|
||||
|
||||
do {
|
||||
target = dm_get_next_target(dmt, target, &target_start,
|
||||
&target_length, &target_name, &target_params);
|
||||
@@ -678,7 +659,6 @@ int device_is_usable(struct device *dev, struct dev_usable_check_params check)
|
||||
|
||||
if (check.check_reserved && uuid &&
|
||||
(!strncmp(uuid, CRYPT_TEMP, sizeof(CRYPT_TEMP) - 1) ||
|
||||
!strncmp(uuid, CRYPT_SUBDEV, sizeof(CRYPT_SUBDEV) - 1) ||
|
||||
!strncmp(uuid, STRATIS, sizeof(STRATIS) - 1))) {
|
||||
/* Skip private crypto devices */
|
||||
log_debug_activation("%s: Reserved uuid %s on %s device %s not usable.",
|
||||
@@ -805,19 +785,18 @@ static int _original_uuid_format_check_required(struct cmd_context *cmd)
|
||||
|
||||
static int _info(struct cmd_context *cmd,
|
||||
const char *name, const char *dlid,
|
||||
int with_open_count, int with_read_ahead, int with_name_check,
|
||||
int with_open_count, int with_read_ahead,
|
||||
struct dm_info *dminfo, uint32_t *read_ahead,
|
||||
struct lv_seg_status *seg_status)
|
||||
{
|
||||
char old_style_dlid[sizeof(UUID_PREFIX) + 2 * ID_LEN];
|
||||
const char *suffix, *suffix_position;
|
||||
const char *name_check = (with_name_check) ? name : NULL;
|
||||
unsigned i = 0;
|
||||
|
||||
log_debug_activation("Getting device info for %s [%s].", name, dlid);
|
||||
|
||||
/* Check for dlid */
|
||||
if (!_info_run(dlid, dminfo, read_ahead, seg_status, name_check,
|
||||
if (!_info_run(dlid, dminfo, read_ahead, seg_status,
|
||||
with_open_count, with_read_ahead, 0, 0))
|
||||
return_0;
|
||||
|
||||
@@ -833,8 +812,7 @@ static int _info(struct cmd_context *cmd,
|
||||
(void) strncpy(old_style_dlid, dlid, sizeof(old_style_dlid));
|
||||
old_style_dlid[sizeof(old_style_dlid) - 1] = '\0';
|
||||
if (!_info_run(old_style_dlid, dminfo, read_ahead, seg_status,
|
||||
name_check, with_open_count, with_read_ahead,
|
||||
0, 0))
|
||||
with_open_count, with_read_ahead, 0, 0))
|
||||
return_0;
|
||||
if (dminfo->exists)
|
||||
return 1;
|
||||
@@ -847,12 +825,97 @@ static int _info(struct cmd_context *cmd,
|
||||
|
||||
/* Check for dlid before UUID_PREFIX was added */
|
||||
if (!_info_run(dlid + sizeof(UUID_PREFIX) - 1, dminfo, read_ahead, seg_status,
|
||||
name_check, with_open_count, with_read_ahead, 0, 0))
|
||||
with_open_count, with_read_ahead, 0, 0))
|
||||
return_0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* FIXME: could we just use dev_manager_info instead of this? */
|
||||
|
||||
int get_cache_vol_meta_data(struct cmd_context *cmd,
|
||||
struct logical_volume *lv,
|
||||
struct logical_volume *pool_lv,
|
||||
struct dm_info *info_meta, struct dm_info *info_data)
|
||||
{
|
||||
struct lv_segment *lvseg = first_seg(lv);
|
||||
union lvid lvid_meta;
|
||||
union lvid lvid_data;
|
||||
char *name_meta;
|
||||
char *name_data;
|
||||
char *dlid_meta;
|
||||
char *dlid_data;
|
||||
|
||||
memset(&lvid_meta, 0, sizeof(lvid_meta));
|
||||
memset(&lvid_data, 0, sizeof(lvid_meta));
|
||||
memcpy(&lvid_meta.id[0], &lv->vg->id, sizeof(struct id));
|
||||
memcpy(&lvid_meta.id[1], &lvseg->metadata_id, sizeof(struct id));
|
||||
memcpy(&lvid_data.id[0], &lv->vg->id, sizeof(struct id));
|
||||
memcpy(&lvid_data.id[1], &lvseg->data_id, sizeof(struct id));
|
||||
|
||||
if (!(dlid_meta = dm_build_dm_uuid(cmd->mem, UUID_PREFIX, (const char *)&lvid_meta.s, NULL)))
|
||||
return_0;
|
||||
if (!(dlid_data = dm_build_dm_uuid(cmd->mem, UUID_PREFIX, (const char *)&lvid_data.s, NULL)))
|
||||
return_0;
|
||||
if (!(name_meta = dm_build_dm_name(cmd->mem, lv->vg->name, pool_lv->name, "_cmeta")))
|
||||
return_0;
|
||||
if (!(name_data = dm_build_dm_name(cmd->mem, lv->vg->name, pool_lv->name, "_cdata")))
|
||||
return_0;
|
||||
|
||||
if (!_info(cmd, name_meta, dlid_meta, 1, 0, info_meta, NULL, NULL))
|
||||
return_0;
|
||||
|
||||
if (!_info(cmd, name_data, dlid_data, 1, 0, info_data, NULL, NULL))
|
||||
return_0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* FIXME: isn't there a simpler, more direct way to just remove these two dm
|
||||
* devs?
|
||||
*/
|
||||
|
||||
int remove_cache_vol_meta_data(struct cmd_context *cmd,
|
||||
struct dm_info *info_meta, struct dm_info *info_data)
|
||||
{
|
||||
struct dm_tree *dtree;
|
||||
struct dm_tree_node *root;
|
||||
struct dm_tree_node *child;
|
||||
const char *uuid;
|
||||
void *handle = NULL;
|
||||
|
||||
if (!(dtree = dm_tree_create()))
|
||||
goto_out;
|
||||
|
||||
if (!dm_tree_add_dev(dtree, info_meta->major, info_meta->minor))
|
||||
goto_out;
|
||||
|
||||
if (!dm_tree_add_dev(dtree, info_data->major, info_data->minor))
|
||||
goto_out;
|
||||
|
||||
if (!(root = dm_tree_find_node(dtree, 0, 0)))
|
||||
goto_out;
|
||||
|
||||
while ((child = dm_tree_next_child(&handle, root, 0))) {
|
||||
if (!(uuid = dm_tree_node_get_uuid(child))) {
|
||||
stack;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!dm_tree_deactivate_children(root, uuid, strlen(uuid))) {
|
||||
stack;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
dm_tree_free(dtree);
|
||||
return 1;
|
||||
out:
|
||||
dm_tree_free(dtree);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int dev_manager_remove_dm_major_minor(uint32_t major, uint32_t minor)
|
||||
{
|
||||
struct dm_task *dmt;
|
||||
@@ -877,7 +940,7 @@ out:
|
||||
|
||||
static int _info_by_dev(uint32_t major, uint32_t minor, struct dm_info *info)
|
||||
{
|
||||
return _info_run(NULL, info, NULL, NULL, NULL, 0, 0, major, minor);
|
||||
return _info_run(NULL, info, NULL, 0, 0, 0, major, minor);
|
||||
}
|
||||
|
||||
int dev_manager_check_prefix_dm_major_minor(uint32_t major, uint32_t minor, const char *prefix)
|
||||
@@ -899,7 +962,7 @@ int dev_manager_check_prefix_dm_major_minor(uint32_t major, uint32_t minor, cons
|
||||
|
||||
int dev_manager_info(struct cmd_context *cmd,
|
||||
const struct logical_volume *lv, const char *layer,
|
||||
int with_open_count, int with_read_ahead, int with_name_check,
|
||||
int with_open_count, int with_read_ahead,
|
||||
struct dm_info *dminfo, uint32_t *read_ahead,
|
||||
struct lv_seg_status *seg_status)
|
||||
{
|
||||
@@ -912,8 +975,7 @@ int dev_manager_info(struct cmd_context *cmd,
|
||||
if (!(dlid = build_dm_uuid(cmd->mem, lv, layer)))
|
||||
goto_out;
|
||||
|
||||
if (!(r = _info(cmd, name, dlid,
|
||||
with_open_count, with_read_ahead, with_name_check,
|
||||
if (!(r = _info(cmd, name, dlid, with_open_count, with_read_ahead,
|
||||
dminfo, read_ahead, seg_status)))
|
||||
stack;
|
||||
out:
|
||||
@@ -1325,6 +1387,8 @@ struct dev_manager *dev_manager_create(struct cmd_context *cmd,
|
||||
|
||||
dm_udev_set_sync_support(cmd->current_settings.udev_sync);
|
||||
|
||||
dm_list_init(&dm->pending_delete);
|
||||
|
||||
return dm;
|
||||
|
||||
bad:
|
||||
@@ -1564,6 +1628,9 @@ int dev_manager_cache_status(struct dev_manager *dm,
|
||||
if (!(dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
|
||||
return_0;
|
||||
|
||||
if (!(*status = dm_pool_zalloc(dm->mem, sizeof(struct lv_status_cache))))
|
||||
return_0;
|
||||
|
||||
if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, 0, 0, 0, 0, 0, 0)))
|
||||
return_0;
|
||||
|
||||
@@ -1586,11 +1653,8 @@ int dev_manager_cache_status(struct dev_manager *dm,
|
||||
if (!dm_get_status_cache(dm->mem, params, &c))
|
||||
goto_out;
|
||||
|
||||
if (!(*status = dm_pool_zalloc(dm->mem, sizeof(struct lv_status_cache))))
|
||||
goto_out;
|
||||
|
||||
(*status)->mem = dm->mem; /* User has to destroy this mem pool later */
|
||||
(*status)->cache = c;
|
||||
(*status)->mem = dm->mem; /* User has to destroy this mem pool later */
|
||||
if (c->fail || c->error) {
|
||||
(*status)->data_usage =
|
||||
(*status)->metadata_usage =
|
||||
@@ -1612,10 +1676,10 @@ out:
|
||||
}
|
||||
|
||||
int dev_manager_thin_pool_status(struct dev_manager *dm,
|
||||
const struct logical_volume *lv, int flush,
|
||||
struct lv_status_thin_pool **status)
|
||||
const struct logical_volume *lv,
|
||||
struct dm_status_thin_pool **status,
|
||||
int flush)
|
||||
{
|
||||
struct dm_status_thin_pool *dm_status;
|
||||
const char *dlid;
|
||||
struct dm_task *dmt;
|
||||
struct dm_info info;
|
||||
@@ -1636,31 +1700,11 @@ int dev_manager_thin_pool_status(struct dev_manager *dm,
|
||||
|
||||
dm_get_next_target(dmt, NULL, &start, &length, &type, ¶ms);
|
||||
|
||||
if (!type || strcmp(type, TARGET_NAME_THIN_POOL)) {
|
||||
log_error("Expected %s segment type but got %s instead.",
|
||||
TARGET_NAME_THIN_POOL, type ? type : "NULL");
|
||||
goto out;
|
||||
}
|
||||
/* FIXME Check for thin and check there's exactly one target */
|
||||
|
||||
if (!dm_get_status_thin_pool(dm->mem, params, &dm_status))
|
||||
if (!dm_get_status_thin_pool(dm->mem, params, status))
|
||||
goto_out;
|
||||
|
||||
if (!(*status = dm_pool_zalloc(dm->mem, sizeof(struct lv_status_thin_pool))))
|
||||
goto_out;
|
||||
|
||||
(*status)->mem = dm->mem;
|
||||
(*status)->thin_pool = dm_status;
|
||||
|
||||
if (dm_status->fail || dm_status->error) {
|
||||
(*status)->data_usage =
|
||||
(*status)->metadata_usage = DM_PERCENT_INVALID;
|
||||
} else {
|
||||
(*status)->data_usage = dm_make_percent(dm_status->used_data_blocks,
|
||||
dm_status->total_data_blocks);
|
||||
(*status)->metadata_usage = dm_make_percent(dm_status->used_metadata_blocks,
|
||||
dm_status->total_metadata_blocks);
|
||||
}
|
||||
|
||||
r = 1;
|
||||
out:
|
||||
dm_task_destroy(dmt);
|
||||
@@ -1668,73 +1712,54 @@ out:
|
||||
return r;
|
||||
}
|
||||
|
||||
int dev_manager_thin_status(struct dev_manager *dm,
|
||||
const struct logical_volume *lv, int flush,
|
||||
struct lv_status_thin **status)
|
||||
int dev_manager_thin_pool_percent(struct dev_manager *dm,
|
||||
const struct logical_volume *lv,
|
||||
int metadata, dm_percent_t *percent)
|
||||
{
|
||||
struct dm_status_thin *dm_status;
|
||||
char *name;
|
||||
const char *dlid;
|
||||
struct dm_task *dmt;
|
||||
struct dm_info info;
|
||||
uint64_t start, length;
|
||||
char *type = NULL;
|
||||
char *params = NULL;
|
||||
uint64_t csize;
|
||||
int r = 0;
|
||||
const char *layer = lv_layer(lv);
|
||||
|
||||
if (!(dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
|
||||
/* Build a name for the top layer */
|
||||
if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
|
||||
return_0;
|
||||
|
||||
if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, 0, 0, 0, 0, flush, 0)))
|
||||
if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
|
||||
return_0;
|
||||
|
||||
if (!info.exists)
|
||||
goto_out;
|
||||
log_debug_activation("Getting device status percentage for %s.", name);
|
||||
|
||||
dm_get_next_target(dmt, NULL, &start, &length, &type, ¶ms);
|
||||
if (!(_percent(dm, name, dlid, TARGET_NAME_THIN_POOL, 0,
|
||||
(metadata) ? lv : NULL, percent, NULL, 1)))
|
||||
return_0;
|
||||
|
||||
if (!type || strcmp(type, TARGET_NAME_THIN)) {
|
||||
log_error("Expected %s segment type but got %s instead.",
|
||||
TARGET_NAME_THIN, type ? type : "NULL");
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!dm_get_status_thin(dm->mem, params, &dm_status))
|
||||
goto_out;
|
||||
|
||||
if (!(*status = dm_pool_zalloc(dm->mem, sizeof(struct lv_status_thin))))
|
||||
goto_out;
|
||||
|
||||
(*status)->mem = dm->mem;
|
||||
(*status)->thin = dm_status;
|
||||
|
||||
if (dm_status->fail)
|
||||
(*status)->usage = DM_PERCENT_INVALID;
|
||||
else {
|
||||
/* Pool allocates whole chunk so round-up to nearest one */
|
||||
csize = first_seg(first_seg(lv)->pool_lv)->chunk_size;
|
||||
csize = ((lv->size + csize - 1) / csize) * csize;
|
||||
if (dm_status->mapped_sectors > csize) {
|
||||
log_warn("WARNING: LV %s maps %s while the size is only %s.",
|
||||
display_lvname(lv),
|
||||
display_size(dm->cmd, dm_status->mapped_sectors),
|
||||
display_size(dm->cmd, csize));
|
||||
/* Don't show nonsense numbers like i.e. 1000% full */
|
||||
dm_status->mapped_sectors = csize;
|
||||
}
|
||||
(*status)->usage = dm_make_percent(dm_status->mapped_sectors, csize);
|
||||
}
|
||||
|
||||
r = 1;
|
||||
out:
|
||||
dm_task_destroy(dmt);
|
||||
|
||||
return r;
|
||||
return 1;
|
||||
}
|
||||
|
||||
int dev_manager_thin_percent(struct dev_manager *dm,
|
||||
const struct logical_volume *lv,
|
||||
int mapped, dm_percent_t *percent)
|
||||
{
|
||||
char *name;
|
||||
const char *dlid;
|
||||
const char *layer = lv_layer(lv);
|
||||
|
||||
/* Build a name for the top layer */
|
||||
if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
|
||||
return_0;
|
||||
|
||||
if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
|
||||
return_0;
|
||||
|
||||
log_debug_activation("Getting device status percentage for %s", name);
|
||||
|
||||
if (!(_percent(dm, name, dlid, TARGET_NAME_THIN, 0,
|
||||
(mapped) ? NULL : lv, percent, NULL, 1)))
|
||||
return_0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Explore state of running DM table to obtain currently used deviceId
|
||||
*/
|
||||
int dev_manager_thin_device_id(struct dev_manager *dm,
|
||||
const struct logical_volume *lv,
|
||||
uint32_t *device_id)
|
||||
@@ -1744,16 +1769,10 @@ int dev_manager_thin_device_id(struct dev_manager *dm,
|
||||
struct dm_info info;
|
||||
uint64_t start, length;
|
||||
char *params, *target_type = NULL;
|
||||
const char *layer = lv_layer(lv);
|
||||
int r = 0;
|
||||
|
||||
if (lv_is_merging_origin(lv) && !lv_info(lv->vg->cmd, lv, 1, NULL, 0, 0))
|
||||
/* If the merge has already happened, that table
|
||||
* can already be using correct LV without -real layer */
|
||||
layer = NULL;
|
||||
|
||||
/* Build dlid for the thin layer */
|
||||
if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
|
||||
if (!(dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
|
||||
return_0;
|
||||
|
||||
if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, &info, NULL, dlid, 0, 0, 0, 0, 1, 0)))
|
||||
@@ -1968,7 +1987,7 @@ static uint16_t _get_udev_flags(struct dev_manager *dm, const struct logical_vol
|
||||
/* New thin-pool is regular LV with -tpool UUID suffix. */
|
||||
udev_flags |= DM_UDEV_DISABLE_DISK_RULES_FLAG |
|
||||
DM_UDEV_DISABLE_OTHER_RULES_FLAG;
|
||||
else if (layer || !lv_is_visible(lv) || lv_is_thin_pool(lv) || lv_is_vdo_pool(lv))
|
||||
else if (layer || !lv_is_visible(lv) || lv_is_thin_pool(lv))
|
||||
udev_flags |= DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG |
|
||||
DM_UDEV_DISABLE_DISK_RULES_FLAG |
|
||||
DM_UDEV_DISABLE_OTHER_RULES_FLAG;
|
||||
@@ -2015,20 +2034,10 @@ static uint16_t _get_udev_flags(struct dev_manager *dm, const struct logical_vol
|
||||
|
||||
static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
const struct logical_volume *lv, int origin_only);
|
||||
static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
const struct logical_volume *lv,
|
||||
struct lv_activate_opts *laopts,
|
||||
const char *layer);
|
||||
/*
|
||||
* Check for device holders (ATM used only for removed pvmove targets)
|
||||
* and add them into dtree structures.
|
||||
* When 'laopts != NULL' add them as new nodes - which also corrects READ_AHEAD.
|
||||
* Note: correct table are already explicitelly PRELOADED.
|
||||
*/
|
||||
|
||||
static int _check_holder(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
const struct logical_volume *lv,
|
||||
struct lv_activate_opts *laopts,
|
||||
uint32_t major, const char *d_name)
|
||||
const struct logical_volume *lv, uint32_t major,
|
||||
const char *d_name)
|
||||
{
|
||||
const char *default_uuid_prefix = dm_uuid_prefix();
|
||||
const size_t default_uuid_prefix_len = strlen(default_uuid_prefix);
|
||||
@@ -2080,11 +2089,8 @@ static int _check_holder(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
log_debug_activation("Found holder %s of %s.",
|
||||
display_lvname(lv_det),
|
||||
display_lvname(lv));
|
||||
if (!laopts) {
|
||||
if (!_add_lv_to_dtree(dm, dtree, lv_det, 0))
|
||||
goto_out;
|
||||
} else if (!_add_new_lv_to_dtree(dm, dtree, lv_det, laopts, 0))
|
||||
goto_out;
|
||||
if (!_add_lv_to_dtree(dm, dtree, lv_det, 0))
|
||||
goto_out;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2101,9 +2107,7 @@ out:
|
||||
* i.e. PVMOVE is being finished and final table is going to be resumed.
|
||||
*/
|
||||
static int _add_holders_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
const struct logical_volume *lv,
|
||||
struct lv_activate_opts *laopts,
|
||||
const struct dm_info *info)
|
||||
const struct logical_volume *lv, struct dm_info *info)
|
||||
{
|
||||
const char *sysfs_dir = dm_sysfs_dir();
|
||||
char sysfs_path[PATH_MAX];
|
||||
@@ -2126,7 +2130,7 @@ static int _add_holders_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
while ((dirent = readdir(d)))
|
||||
/* Expects minor is added to 'dm-' prefix */
|
||||
if (!strncmp(dirent->d_name, "dm-", 3) &&
|
||||
!_check_holder(dm, dtree, lv, laopts, info->major, dirent->d_name))
|
||||
!_check_holder(dm, dtree, lv, info->major, dirent->d_name))
|
||||
goto_out;
|
||||
|
||||
r = 1;
|
||||
@@ -2146,10 +2150,10 @@ static int _add_dev_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
|
||||
return_0;
|
||||
|
||||
if (!(dlid = build_dm_uuid(dm->track_pending_delete ? dm->cmd->pending_delete_mem : dm->mem, lv, layer)))
|
||||
if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
|
||||
return_0;
|
||||
|
||||
if (!_info(dm->cmd, name, dlid, 1, 0, 0, &info, NULL, NULL))
|
||||
if (!_info(dm->cmd, name, dlid, 1, 0, &info, NULL, NULL))
|
||||
return_0;
|
||||
|
||||
/*
|
||||
@@ -2188,7 +2192,7 @@ static int _add_dev_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
if (info.exists && dm->track_pending_delete) {
|
||||
log_debug_activation("Tracking pending delete for %s (%s).",
|
||||
display_lvname(lv), dlid);
|
||||
if (!str_list_add(dm->cmd->pending_delete_mem, &dm->cmd->pending_delete, dlid))
|
||||
if (!str_list_add(dm->mem, &dm->pending_delete, dlid))
|
||||
return_0;
|
||||
}
|
||||
|
||||
@@ -2198,7 +2202,7 @@ static int _add_dev_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
*/
|
||||
if (info.exists && !lv_is_pvmove(lv) &&
|
||||
!strchr(lv->name, '_') && !strncmp(lv->name, "pvmove", 6))
|
||||
if (!_add_holders_to_dtree(dm, dtree, lv, NULL, &info))
|
||||
if (!_add_holders_to_dtree(dm, dtree, lv, &info))
|
||||
return_0;
|
||||
|
||||
return 1;
|
||||
@@ -2424,51 +2428,6 @@ static int _pool_register_callback(struct dev_manager *dm,
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Add special devices _cmeta & _cdata on top of CacheVol to dm tree */
|
||||
static int _add_cvol_subdev_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
const struct logical_volume *lv, int meta_or_data)
|
||||
{
|
||||
const char *layer = meta_or_data ? "cmeta" : "cdata";
|
||||
struct dm_pool *mem = dm->track_pending_delete ? dm->cmd->pending_delete_mem : dm->mem;
|
||||
const struct logical_volume *pool_lv = first_seg(lv)->pool_lv;
|
||||
struct lv_segment *lvseg = first_seg(lv);
|
||||
struct dm_info info;
|
||||
char *name ,*dlid;
|
||||
union lvid lvid = { { { "" } } };
|
||||
|
||||
memcpy(&lvid.id[0], &lv->vg->id, sizeof(struct id));
|
||||
/* When ID is provided in form of metadata_id or data_id, otherwise use CVOL ID */
|
||||
memcpy(&lvid.id[1],
|
||||
(meta_or_data && lvseg->metadata_id) ? lvseg->metadata_id :
|
||||
(lvseg->data_id) ? lvseg->data_id : &pool_lv->lvid.id[1], sizeof(struct id));
|
||||
|
||||
if (!(dlid = dm_build_dm_uuid(mem, UUID_PREFIX, (const char *)&lvid.s, layer)))
|
||||
return_0;
|
||||
|
||||
/* Name is actually not really needed here, but aids debugging... */
|
||||
if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, pool_lv->name, layer)))
|
||||
return_0;
|
||||
|
||||
if (!_info(dm->cmd, name, dlid, 1, 0, 0, &info, NULL, NULL))
|
||||
return_0;
|
||||
|
||||
if (info.exists) {
|
||||
if (!dm_tree_add_dev_with_udev_flags(dtree, info.major, info.minor,
|
||||
_get_udev_flags(dm, lv, layer, 0, 0, 0))) {
|
||||
log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree.", info.major, info.minor);
|
||||
return 0;
|
||||
}
|
||||
if (dm->track_pending_delete) {
|
||||
log_debug_activation("Tracking pending delete for %s %s (%s).",
|
||||
layer, display_lvname(lv), dlid);
|
||||
if (!str_list_add(mem, &dm->cmd->pending_delete, dlid))
|
||||
return_0;
|
||||
}
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Declaration to resolve suspend tree and message passing for thin-pool */
|
||||
static int _add_target_to_dtree(struct dev_manager *dm,
|
||||
struct dm_tree_node *dnode,
|
||||
@@ -2503,11 +2462,51 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
/* Unused cache pool is activated as metadata */
|
||||
}
|
||||
|
||||
if (lv_is_cache(lv) && (plv = (first_seg(lv)->pool_lv)) && lv_is_cache_vol(plv)) {
|
||||
if (!_add_cvol_subdev_to_dtree(dm, dtree, lv, 0) ||
|
||||
!_add_cvol_subdev_to_dtree(dm, dtree, lv, 1) ||
|
||||
!_add_dev_to_dtree(dm, dtree, plv, lv_layer(plv)))
|
||||
if (lv_is_cache(lv) && lv_is_cache_vol(first_seg(lv)->pool_lv) && dm->activation) {
|
||||
struct logical_volume *pool_lv = first_seg(lv)->pool_lv;
|
||||
struct lv_segment *lvseg = first_seg(lv);
|
||||
struct dm_info info_meta;
|
||||
struct dm_info info_data;
|
||||
union lvid lvid_meta;
|
||||
union lvid lvid_data;
|
||||
char *name_meta;
|
||||
char *name_data;
|
||||
char *dlid_meta;
|
||||
char *dlid_data;
|
||||
|
||||
memset(&lvid_meta, 0, sizeof(lvid_meta));
|
||||
memset(&lvid_data, 0, sizeof(lvid_meta));
|
||||
memcpy(&lvid_meta.id[0], &lv->vg->id, sizeof(struct id));
|
||||
memcpy(&lvid_meta.id[1], &lvseg->metadata_id, sizeof(struct id));
|
||||
memcpy(&lvid_data.id[0], &lv->vg->id, sizeof(struct id));
|
||||
memcpy(&lvid_data.id[1], &lvseg->data_id, sizeof(struct id));
|
||||
|
||||
if (!(dlid_meta = dm_build_dm_uuid(dm->mem, UUID_PREFIX, (const char *)&lvid_meta.s, NULL)))
|
||||
return_0;
|
||||
if (!(dlid_data = dm_build_dm_uuid(dm->mem, UUID_PREFIX, (const char *)&lvid_data.s, NULL)))
|
||||
return_0;
|
||||
if (!(name_meta = dm_build_dm_name(dm->mem, lv->vg->name, pool_lv->name, "_cmeta")))
|
||||
return_0;
|
||||
if (!(name_data = dm_build_dm_name(dm->mem, lv->vg->name, pool_lv->name, "_cdata")))
|
||||
return_0;
|
||||
|
||||
if (!_info(dm->cmd, name_meta, dlid_meta, 1, 0, &info_meta, NULL, NULL))
|
||||
return_0;
|
||||
|
||||
if (!_info(dm->cmd, name_data, dlid_data, 1, 0, &info_data, NULL, NULL))
|
||||
return_0;
|
||||
|
||||
if (info_meta.exists &&
|
||||
!dm_tree_add_dev_with_udev_flags(dtree, info_meta.major, info_meta.minor,
|
||||
_get_udev_flags(dm, lv, NULL, 0, 0, 0))) {
|
||||
log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree.", info_meta.major, info_meta.minor);
|
||||
}
|
||||
|
||||
if (info_data.exists &&
|
||||
!dm_tree_add_dev_with_udev_flags(dtree, info_data.major, info_data.minor,
|
||||
_get_udev_flags(dm, lv, NULL, 0, 0, 0))) {
|
||||
log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree.", info_data.major, info_data.minor);
|
||||
}
|
||||
}
|
||||
|
||||
if (!origin_only && !_add_dev_to_dtree(dm, dtree, lv, NULL))
|
||||
@@ -2593,15 +2592,6 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
}
|
||||
}
|
||||
|
||||
if (lv_is_vdo_pool(lv)) {
|
||||
/*
|
||||
* For both origin_only and !origin_only
|
||||
* skips test for -vpool-real and vpool-cow
|
||||
*/
|
||||
if (!_add_dev_to_dtree(dm, dtree, lv, lv_layer(lv)))
|
||||
return_0;
|
||||
}
|
||||
|
||||
if (lv_is_cache(lv)) {
|
||||
if (!origin_only && !dm->activation && !dm->track_pending_delete) {
|
||||
/* Setup callback for non-activation partial tree */
|
||||
@@ -2663,10 +2653,6 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
if (!_add_lv_to_dtree(dm, dtree, seg->writecache, dm->activation ? origin_only : 1))
|
||||
return_0;
|
||||
}
|
||||
if (seg->integrity_meta_dev && seg_is_integrity(seg)) {
|
||||
if (!_add_lv_to_dtree(dm, dtree, seg->integrity_meta_dev, dm->activation ? origin_only : 1))
|
||||
return_0;
|
||||
}
|
||||
if (seg->pool_lv &&
|
||||
(lv_is_cache_pool(seg->pool_lv) || lv_is_cache_vol(seg->pool_lv) || dm->track_external_lv_deps) &&
|
||||
/* When activating and not origin_only detect linear 'overlay' over pool */
|
||||
@@ -2677,8 +2663,7 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
if (seg_type(seg, s) == AREA_LV && seg_lv(seg, s) &&
|
||||
/* origin only for cache without pending delete */
|
||||
(!dm->track_pending_delete || !lv_is_cache(lv)) &&
|
||||
!_add_lv_to_dtree(dm, dtree, seg_lv(seg, s),
|
||||
lv_is_vdo_pool(seg_lv(seg, s)) ? 1 : 0))
|
||||
!_add_lv_to_dtree(dm, dtree, seg_lv(seg, s), 0))
|
||||
return_0;
|
||||
if (seg_is_raid_with_meta(seg) && seg->meta_areas && seg_metalv(seg, s) &&
|
||||
!_add_lv_to_dtree(dm, dtree, seg_metalv(seg, s), 0))
|
||||
@@ -2749,7 +2734,7 @@ static char *_add_error_or_zero_device(struct dev_manager *dm, struct dm_tree *d
|
||||
seg->lv->name, errid)))
|
||||
return_NULL;
|
||||
|
||||
if (!_info(dm->cmd, name, dlid, 1, 0, 0, &info, NULL, NULL))
|
||||
if (!_info(dm->cmd, name, dlid, 1, 0, &info, NULL, NULL))
|
||||
return_NULL;
|
||||
|
||||
if (!info.exists) {
|
||||
@@ -2904,11 +2889,8 @@ static int _add_layer_target_to_dtree(struct dev_manager *dm,
|
||||
if (!(layer_dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
|
||||
return_0;
|
||||
|
||||
|
||||
/* Add linear mapping over layered LV */
|
||||
/* From VDO layer expose ONLY vdo pool header, we would need to use virtual size otherwise */
|
||||
if (!add_linear_area_to_dtree(dnode, lv_is_vdo_pool(lv) ? first_seg(lv)->vdo_pool_header_size : lv->size,
|
||||
lv->vg->extent_size,
|
||||
if (!add_linear_area_to_dtree(dnode, lv->size, lv->vg->extent_size,
|
||||
lv->vg->cmd->use_linear_target,
|
||||
lv->vg->name, lv->name) ||
|
||||
!dm_tree_node_add_target_area(dnode, NULL, layer_dlid, 0))
|
||||
@@ -3018,6 +3000,11 @@ static int _add_target_to_dtree(struct dev_manager *dm,
|
||||
&dm->pvmove_mirror_count);
|
||||
}
|
||||
|
||||
static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
const struct logical_volume *lv,
|
||||
struct lv_activate_opts *laopts,
|
||||
const char *layer);
|
||||
|
||||
static int _add_new_external_lv_to_dtree(struct dev_manager *dm,
|
||||
struct dm_tree *dtree,
|
||||
struct logical_volume *external_lv,
|
||||
@@ -3123,11 +3110,6 @@ static int _add_segment_to_dtree(struct dev_manager *dm,
|
||||
lv_layer(seg->writecache)))
|
||||
return_0;
|
||||
|
||||
if (seg->integrity_meta_dev && !laopts->origin_only &&
|
||||
!_add_new_lv_to_dtree(dm, dtree, seg->integrity_meta_dev, laopts,
|
||||
lv_layer(seg->integrity_meta_dev)))
|
||||
return_0;
|
||||
|
||||
/* Add any LVs used by this segment */
|
||||
for (s = 0; s < seg->area_count; ++s) {
|
||||
if ((seg_type(seg, s) == AREA_LV) &&
|
||||
@@ -3136,9 +3118,7 @@ static int _add_segment_to_dtree(struct dev_manager *dm,
|
||||
/* origin only for cache without pending delete */
|
||||
(!dm->track_pending_delete || !seg_is_cache(seg)) &&
|
||||
!_add_new_lv_to_dtree(dm, dtree, seg_lv(seg, s),
|
||||
laopts,
|
||||
lv_is_vdo_pool(seg_lv(seg, s)) ?
|
||||
lv_layer(seg_lv(seg, s)) : NULL))
|
||||
laopts, NULL))
|
||||
return_0;
|
||||
if (seg_is_raid_with_meta(seg) && seg->meta_areas && seg_metalv(seg, s) &&
|
||||
!lv_is_raid_image_with_tracking(seg_lv(seg, s)) &&
|
||||
@@ -3213,37 +3193,34 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
char *dlid_meta;
|
||||
char *dlid_data;
|
||||
char *dlid_pool;
|
||||
uint64_t meta_size = first_seg(lv)->metadata_len;
|
||||
uint64_t data_size = first_seg(lv)->data_len;
|
||||
uint64_t meta_len = first_seg(lv)->metadata_len;
|
||||
uint64_t data_len = first_seg(lv)->data_len;
|
||||
uint16_t udev_flags = _get_udev_flags(dm, lv, layer,
|
||||
laopts->noscan, laopts->temporary,
|
||||
0);
|
||||
|
||||
if (lv_is_pending_delete(lvseg->lv))
|
||||
dm->track_pending_delete = 1;
|
||||
log_debug("Add cache pool %s to dtree before cache %s", pool_lv->name, lv->name);
|
||||
|
||||
log_debug("Add cachevol %s to dtree before cache %s.", pool_lv->name, lv->name);
|
||||
|
||||
if (!_add_new_lv_to_dtree(dm, dtree, pool_lv, laopts, lv_layer(pool_lv))) {
|
||||
log_error("Failed to add cachevol to dtree before cache.");
|
||||
return 0;
|
||||
if (!_add_new_lv_to_dtree(dm, dtree, pool_lv, laopts, NULL)) {
|
||||
log_error("Failed to add cachepool to dtree before cache");
|
||||
return_0;
|
||||
}
|
||||
|
||||
memset(&lvid_meta, 0, sizeof(lvid_meta));
|
||||
memset(&lvid_data, 0, sizeof(lvid_meta));
|
||||
memcpy(&lvid_meta.id[0], &vg->id, sizeof(struct id));
|
||||
memcpy(&lvid_meta.id[1], lvseg->metadata_id ? : &pool_lv->lvid.id[1], sizeof(struct id));
|
||||
memcpy(&lvid_meta.id[1], &lvseg->metadata_id, sizeof(struct id));
|
||||
memcpy(&lvid_data.id[0], &vg->id, sizeof(struct id));
|
||||
memcpy(&lvid_data.id[1], lvseg->data_id ? : &pool_lv->lvid.id[1], sizeof(struct id));
|
||||
memcpy(&lvid_data.id[1], &lvseg->data_id, sizeof(struct id));
|
||||
|
||||
if (!(dlid_meta = dm_build_dm_uuid(dm->mem, UUID_PREFIX, (const char *)&lvid_meta.s, "cmeta")))
|
||||
if (!(dlid_meta = dm_build_dm_uuid(dm->mem, UUID_PREFIX, (const char *)&lvid_meta.s, NULL)))
|
||||
return_0;
|
||||
if (!(dlid_data = dm_build_dm_uuid(dm->mem, UUID_PREFIX, (const char *)&lvid_data.s, "cdata")))
|
||||
if (!(dlid_data = dm_build_dm_uuid(dm->mem, UUID_PREFIX, (const char *)&lvid_data.s, NULL)))
|
||||
return_0;
|
||||
|
||||
if (!(name_meta = dm_build_dm_name(dm->mem, vg->name, pool_lv->name, "cmeta")))
|
||||
if (!(name_meta = dm_build_dm_name(dm->mem, vg->name, pool_lv->name, "_cmeta")))
|
||||
return_0;
|
||||
if (!(name_data = dm_build_dm_name(dm->mem, vg->name, pool_lv->name, "cdata")))
|
||||
if (!(name_data = dm_build_dm_name(dm->mem, vg->name, pool_lv->name, "_cdata")))
|
||||
return_0;
|
||||
|
||||
if (!(dlid_pool = build_dm_uuid(dm->mem, pool_lv, NULL)))
|
||||
@@ -3260,23 +3237,17 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
udev_flags)))
|
||||
return_0;
|
||||
|
||||
if (dm->track_pending_delete) {
|
||||
log_debug_activation("Using error for pending meta delete %s.", display_lvname(lv));
|
||||
if (!dm_tree_node_add_error_target(dnode_meta, meta_size))
|
||||
return_0;
|
||||
} else {
|
||||
/* add load_segment to meta dnode: linear, size of meta area */
|
||||
if (!add_linear_area_to_dtree(dnode_meta,
|
||||
meta_size,
|
||||
lv->vg->extent_size,
|
||||
lv->vg->cmd->use_linear_target,
|
||||
lv->vg->name, lv->name))
|
||||
return_0;
|
||||
/* add load_segment to meta dnode: linear, size of meta area */
|
||||
if (!add_linear_area_to_dtree(dnode_meta,
|
||||
meta_len,
|
||||
lv->vg->extent_size,
|
||||
lv->vg->cmd->use_linear_target,
|
||||
lv->vg->name, lv->name))
|
||||
return_0;
|
||||
|
||||
/* add seg_area to prev load_seg: offset 0 maps to cachepool lv offset 0 */
|
||||
if (!dm_tree_node_add_target_area(dnode_meta, NULL, dlid_pool, 0))
|
||||
return_0;
|
||||
}
|
||||
/* add seg_area to prev load_seg: offset 0 maps to cachepool lv offset 0 */
|
||||
if (!dm_tree_node_add_target_area(dnode_meta, NULL, dlid_pool, 0))
|
||||
return_0;
|
||||
|
||||
/* add data dnode */
|
||||
if (!(dnode_data = dm_tree_add_new_dev_with_udev_flags(dtree,
|
||||
@@ -3289,23 +3260,17 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
udev_flags)))
|
||||
return_0;
|
||||
|
||||
if (dm->track_pending_delete) {
|
||||
log_debug_activation("Using error for pending data delete %s.", display_lvname(lv));
|
||||
if (!dm_tree_node_add_error_target(dnode_data, data_size))
|
||||
return_0;
|
||||
} else {
|
||||
/* add load_segment to data dnode: linear, size of data area */
|
||||
if (!add_linear_area_to_dtree(dnode_data,
|
||||
data_size,
|
||||
lv->vg->extent_size,
|
||||
lv->vg->cmd->use_linear_target,
|
||||
lv->vg->name, lv->name))
|
||||
return_0;
|
||||
/* add load_segment to data dnode: linear, size of data area */
|
||||
if (!add_linear_area_to_dtree(dnode_data,
|
||||
data_len,
|
||||
lv->vg->extent_size,
|
||||
lv->vg->cmd->use_linear_target,
|
||||
lv->vg->name, lv->name))
|
||||
return_0;
|
||||
|
||||
/* add seg_area to prev load_seg: offset 0 maps to cachepool lv after meta */
|
||||
if (!dm_tree_node_add_target_area(dnode_data, NULL, dlid_pool, meta_size))
|
||||
return_0;
|
||||
}
|
||||
/* add seg_area to prev load_seg: offset 0 maps to cachepool lv after meta */
|
||||
if (!dm_tree_node_add_target_area(dnode_data, NULL, dlid_pool, meta_len))
|
||||
return_0;
|
||||
}
|
||||
|
||||
/* FIXME Seek a simpler way to lay out the snapshot-merge tree. */
|
||||
@@ -3368,10 +3333,6 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
if (!layer && lv_is_new_thin_pool(lv))
|
||||
layer = lv_layer(lv);
|
||||
|
||||
/* Adds -real to the dm uuid of wcorig LV. */
|
||||
if (!layer && lv_is_writecache_origin(lv))
|
||||
layer = lv_layer(lv); /* "real" */
|
||||
|
||||
if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
|
||||
return_0;
|
||||
|
||||
@@ -3449,9 +3410,8 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
if (!_add_snapshot_target_to_dtree(dm, dnode, lv, laopts))
|
||||
return_0;
|
||||
} else if (!layer && ((lv_is_thin_pool(lv) && !lv_is_new_thin_pool(lv)) ||
|
||||
lv_is_vdo_pool(lv) ||
|
||||
lv_is_external_origin(lv))) {
|
||||
/* External origin or 'used' Thin pool or VDO pool is using layer */
|
||||
/* External origin or 'used' Thin pool is using layer */
|
||||
if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts, lv_layer(lv)))
|
||||
return_0;
|
||||
if (!_add_layer_target_to_dtree(dm, dnode, lv))
|
||||
@@ -3464,10 +3424,6 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
if (max_stripe_size < seg->stripe_size * seg->area_count)
|
||||
max_stripe_size = seg->stripe_size * seg->area_count;
|
||||
}
|
||||
|
||||
if (!layer && lv_is_vdo_pool(lv) &&
|
||||
!_add_layer_target_to_dtree(dm, dnode, lv))
|
||||
return_0;
|
||||
}
|
||||
|
||||
/* Setup thin pool callback */
|
||||
@@ -3483,17 +3439,6 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
|
||||
!_pool_register_callback(dm, dnode, lv))
|
||||
return_0;
|
||||
|
||||
/*
|
||||
* Update tables for ANY PVMOVE holders for active LV where the name starts with 'pvmove',
|
||||
* but it's not anymore PVMOVE LV and also it's not a PVMOVE _mimage LV.
|
||||
* When resume happens, tables MUST be already preloaded with correct entries!
|
||||
* (since we can't preload different table while devices are suspended)
|
||||
*/
|
||||
if (!lv_is_pvmove(lv) && !strncmp(lv->name, "pvmove", 6) && !strchr(lv->name, '_') &&
|
||||
(dinfo = _cached_dm_info(dm->mem, dtree, lv, NULL)))
|
||||
if (!_add_holders_to_dtree(dm, dtree, lv, laopts, dinfo))
|
||||
return_0;
|
||||
|
||||
if (read_ahead == DM_READ_AHEAD_AUTO) {
|
||||
/* we need RA at least twice a whole stripe - see the comment in md/raid0.c */
|
||||
read_ahead = max_stripe_size * 2;
|
||||
@@ -3613,6 +3558,13 @@ static int _clean_tree(struct dev_manager *dm, struct dm_tree_node *root, const
|
||||
const char *name, *uuid;
|
||||
struct dm_str_list *dl;
|
||||
|
||||
/* Deactivate any tracked pending delete nodes */
|
||||
dm_list_iterate_items(dl, &dm->pending_delete) {
|
||||
log_debug_activation("Deleting tracked UUID %s.", dl->str);
|
||||
if (!dm_tree_deactivate_children(root, dl->str, strlen(dl->str)))
|
||||
return_0;
|
||||
}
|
||||
|
||||
while ((child = dm_tree_next_child(&handle, root, 0))) {
|
||||
if (!(name = dm_tree_node_get_name(child)))
|
||||
continue;
|
||||
@@ -3633,28 +3585,10 @@ static int _clean_tree(struct dev_manager *dm, struct dm_tree_node *root, const
|
||||
if (non_toplevel_tree_dlid && !strcmp(non_toplevel_tree_dlid, uuid))
|
||||
continue;
|
||||
|
||||
if (!(uuid = dm_pool_strdup(dm->cmd->pending_delete_mem, uuid))) {
|
||||
log_error("_clean_tree: Failed to duplicate uuid.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!str_list_add(dm->cmd->pending_delete_mem, &dm->cmd->pending_delete, uuid))
|
||||
if (!dm_tree_deactivate_children(root, uuid, strlen(uuid)))
|
||||
return_0;
|
||||
}
|
||||
|
||||
/* Deactivate any tracked pending delete nodes */
|
||||
if (!dm_list_empty(&dm->cmd->pending_delete) && !dm_get_suspended_counter()) {
|
||||
fs_unlock();
|
||||
dm_tree_set_cookie(root, fs_get_cookie());
|
||||
dm_list_iterate_items(dl, &dm->cmd->pending_delete) {
|
||||
log_debug_activation("Deleting tracked UUID %s.", dl->str);
|
||||
if (!dm_tree_deactivate_children(root, dl->str, strlen(dl->str)))
|
||||
return_0;
|
||||
}
|
||||
dm_list_init(&dm->cmd->pending_delete);
|
||||
dm_pool_empty(dm->cmd->pending_delete_mem);
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -3735,10 +3669,7 @@ static int _tree_action(struct dev_manager *dm, const struct logical_volume *lv,
|
||||
/* Add all required new devices to tree */
|
||||
if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts,
|
||||
(lv_is_origin(lv) && laopts->origin_only) ? "real" :
|
||||
(laopts->origin_only &&
|
||||
(lv_is_thin_pool(lv) ||
|
||||
lv_is_vdo_pool(lv))) ?
|
||||
lv_layer(lv) : NULL))
|
||||
(lv_is_thin_pool(lv) && laopts->origin_only) ? "tpool" : NULL))
|
||||
goto_out;
|
||||
|
||||
/* Preload any devices required before any suspensions */
|
||||
@@ -3785,6 +3716,19 @@ int dev_manager_activate(struct dev_manager *dm, const struct logical_volume *lv
|
||||
if (!_tree_action(dm, lv, laopts, ACTIVATE))
|
||||
return_0;
|
||||
|
||||
/*
|
||||
* When lvm2 resumes a device and shortly after that it removes it,
|
||||
* udevd rule will try to blindly call 'dmsetup info' on already removed
|
||||
* device leaving the trace inside syslog about failing operation.
|
||||
*
|
||||
* TODO: It's not completely clear this call here is the best fix.
|
||||
* Maybe there can be a better sequence, but ATM we do usually resume
|
||||
* error device i.e. on cache deletion and remove it.
|
||||
* TODO2: there could be more similar cases!
|
||||
*/
|
||||
if (!dm_list_empty(&dm->pending_delete))
|
||||
fs_unlock();
|
||||
|
||||
if (!_tree_action(dm, lv, laopts, CLEAN))
|
||||
return_0;
|
||||
|
||||
|
||||
@@ -47,7 +47,7 @@ void dev_manager_exit(void);
*/
int dev_manager_info(struct cmd_context *cmd, const struct logical_volume *lv,
const char *layer,
int with_open_count, int with_read_ahead, int with_name_check,
int with_open_count, int with_read_ahead,
struct dm_info *dminfo, uint32_t *read_ahead,
struct lv_seg_status *seg_status);

@@ -69,15 +69,19 @@ int dev_manager_writecache_message(struct dev_manager *dm,
int dev_manager_cache_status(struct dev_manager *dm,
const struct logical_volume *lv,
struct lv_status_cache **status);
int dev_manager_thin_status(struct dev_manager *dm,
const struct logical_volume *lv, int flush,
struct lv_status_thin **status);
int dev_manager_thin_pool_status(struct dev_manager *dm,
const struct logical_volume *lv,
struct dm_status_thin_pool **status,
int flush);
int dev_manager_thin_pool_percent(struct dev_manager *dm,
const struct logical_volume *lv,
int metadata, dm_percent_t *percent);
int dev_manager_thin_percent(struct dev_manager *dm,
const struct logical_volume *lv,
int mapped, dm_percent_t *percent);
int dev_manager_thin_device_id(struct dev_manager *dm,
const struct logical_volume *lv,
uint32_t *device_id);
int dev_manager_thin_pool_status(struct dev_manager *dm,
const struct logical_volume *lv, int flush,
struct lv_status_thin_pool **status);
int dev_manager_vdo_pool_status(struct dev_manager *dm,
const struct logical_volume *lv,
struct lv_status_vdo **vdo_status,
@@ -105,4 +109,12 @@ int dev_manager_remove_dm_major_minor(uint32_t major, uint32_t minor);

int dev_manager_check_prefix_dm_major_minor(uint32_t major, uint32_t minor, const char *prefix);

int get_cache_vol_meta_data(struct cmd_context *cmd,
struct logical_volume *lv,
struct logical_volume *pool_lv,
struct dm_info *info_meta, struct dm_info *info_data);

int remove_cache_vol_meta_data(struct cmd_context *cmd,
struct dm_info *info_meta, struct dm_info *info_data);

#endif

@@ -313,7 +313,7 @@ struct fs_op_parms {
char *lv_name;
char *dev;
char *old_lv_name;
char names[];
char names[0];
};

static void _store_str(char **pos, char **ptr, const char *str)
1531
lib/cache/lvmcache.c
vendored
File diff suppressed because it is too large
103
lib/cache/lvmcache.h
vendored
@@ -41,9 +41,14 @@ struct lvmcache_vginfo;
|
||||
|
||||
/*
|
||||
* vgsummary represents a summary of the VG that is read
|
||||
* without a lock during label scan. It's used to populate
|
||||
* basic lvmcache vginfo/info during label scan prior to
|
||||
* vg_read().
|
||||
* without a lock. The info does not come through vg_read(),
|
||||
* but through reading mdas. It provides information about
|
||||
* the VG that is needed to lock the VG and then read it fully
|
||||
* with vg_read(), after which the VG summary should be checked
|
||||
* against the full VG metadata to verify it was correct (since
|
||||
* it was read without a lock.)
|
||||
*
|
||||
* Once read, vgsummary information is saved in lvmcache_vginfo.
|
||||
*/
|
||||
struct lvmcache_vgsummary {
|
||||
const char *vgname;
|
||||
@@ -58,8 +63,6 @@ struct lvmcache_vgsummary {
|
||||
int mda_num; /* 1 = summary from mda1, 2 = summary from mda2 */
|
||||
unsigned mda_ignored:1;
|
||||
unsigned zero_offset:1;
|
||||
unsigned mismatch:1; /* lvmcache sets if this summary differs from previous values */
|
||||
struct dm_list pvsummaries;
|
||||
};
|
||||
|
||||
int lvmcache_init(struct cmd_context *cmd);
|
||||
@@ -68,20 +71,18 @@ void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset);
|
||||
|
||||
int lvmcache_label_scan(struct cmd_context *cmd);
|
||||
int lvmcache_label_rescan_vg(struct cmd_context *cmd, const char *vgname, const char *vgid);
|
||||
int lvmcache_label_rescan_vg_rw(struct cmd_context *cmd, const char *vgname, const char *vgid);
|
||||
int lvmcache_label_reopen_vg_rw(struct cmd_context *cmd, const char *vgname, const char *vgid);
|
||||
|
||||
/* Add/delete a device */
|
||||
struct lvmcache_info *lvmcache_add(struct cmd_context *cmd, struct labeller *labeller, const char *pvid,
|
||||
struct lvmcache_info *lvmcache_add(struct labeller *labeller, const char *pvid,
|
||||
struct device *dev, uint64_t label_sector,
|
||||
const char *vgname, const char *vgid,
|
||||
uint32_t vgstatus, int *is_duplicate);
|
||||
int lvmcache_add_orphan_vginfo(struct cmd_context *cmd, const char *vgname, struct format_type *fmt);
|
||||
int lvmcache_add_orphan_vginfo(const char *vgname, struct format_type *fmt);
|
||||
void lvmcache_del(struct lvmcache_info *info);
|
||||
void lvmcache_del_dev(struct device *dev);
|
||||
|
||||
/* Update things */
|
||||
int lvmcache_update_vgname_and_id(struct cmd_context *cmd, struct lvmcache_info *info,
|
||||
int lvmcache_update_vgname_and_id(struct lvmcache_info *info,
|
||||
struct lvmcache_vgsummary *vgsummary);
|
||||
int lvmcache_update_vg_from_read(struct volume_group *vg, unsigned precommitted);
|
||||
int lvmcache_update_vg_from_write(struct volume_group *vg);
|
||||
@@ -90,8 +91,12 @@ void lvmcache_lock_vgname(const char *vgname, int read_only);
|
||||
void lvmcache_unlock_vgname(const char *vgname);
|
||||
|
||||
/* Queries */
|
||||
const struct format_type *lvmcache_fmt_from_vgname(struct cmd_context *cmd, const char *vgname, const char *vgid, unsigned revalidate_labels);
|
||||
int lvmcache_lookup_mda(struct lvmcache_vgsummary *vgsummary);
|
||||
|
||||
/* Decrement and test if there are still vg holders in vginfo. */
|
||||
int lvmcache_vginfo_holders_dec_and_test_for_zero(struct lvmcache_vginfo *vginfo);
|
||||
|
||||
struct lvmcache_vginfo *lvmcache_vginfo_from_vgname(const char *vgname,
|
||||
const char *vgid);
|
||||
struct lvmcache_vginfo *lvmcache_vginfo_from_vgid(const char *vgid);
|
||||
@@ -101,11 +106,14 @@ const char *lvmcache_vgid_from_vgname(struct cmd_context *cmd, const char *vgnam
|
||||
struct device *lvmcache_device_from_pvid(struct cmd_context *cmd, const struct id *pvid, uint64_t *label_sector);
|
||||
const char *lvmcache_vgname_from_info(struct lvmcache_info *info);
|
||||
const struct format_type *lvmcache_fmt_from_info(struct lvmcache_info *info);
|
||||
int lvmcache_vgs_locked(void);
|
||||
|
||||
int lvmcache_get_vgnameids(struct cmd_context *cmd,
|
||||
struct dm_list *vgnameids,
|
||||
const char *only_this_vgname,
|
||||
int include_internal);
|
||||
int lvmcache_get_vgnameids(struct cmd_context *cmd, int include_internal,
|
||||
struct dm_list *vgnameids);
|
||||
|
||||
/* Returns list of struct dm_str_list containing pool-allocated copy of pvids */
|
||||
struct dm_list *lvmcache_get_pvids(struct cmd_context *cmd, const char *vgname,
|
||||
const char *vgid);
|
||||
|
||||
void lvmcache_drop_metadata(const char *vgname, int drop_precommitted);
|
||||
void lvmcache_commit_metadata(const char *vgname);
|
||||
@@ -159,18 +167,19 @@ int lvmcache_foreach_pv(struct lvmcache_vginfo *vginfo,
|
||||
uint64_t lvmcache_device_size(struct lvmcache_info *info);
|
||||
void lvmcache_set_device_size(struct lvmcache_info *info, uint64_t size);
|
||||
struct device *lvmcache_device(struct lvmcache_info *info);
|
||||
int lvmcache_is_orphan(struct lvmcache_info *info);
|
||||
unsigned lvmcache_mda_count(struct lvmcache_info *info);
|
||||
int lvmcache_vgid_is_cached(const char *vgid);
|
||||
uint64_t lvmcache_smallest_mda_size(struct lvmcache_info *info);
|
||||
|
||||
bool lvmcache_has_duplicate_devs(void);
|
||||
void lvmcache_del_dev_from_duplicates(struct device *dev);
|
||||
bool lvmcache_dev_is_unused_duplicate(struct device *dev);
|
||||
int lvmcache_pvid_in_unused_duplicates(const char *pvid);
|
||||
int lvmcache_get_unused_duplicates(struct cmd_context *cmd, struct dm_list *head);
|
||||
int vg_has_duplicate_pvs(struct volume_group *vg);
|
||||
|
||||
int lvmcache_found_duplicate_pvs(void);
|
||||
int lvmcache_found_duplicate_vgnames(void);
|
||||
bool lvmcache_has_duplicate_local_vgname(const char *vgid, const char *vgname);
|
||||
|
||||
void lvmcache_pvscan_duplicate_check(struct cmd_context *cmd);
|
||||
|
||||
int lvmcache_get_unused_duplicate_devs(struct cmd_context *cmd, struct dm_list *head);
|
||||
|
||||
int vg_has_duplicate_pvs(struct volume_group *vg);
|
||||
|
||||
int lvmcache_contains_lock_type_sanlock(struct cmd_context *cmd);
|
||||
|
||||
@@ -179,18 +188,40 @@ void lvmcache_get_max_name_lengths(struct cmd_context *cmd,
|
||||
|
||||
int lvmcache_vg_is_foreign(struct cmd_context *cmd, const char *vgname, const char *vgid);
|
||||
|
||||
bool lvmcache_scan_mismatch(struct cmd_context *cmd, const char *vgname, const char *vgid);
|
||||
void lvmcache_lock_ordering(int enable);
|
||||
|
||||
int lvmcache_dev_is_unchosen_duplicate(struct device *dev);
|
||||
|
||||
void lvmcache_remove_unchosen_duplicate(struct device *dev);
|
||||
|
||||
int lvmcache_pvid_in_unchosen_duplicates(const char *pvid);
|
||||
|
||||
int lvmcache_get_vg_devs(struct cmd_context *cmd,
|
||||
struct lvmcache_vginfo *vginfo,
|
||||
struct dm_list *devs);
|
||||
void lvmcache_set_independent_location(const char *vgname);
|
||||
|
||||
int lvmcache_scan_mismatch(struct cmd_context *cmd, const char *vgname, const char *vgid);
|
||||
|
||||
int lvmcache_vginfo_has_pvid(struct lvmcache_vginfo *vginfo, char *pvid);
|
||||
|
||||
/*
|
||||
* These are clvmd-specific functions and are not related to lvmcache.
|
||||
* FIXME: rename these with a clvm_ prefix in place of lvmcache_
|
||||
*/
|
||||
void lvmcache_save_vg(struct volume_group *vg, int precommitted);
|
||||
struct volume_group *lvmcache_get_saved_vg(const char *vgid, int precommitted);
|
||||
struct volume_group *lvmcache_get_saved_vg_latest(const char *vgid);
|
||||
void lvmcache_drop_saved_vgid(const char *vgid);
|
||||
|
||||
uint64_t lvmcache_max_metadata_size(void);
|
||||
void lvmcache_save_metadata_size(uint64_t val);
|
||||
|
||||
int dev_in_device_list(struct device *dev, struct dm_list *head);
|
||||
|
||||
bool lvmcache_has_bad_metadata(struct device *dev);
|
||||
int lvmcache_has_bad_metadata(struct device *dev);
|
||||
|
||||
bool lvmcache_has_old_metadata(struct cmd_context *cmd, const char *vgname, const char *vgid, struct device *dev);
|
||||
int lvmcache_has_old_metadata(struct cmd_context *cmd, const char *vgname, const char *vgid, struct device *dev);
|
||||
|
||||
void lvmcache_get_outdated_devs(struct cmd_context *cmd,
|
||||
const char *vgname, const char *vgid,
|
||||
@@ -200,9 +231,9 @@ void lvmcache_get_outdated_mdas(struct cmd_context *cmd,
|
||||
struct device *dev,
|
||||
struct dm_list **mdas);
|
||||
|
||||
bool lvmcache_is_outdated_dev(struct cmd_context *cmd,
|
||||
const char *vgname, const char *vgid,
|
||||
struct device *dev);
|
||||
int lvmcache_is_outdated_dev(struct cmd_context *cmd,
|
||||
const char *vgname, const char *vgid,
|
||||
struct device *dev);
|
||||
|
||||
void lvmcache_del_outdated_devs(struct cmd_context *cmd,
|
||||
const char *vgname, const char *vgid);
|
||||
@@ -210,16 +241,14 @@ void lvmcache_del_outdated_devs(struct cmd_context *cmd,
|
||||
void lvmcache_save_bad_mda(struct lvmcache_info *info, struct metadata_area *mda);
|
||||
|
||||
void lvmcache_get_bad_mdas(struct cmd_context *cmd,
|
||||
const char *vgname, const char *vgid,
|
||||
struct dm_list *bad_mda_list);
|
||||
const char *vgname, const char *vgid,
|
||||
struct dm_list *bad_mdas);
|
||||
|
||||
void lvmcache_get_mdas(struct cmd_context *cmd,
|
||||
const char *vgname, const char *vgid,
|
||||
struct dm_list *mda_list);
|
||||
struct device *lvmcache_get_dump_src_dev(struct cmd_context *cmd, const char *vgname);
|
||||
|
||||
const char *dev_filtered_reason(struct device *dev);
|
||||
const char *devname_error_reason(const char *devname);
|
||||
|
||||
struct metadata_area *lvmcache_get_dev_mda(struct device *dev, int mda_num);
|
||||
struct metadata_area *lvmcache_get_mda(struct cmd_context *cmd,
|
||||
const char *vgname,
|
||||
struct device *dev,
|
||||
int use_mda_num);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -504,6 +504,9 @@ static int _cache_text_import(struct lv_segment *seg,
|
||||
|
||||
seg->lv->status |= strstr(seg->lv->name, "_corig") ? LV_PENDING_DELETE : 0;
|
||||
|
||||
if (!attach_pool_lv(seg, pool_lv, NULL, NULL, NULL))
|
||||
return_0;
|
||||
|
||||
if (!_settings_text_import(seg, sn))
|
||||
return_0;
|
||||
|
||||
@@ -525,26 +528,17 @@ static int _cache_text_import(struct lv_segment *seg,
|
||||
if (!dm_config_get_uint64(sn, "data_len", &seg->data_len))
|
||||
return SEG_LOG_ERROR("Couldn't read data_len in");
|
||||
|
||||
/* Will use CVOL ID, when metadata_id is not provided */
|
||||
if (dm_config_has_node(sn, "metadata_id")) {
|
||||
if (!(seg->metadata_id = dm_pool_alloc(seg->lv->vg->vgmem, sizeof(*seg->metadata_id))))
|
||||
return SEG_LOG_ERROR("Couldn't allocate metadata_id in");
|
||||
if (!dm_config_get_str(sn, "metadata_id", &uuid))
|
||||
return SEG_LOG_ERROR("Couldn't read metadata_id in");
|
||||
if (!id_read_format(seg->metadata_id, uuid))
|
||||
return SEG_LOG_ERROR("Couldn't format metadata_id in");
|
||||
}
|
||||
if (!dm_config_get_str(sn, "metadata_id", &uuid))
|
||||
return SEG_LOG_ERROR("Couldn't read metadata_id in");
|
||||
|
||||
/* Will use CVOL ID, when data_id is not provided */
|
||||
if (dm_config_has_node(sn, "data_id")) {
|
||||
if (!(seg->data_id = dm_pool_alloc(seg->lv->vg->vgmem, sizeof(*seg->data_id))))
|
||||
return SEG_LOG_ERROR("Couldn't allocate data_id in");
|
||||
if (!dm_config_get_str(sn, "data_id", &uuid))
|
||||
return SEG_LOG_ERROR("Couldn't read data_id in");
|
||||
if (!id_read_format(seg->data_id, uuid))
|
||||
return SEG_LOG_ERROR("Couldn't format data_id in");
|
||||
}
|
||||
pool_lv->status |= LV_CACHE_VOL; /* Mark as cachevol LV */
|
||||
if (!id_read_format(&seg->metadata_id, uuid))
|
||||
return SEG_LOG_ERROR("Couldn't format metadata_id in");
|
||||
|
||||
if (!dm_config_get_str(sn, "data_id", &uuid))
|
||||
return SEG_LOG_ERROR("Couldn't read data_id in");
|
||||
|
||||
if (!id_read_format(&seg->data_id, uuid))
|
||||
return SEG_LOG_ERROR("Couldn't format data_id in");
|
||||
} else {
|
||||
/* Do not call this when LV is cache_vol. */
|
||||
/* load order is unknown, could be cache origin or pool LV, so check for both */
|
||||
@@ -552,9 +546,6 @@ static int _cache_text_import(struct lv_segment *seg,
|
||||
_fix_missing_defaults(first_seg(pool_lv));
|
||||
}
|
||||
|
||||
if (!attach_pool_lv(seg, pool_lv, NULL, NULL, NULL))
|
||||
return_0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -590,17 +581,13 @@ static int _cache_text_export(const struct lv_segment *seg, struct formatter *f)
|
||||
outf(f, "data_start = " FMTu64, seg->data_start);
|
||||
outf(f, "data_len = " FMTu64, seg->data_len);
|
||||
|
||||
if (seg->metadata_id) {
|
||||
if (!id_write_format(seg->metadata_id, buffer, sizeof(buffer)))
|
||||
return_0;
|
||||
outf(f, "metadata_id = \"%s\"", buffer);
|
||||
}
|
||||
if (!id_write_format(&seg->metadata_id, buffer, sizeof(buffer)))
|
||||
return_0;
|
||||
outf(f, "metadata_id = \"%s\"", buffer);
|
||||
|
||||
if (seg->data_id) {
|
||||
if (!id_write_format(seg->data_id, buffer, sizeof(buffer)))
|
||||
return_0;
|
||||
outf(f, "data_id = \"%s\"", buffer);
|
||||
}
|
||||
if (!id_write_format(&seg->data_id, buffer, sizeof(buffer)))
|
||||
return_0;
|
||||
outf(f, "data_id = \"%s\"", buffer);
|
||||
}
|
||||
|
||||
return 1;
|
||||
@@ -708,13 +695,13 @@ static int _cache_add_target_line(struct dev_manager *dm,
|
||||
memset(&metadata_lvid, 0, sizeof(metadata_lvid));
|
||||
memset(&data_lvid, 0, sizeof(data_lvid));
|
||||
memcpy(&metadata_lvid.id[0], &seg->lv->vg->id, sizeof(struct id));
|
||||
memcpy(&metadata_lvid.id[1], (seg->metadata_id) ? : &seg->pool_lv->lvid.id[1], sizeof(struct id));
|
||||
memcpy(&metadata_lvid.id[1], &seg->metadata_id, sizeof(struct id));
|
||||
memcpy(&data_lvid.id[0], &seg->lv->vg->id, sizeof(struct id));
|
||||
memcpy(&data_lvid.id[1], (seg->data_id) ? : &seg->pool_lv->lvid.id[1], sizeof(struct id));
|
||||
memcpy(&data_lvid.id[1], &seg->data_id, sizeof(struct id));
|
||||
|
||||
if (!(metadata_uuid = dm_build_dm_uuid(mem, UUID_PREFIX, (const char *)&metadata_lvid.s, "cmeta")))
|
||||
if (!(metadata_uuid = dm_build_dm_uuid(mem, UUID_PREFIX, (const char *)&metadata_lvid.s, NULL)))
|
||||
return_0;
|
||||
if (!(data_uuid = dm_build_dm_uuid(mem, UUID_PREFIX, (const char *)&data_lvid.s, "cdata")))
|
||||
if (!(data_uuid = dm_build_dm_uuid(mem, UUID_PREFIX, (const char *)&data_lvid.s, NULL)))
|
||||
return_0;
|
||||
}
|
||||
|
||||
|
||||
@@ -1276,7 +1276,7 @@ int init_lvmcache_orphans(struct cmd_context *cmd)
|
||||
struct format_type *fmt;
|
||||
|
||||
dm_list_iterate_items(fmt, &cmd->formats)
|
||||
if (!lvmcache_add_orphan_vginfo(cmd, fmt->orphan_vg_name, fmt))
|
||||
if (!lvmcache_add_orphan_vginfo(fmt->orphan_vg_name, fmt))
|
||||
return_0;
|
||||
|
||||
return 1;
|
||||
@@ -1362,11 +1362,6 @@ static int _init_segtypes(struct cmd_context *cmd)
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
#ifdef INTEGRITY_INTERNAL
|
||||
if (!init_integrity_segtypes(cmd, &seglib))
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -1497,8 +1492,6 @@ void destroy_config_context(struct cmd_context *cmd)
|
||||
dm_pool_destroy(cmd->mem);
|
||||
if (cmd->libmem)
|
||||
dm_pool_destroy(cmd->libmem);
|
||||
if (cmd->pending_delete_mem)
|
||||
dm_pool_destroy(cmd->pending_delete_mem);
|
||||
|
||||
free(cmd);
|
||||
}
|
||||
@@ -1527,9 +1520,6 @@ struct cmd_context *create_config_context(void)
|
||||
if (!(cmd->mem = dm_pool_create("command", 4 * 1024)))
|
||||
goto out;
|
||||
|
||||
if (!(cmd->pending_delete_mem = dm_pool_create("pending_delete", 1024)))
|
||||
goto_out;
|
||||
|
||||
dm_list_init(&cmd->config_files);
|
||||
dm_list_init(&cmd->tags);
|
||||
dm_list_init(&cmd->hints);
|
||||
@@ -1598,7 +1588,6 @@ struct cmd_context *create_toolcontext(unsigned is_clvmd,
|
||||
dm_list_init(&cmd->formats);
|
||||
dm_list_init(&cmd->segtypes);
|
||||
dm_list_init(&cmd->tags);
|
||||
dm_list_init(&cmd->hints);
|
||||
dm_list_init(&cmd->config_files);
|
||||
label_init();
|
||||
|
||||
@@ -1678,9 +1667,6 @@ struct cmd_context *create_toolcontext(unsigned is_clvmd,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!(cmd->pending_delete_mem = dm_pool_create("pending_delete", 1024)))
|
||||
goto_out;
|
||||
|
||||
if (!_init_lvm_conf(cmd))
|
||||
goto_out;
|
||||
|
||||
@@ -1729,6 +1715,8 @@ struct cmd_context *create_toolcontext(unsigned is_clvmd,
|
||||
if (!init_lvmcache_orphans(cmd))
|
||||
goto_out;
|
||||
|
||||
dm_list_init(&cmd->unused_duplicate_devs);
|
||||
|
||||
if (!_init_segtypes(cmd))
|
||||
goto_out;
|
||||
|
||||
@@ -1748,8 +1736,6 @@ struct cmd_context *create_toolcontext(unsigned is_clvmd,
|
||||
cmd->current_settings = cmd->default_settings;
|
||||
|
||||
cmd->initialized.config = 1;
|
||||
|
||||
dm_list_init(&cmd->pending_delete);
|
||||
out:
|
||||
if (!cmd->initialized.config) {
|
||||
destroy_toolcontext(cmd);
|
||||
@@ -1835,7 +1821,7 @@ int refresh_toolcontext(struct cmd_context *cmd)
|
||||
*/
|
||||
|
||||
activation_release();
|
||||
hints_exit(cmd);
|
||||
hints_exit();
|
||||
lvmcache_destroy(cmd, 0, 0);
|
||||
label_scan_destroy(cmd);
|
||||
label_exit();
|
||||
@@ -1938,12 +1924,6 @@ int refresh_toolcontext(struct cmd_context *cmd)
|
||||
|
||||
cmd->initialized.config = 1;
|
||||
|
||||
if (!dm_list_empty(&cmd->pending_delete)) {
|
||||
log_debug(INTERNAL_ERROR "Unprocessed pending delete for %d devices.",
|
||||
dm_list_size(&cmd->pending_delete));
|
||||
dm_list_init(&cmd->pending_delete);
|
||||
}
|
||||
|
||||
if (cmd->initialized.connections && !init_connections(cmd))
|
||||
return_0;
|
||||
|
||||
@@ -1961,7 +1941,7 @@ void destroy_toolcontext(struct cmd_context *cmd)
|
||||
|
||||
archive_exit(cmd);
|
||||
backup_exit(cmd);
|
||||
hints_exit(cmd);
|
||||
hints_exit();
|
||||
lvmcache_destroy(cmd, 0, 0);
|
||||
label_scan_destroy(cmd);
|
||||
label_exit();
|
||||
@@ -1984,8 +1964,6 @@ void destroy_toolcontext(struct cmd_context *cmd)
|
||||
if (cmd->libmem)
|
||||
dm_pool_destroy(cmd->libmem);
|
||||
|
||||
if (cmd->pending_delete_mem)
|
||||
dm_pool_destroy(cmd->pending_delete_mem);
|
||||
#ifndef VALGRIND_POOL
|
||||
if (cmd->linebuffer) {
|
||||
/* Reset stream buffering to defaults */
|
||||
|
||||
@@ -148,12 +148,10 @@ struct cmd_context {
unsigned unknown_system_id:1;
unsigned include_historical_lvs:1; /* also process/report/display historical LVs */
unsigned record_historical_lvs:1; /* record historical LVs */
unsigned include_exported_vgs:1;
unsigned include_foreign_vgs:1; /* report/display cmds can reveal foreign VGs */
unsigned include_shared_vgs:1; /* report/display cmds can reveal lockd VGs */
unsigned include_active_foreign_vgs:1; /* cmd should process foreign VGs with active LVs */
unsigned vg_read_print_access_error:1; /* print access errors from vg_read */
unsigned allow_mixed_block_sizes:1;
unsigned force_access_clustered:1;
unsigned lockd_gl_disable:1;
unsigned lockd_vg_disable:1;
@@ -162,9 +160,6 @@ struct cmd_context {
unsigned lockd_vg_default_sh:1;
unsigned lockd_vg_enforce_sh:1;
unsigned lockd_lv_sh_for_ex:1;
unsigned lockd_global_ex:1; /* set while global lock held ex (lockd) */
unsigned lockf_global_ex:1; /* set while global lock held ex (flock) */
unsigned nolocking:1;
unsigned vg_notify:1;
unsigned lv_notify:1;
unsigned pv_notify:1;
@@ -174,7 +169,6 @@ struct cmd_context {
unsigned pvscan_cache_single:1;
unsigned can_use_one_scan:1;
unsigned is_clvmd:1;
unsigned md_component_detection:1;
unsigned use_full_md_check:1;
unsigned is_activating:1;
unsigned enable_hints:1; /* hints are enabled for cmds in general */
@@ -182,14 +176,12 @@ struct cmd_context {
unsigned pvscan_recreate_hints:1; /* enable special case hint handling for pvscan --cache */
unsigned scan_lvs:1;
unsigned wipe_outdated_pvs:1;
unsigned filter_nodata_only:1; /* only use filters that do not require data from the dev */

/*
 * Devices and filtering.
 */
struct dev_filter *filter;
struct dm_list hints;
const char *md_component_checks;

/*
 * Configuration.
@@ -240,8 +232,7 @@ struct cmd_context {
const char *report_list_item_separator;
const char *time_format;
unsigned rand_seed;
struct dm_list pending_delete; /* list of LVs for removal */
struct dm_pool *pending_delete_mem; /* memory pool for pending deletes */
struct dm_list unused_duplicate_devs; /* save preferences between lvmcache instances */
};

/*
@@ -503,10 +503,10 @@ int config_file_read_fd(struct dm_config_tree *cft, struct device *dev, dev_io_r
{
char *fb, *fe;
int r = 0;
int sz, use_plain_read = 1;
int use_mmap = 1;
off_t mmap_offset = 0;
char *buf = NULL;
struct config_source *cs = dm_config_get_custom(cft);
size_t rsize;

if (!_is_file_based_config_source(cs->type)) {
log_error(INTERNAL_ERROR "config_file_read_fd: expected file, special file "
@@ -515,28 +515,26 @@ int config_file_read_fd(struct dm_config_tree *cft, struct device *dev, dev_io_r
return 0;
}

/* Only use plain read with regular files */
/* Only use mmap with regular files */
if (!(dev->flags & DEV_REGULAR) || size2)
use_plain_read = 0;
use_mmap = 0;

if (!(buf = malloc(size + size2))) {
log_error("Failed to allocate circular buffer.");
return 0;
}

if (use_plain_read) {
/* Note: also used for lvm.conf to read all settings */
for (rsize = 0; rsize < size; rsize += sz) {
do {
sz = read(dev_fd(dev), buf + rsize, size - rsize);
} while ((sz < 0) && ((errno == EINTR) || (errno == EAGAIN)));

if (sz < 0) {
log_sys_error("read", dev_name(dev));
goto out;
}
if (use_mmap) {
mmap_offset = offset % lvm_getpagesize();
/* memory map the file */
fb = mmap((caddr_t) 0, size + mmap_offset, PROT_READ,
MAP_PRIVATE, dev_fd(dev), offset - mmap_offset);
if (fb == (caddr_t) (-1)) {
log_sys_error("mmap", dev_name(dev));
goto out;
}
fb = fb + mmap_offset;
} else {
if (!(buf = malloc(size + size2))) {
log_error("Failed to allocate circular buffer.");
return 0;
}

if (!dev_read_bytes(dev, offset, size, buf))
goto out;

@@ -544,9 +542,9 @@ int config_file_read_fd(struct dm_config_tree *cft, struct device *dev, dev_io_r
if (!dev_read_bytes(dev, offset2, size2, buf + size))
goto out;
}
}

fb = buf;
fb = buf;
}

/*
 * The checksum passed in is the checksum from the mda_header
@@ -575,7 +573,15 @@ int config_file_read_fd(struct dm_config_tree *cft, struct device *dev, dev_io_r
r = 1;

out:
free(buf);
if (!use_mmap)
free(buf);
else {
/* unmap the file */
if (munmap(fb - mmap_offset, size + mmap_offset)) {
log_sys_error("munmap", dev_name(dev));
r = 0;
}
}

return r;
}
@@ -288,7 +288,7 @@ cfg_array(devices_preferred_names_CFG, "preferred_names", devices_CFG_SECTION, C
|
||||
"preferred_names = [ \"^/dev/mpath/\", \"^/dev/mapper/mpath\", \"^/dev/[hs]d\" ]\n"
|
||||
"#\n")
|
||||
|
||||
cfg_array(devices_filter_CFG, "filter", devices_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, "#Sa|.*|", vsn(1, 0, 0), NULL, 0, NULL,
|
||||
cfg_array(devices_filter_CFG, "filter", devices_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, "#Sa|.*/|", vsn(1, 0, 0), NULL, 0, NULL,
|
||||
"Limit the block devices that are used by LVM commands.\n"
|
||||
"This is a list of regular expressions used to accept or reject block\n"
|
||||
"device path names. Each regex is delimited by a vertical bar '|'\n"
|
||||
@@ -306,7 +306,7 @@ cfg_array(devices_filter_CFG, "filter", devices_CFG_SECTION, CFG_DEFAULT_COMMENT
|
||||
"#\n"
|
||||
"Example\n"
|
||||
"Accept every block device:\n"
|
||||
"filter = [ \"a|.*|\" ]\n"
|
||||
"filter = [ \"a|.*/|\" ]\n"
|
||||
"Reject the cdrom drive:\n"
|
||||
"filter = [ \"r|/dev/cdrom|\" ]\n"
|
||||
"Work with just loopback devices, e.g. for testing:\n"
|
||||
@@ -314,10 +314,10 @@ cfg_array(devices_filter_CFG, "filter", devices_CFG_SECTION, CFG_DEFAULT_COMMENT
|
||||
"Accept all loop devices and ide drives except hdc:\n"
|
||||
"filter = [ \"a|loop|\", \"r|/dev/hdc|\", \"a|/dev/ide|\", \"r|.*|\" ]\n"
|
||||
"Use anchors to be very specific:\n"
|
||||
"filter = [ \"a|^/dev/hda8$|\", \"r|.*|\" ]\n"
|
||||
"filter = [ \"a|^/dev/hda8$|\", \"r|.*/|\" ]\n"
|
||||
"#\n")
|
||||
|
||||
cfg_array(devices_global_filter_CFG, "global_filter", devices_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, "#Sa|.*|", vsn(2, 2, 98), NULL, 0, NULL,
|
||||
cfg_array(devices_global_filter_CFG, "global_filter", devices_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, "#Sa|.*/|", vsn(2, 2, 98), NULL, 0, NULL,
|
||||
"Limit the block devices that are used by LVM system components.\n"
|
||||
"Because devices/filter may be overridden from the command line, it is\n"
|
||||
"not suitable for system-wide device filtering, e.g. udev.\n"
|
||||
@@ -368,30 +368,7 @@ cfg(devices_multipath_component_detection_CFG, "multipath_component_detection",
|
||||
"Ignore devices that are components of DM multipath devices.\n")
|
||||
|
||||
cfg(devices_md_component_detection_CFG, "md_component_detection", devices_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_MD_COMPONENT_DETECTION, vsn(1, 0, 18), NULL, 0, NULL,
|
||||
"Enable detection and exclusion of MD component devices.\n"
|
||||
"An MD component device is a block device that MD uses as part\n"
|
||||
"of a software RAID virtual device. When an LVM PV is created\n"
|
||||
"on an MD device, LVM must only use the top level MD device as\n"
|
||||
"the PV, and should ignore the underlying component devices.\n"
|
||||
"In cases where the MD superblock is located at the end of the\n"
|
||||
"component devices, it is more difficult for LVM to consistently\n"
|
||||
"identify an MD component, see the md_component_checks setting.\n")
|
||||
|
||||
cfg(devices_md_component_checks_CFG, "md_component_checks", devices_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_MD_COMPONENT_CHECKS, vsn(2, 3, 2), NULL, 0, NULL,
|
||||
"The checks LVM should use to detect MD component devices.\n"
|
||||
"MD component devices are block devices used by MD software RAID.\n"
|
||||
"#\n"
|
||||
"Accepted values:\n"
|
||||
" auto\n"
|
||||
" LVM will skip scanning the end of devices when it has other\n"
|
||||
" indications that the device is not an MD component.\n"
|
||||
" start\n"
|
||||
" LVM will only scan the start of devices for MD superblocks.\n"
|
||||
" This does not incur extra I/O by LVM.\n"
|
||||
" full\n"
|
||||
" LVM will scan the start and end of devices for MD superblocks.\n"
|
||||
" This requires an extra read at the end of devices.\n"
|
||||
"#\n")
|
||||
"Ignore devices that are components of software RAID (md) devices.\n")
|
||||
|
||||
cfg(devices_fw_raid_component_detection_CFG, "fw_raid_component_detection", devices_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_FW_RAID_COMPONENT_DETECTION, vsn(2, 2, 112), NULL, 0, NULL,
|
||||
"Ignore devices that are components of firmware RAID devices.\n"
|
||||
@@ -502,11 +479,6 @@ cfg(devices_allow_changes_with_duplicate_pvs_CFG, "allow_changes_with_duplicate_
|
||||
"Enabling this setting allows the VG to be used as usual even with\n"
|
||||
"uncertain devices.\n")
|
||||
|
||||
cfg(devices_allow_mixed_block_sizes_CFG, "allow_mixed_block_sizes", devices_CFG_SECTION, 0, CFG_TYPE_BOOL, 0, vsn(2, 3, 6), NULL, 0, NULL,
|
||||
"Allow PVs in the same VG with different logical block sizes.\n"
|
||||
"When allowed, the user is responsible to ensure that an LV is\n"
|
||||
"using PVs with matching block sizes when necessary.\n")
|
||||
|
||||
cfg_array(allocation_cling_tag_list_CFG, "cling_tag_list", allocation_CFG_SECTION, CFG_DEFAULT_UNDEFINED, CFG_TYPE_STRING, NULL, vsn(2, 2, 77), NULL, 0, NULL,
|
||||
"Advise LVM which PVs to use when searching for new space.\n"
|
||||
"When searching for free space to extend an LV, the 'cling' allocation\n"
|
||||
@@ -568,7 +540,7 @@ cfg(allocation_raid_stripe_all_devices_CFG, "raid_stripe_all_devices", allocatio
|
||||
"stripes to use.\n"
|
||||
"This was the default behaviour until release 2.02.162.\n")
|
||||
|
||||
cfg(allocation_cache_pool_metadata_require_separate_pvs_CFG, "cache_pool_metadata_require_separate_pvs", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA | CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_CACHE_POOL_METADATA_REQUIRE_SEPARATE_PVS, vsn(2, 2, 106), NULL, 0, NULL,
|
||||
cfg(allocation_cache_pool_metadata_require_separate_pvs_CFG, "cache_pool_metadata_require_separate_pvs", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA, CFG_TYPE_BOOL, DEFAULT_CACHE_POOL_METADATA_REQUIRE_SEPARATE_PVS, vsn(2, 2, 106), NULL, 0, NULL,
|
||||
"Cache pool metadata and data will always use different PVs.\n")
|
||||
|
||||
cfg(allocation_cache_pool_cachemode_CFG, "cache_pool_cachemode", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA | CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_CACHE_MODE, vsn(2, 2, 113), NULL, vsn(2, 2, 128),
|
||||
@@ -625,8 +597,8 @@ cfg(allocation_cache_pool_max_chunks_CFG, "cache_pool_max_chunks", allocation_CF
|
||||
"For cache target v1.9 the recommended maximumm is 1000000 chunks.\n"
|
||||
"Using cache pool with more chunks may degrade cache performance.\n")
|
||||
|
||||
cfg(allocation_thin_pool_metadata_require_separate_pvs_CFG, "thin_pool_metadata_require_separate_pvs", allocation_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_METADATA_REQUIRE_SEPARATE_PVS, vsn(2, 2, 89), NULL, 0, NULL,
|
||||
"Thin pool metadata and data will always use different PVs.\n")
|
||||
cfg(allocation_thin_pool_metadata_require_separate_pvs_CFG, "thin_pool_metadata_require_separate_pvs", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_METADATA_REQUIRE_SEPARATE_PVS, vsn(2, 2, 89), NULL, 0, NULL,
|
||||
"Thin pool metdata and data will always use different PVs.\n")
|
||||
|
||||
cfg(allocation_thin_pool_zero_CFG, "thin_pool_zero", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA | CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_ZERO, vsn(2, 2, 99), NULL, 0, NULL,
|
||||
"Thin pool data chunks are zeroed before they are first used.\n"
|
||||
@@ -657,9 +629,6 @@ cfg(allocation_thin_pool_chunk_size_policy_CFG, "thin_pool_chunk_size_policy", a
|
||||
" 512KiB.\n"
|
||||
"#\n")
|
||||
|
||||
cfg(allocation_zero_metadata_CFG, "zero_metadata", allocation_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_ZERO_METADATA, vsn(2, 3, 10), NULL, 0, NULL,
|
||||
"Zero whole metadata area before use with thin or cache pool.\n")
|
||||
|
||||
cfg_runtime(allocation_thin_pool_chunk_size_CFG, "thin_pool_chunk_size", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA | CFG_DEFAULT_UNDEFINED, CFG_TYPE_INT, vsn(2, 2, 99), 0, NULL,
|
||||
"The minimal chunk size in KiB for thin pool volumes.\n"
|
||||
"Larger chunk sizes may improve performance for plain thin volumes,\n"
|
||||
@@ -951,7 +920,7 @@ cfg(global_format_CFG, "format", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_
|
||||
cfg_array(global_format_libraries_CFG, "format_libraries", global_CFG_SECTION, CFG_DEFAULT_UNDEFINED, CFG_TYPE_STRING, NULL, vsn(1, 0, 0), NULL, vsn(2, 3, 0), NULL,
|
||||
"This setting is no longer used.")
|
||||
|
||||
cfg_array(global_segment_libraries_CFG, "segment_libraries", global_CFG_SECTION, CFG_DEFAULT_UNDEFINED, CFG_TYPE_STRING, NULL, vsn(1, 0, 18), NULL, vsn(2, 3, 3), NULL, NULL)
|
||||
cfg_array(global_segment_libraries_CFG, "segment_libraries", global_CFG_SECTION, CFG_DEFAULT_UNDEFINED, CFG_TYPE_STRING, NULL, vsn(1, 0, 18), NULL, 0, NULL, NULL)
|
||||
|
||||
cfg(global_proc_CFG, "proc", global_CFG_SECTION, CFG_ADVANCED, CFG_TYPE_STRING, DEFAULT_PROC_DIR, vsn(1, 0, 0), NULL, 0, NULL,
|
||||
"Location of proc filesystem.\n")
|
||||
@@ -1031,7 +1000,7 @@ cfg(global_mirror_segtype_default_CFG, "mirror_segtype_default", global_CFG_SECT
|
||||
" fashion in a cluster.\n"
|
||||
"#\n")
|
||||
|
||||
cfg(global_support_mirrored_mirror_log_CFG, "support_mirrored_mirror_log", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, 0, vsn(2, 3, 2), NULL, 0, NULL,
|
||||
cfg(global_support_mirrored_mirror_log_CFG, "support_mirrored_mirror_log", global_CFG_SECTION, 0, CFG_TYPE_BOOL, 0, vsn(2, 3, 2), NULL, 0, NULL,
|
||||
"Enable mirrored 'mirror' log type for testing.\n"
|
||||
"#\n"
|
||||
"This type is deprecated to create or convert to but can\n"
|
||||
@@ -1085,7 +1054,7 @@ cfg(global_lvdisplay_shows_full_device_path_CFG, "lvdisplay_shows_full_device_pa
|
||||
"Previously this was always shown as /dev/vgname/lvname even when that\n"
|
||||
"was never a valid path in the /dev filesystem.\n")
|
||||
|
||||
cfg(global_event_activation_CFG, "event_activation", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, 1, vsn(2, 3, 1), 0, 0, NULL,
|
||||
cfg(global_event_activation_CFG, "event_activation", global_CFG_SECTION, 0, CFG_TYPE_BOOL, 1, vsn(2, 3, 1), 0, 0, NULL,
|
||||
"Activate LVs based on system-generated device events.\n"
|
||||
"When a device appears on the system, a system-generated event runs\n"
|
||||
"the pvscan command to activate LVs if the new PV completes the VG.\n"
|
||||
@@ -1289,7 +1258,7 @@ cfg(activation_udev_rules_CFG, "udev_rules", activation_CFG_SECTION, 0, CFG_TYPE
|
||||
"active LVs itself. Manual intervention may be required if this\n"
|
||||
"setting is changed while LVs are active.\n")
|
||||
|
||||
cfg(activation_verify_udev_operations_CFG, "verify_udev_operations", activation_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_VERIFY_UDEV_OPERATIONS, vsn(2, 2, 86), NULL, 0, NULL,
|
||||
cfg(activation_verify_udev_operations_CFG, "verify_udev_operations", activation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_VERIFY_UDEV_OPERATIONS, vsn(2, 2, 86), NULL, 0, NULL,
|
||||
"Use extra checks in LVM to verify udev operations.\n"
|
||||
"This enables additional checks (and if necessary, repairs) on entries\n"
|
||||
"in the device directory after udev has completed processing its\n"
|
||||
@@ -1310,21 +1279,21 @@ cfg(activation_missing_stripe_filler_CFG, "missing_stripe_filler", activation_CF
|
||||
"other than 'error' with mirrored or snapshotted volumes is likely to\n"
|
||||
"result in data corruption.\n")
|
||||
|
||||
cfg(activation_use_linear_target_CFG, "use_linear_target", activation_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_USE_LINEAR_TARGET, vsn(2, 2, 89), NULL, 0, NULL,
|
||||
cfg(activation_use_linear_target_CFG, "use_linear_target", activation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_USE_LINEAR_TARGET, vsn(2, 2, 89), NULL, 0, NULL,
|
||||
"Use the linear target to optimize single stripe LVs.\n"
|
||||
"When disabled, the striped target is used. The linear target is an\n"
|
||||
"optimised version of the striped target that only handles a single\n"
|
||||
"stripe.\n")
|
||||
|
||||
cfg(activation_reserved_stack_CFG, "reserved_stack", activation_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_INT, DEFAULT_RESERVED_STACK, vsn(1, 0, 0), NULL, 0, NULL,
|
||||
cfg(activation_reserved_stack_CFG, "reserved_stack", activation_CFG_SECTION, 0, CFG_TYPE_INT, DEFAULT_RESERVED_STACK, vsn(1, 0, 0), NULL, 0, NULL,
|
||||
"Stack size in KiB to reserve for use while devices are suspended.\n"
|
||||
"Insufficent reserve risks I/O deadlock during device suspension.\n")
|
||||
|
||||
cfg(activation_reserved_memory_CFG, "reserved_memory", activation_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_INT, DEFAULT_RESERVED_MEMORY, vsn(1, 0, 0), NULL, 0, NULL,
|
||||
cfg(activation_reserved_memory_CFG, "reserved_memory", activation_CFG_SECTION, 0, CFG_TYPE_INT, DEFAULT_RESERVED_MEMORY, vsn(1, 0, 0), NULL, 0, NULL,
|
||||
"Memory size in KiB to reserve for use while devices are suspended.\n"
|
||||
"Insufficent reserve risks I/O deadlock during device suspension.\n")
|
||||
|
||||
cfg(activation_process_priority_CFG, "process_priority", activation_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_INT, DEFAULT_PROCESS_PRIORITY, vsn(1, 0, 0), NULL, 0, NULL,
|
||||
cfg(activation_process_priority_CFG, "process_priority", activation_CFG_SECTION, 0, CFG_TYPE_INT, DEFAULT_PROCESS_PRIORITY, vsn(1, 0, 0), NULL, 0, NULL,
|
||||
"Nice value used while devices are suspended.\n"
|
||||
"Use a high priority so that LVs are suspended\n"
|
||||
"for the shortest possible time.\n")
|
||||
@@ -1433,7 +1402,7 @@ cfg(activation_error_when_full_CFG, "error_when_full", activation_CFG_SECTION, C
|
||||
"thin pool data space is extended. New thin pools are assigned the\n"
|
||||
"behavior defined here.\n")
|
||||
|
||||
cfg(activation_readahead_CFG, "readahead", activation_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_READ_AHEAD, vsn(1, 0, 23), NULL, 0, NULL,
|
||||
cfg(activation_readahead_CFG, "readahead", activation_CFG_SECTION, 0, CFG_TYPE_STRING, DEFAULT_READ_AHEAD, vsn(1, 0, 23), NULL, 0, NULL,
|
||||
"Setting to use when there is no readahead setting in metadata.\n"
|
||||
"#\n"
|
||||
"Accepted values:\n"
|
||||
@@ -1561,7 +1530,7 @@ cfg(activation_thin_pool_autoextend_percent_CFG, "thin_pool_autoextend_percent",
|
||||
"thin_pool_autoextend_percent = 20\n"
|
||||
"#\n")
|
||||
|
||||
cfg(activation_vdo_pool_autoextend_threshold_CFG, "vdo_pool_autoextend_threshold", activation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA | CFG_DEFAULT_COMMENTED, CFG_TYPE_INT, DEFAULT_VDO_POOL_AUTOEXTEND_THRESHOLD, VDO_1ST_VSN, NULL, 0, NULL,
|
||||
cfg(activation_vdo_pool_autoextend_threshold_CFG, "vdo_pool_autoextend_threshold", activation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA, CFG_TYPE_INT, DEFAULT_VDO_POOL_AUTOEXTEND_THRESHOLD, VDO_1ST_VSN, NULL, 0, NULL,
|
||||
"Auto-extend a VDO pool when its usage exceeds this percent.\n"
|
||||
"Setting this to 100 disables automatic extension.\n"
|
||||
"The minimum value is 50 (a smaller value is treated as 50.)\n"
|
||||
@@ -1601,7 +1570,7 @@ cfg_array(activation_mlock_filter_CFG, "mlock_filter", activation_CFG_SECTION, C
|
||||
"mlock_filter = [ \"locale/locale-archive\", \"gconv/gconv-modules.cache\" ]\n"
|
||||
"#\n")
|
||||
|
||||
cfg(activation_use_mlockall_CFG, "use_mlockall", activation_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_USE_MLOCKALL, vsn(2, 2, 62), NULL, 0, NULL,
|
||||
cfg(activation_use_mlockall_CFG, "use_mlockall", activation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_USE_MLOCKALL, vsn(2, 2, 62), NULL, 0, NULL,
|
||||
"Use the old behavior of mlockall to pin all memory.\n"
|
||||
"Prior to version 2.02.62, LVM used mlockall() to pin the whole\n"
|
||||
"process's memory while activating devices.\n")
|
||||
@@ -1611,7 +1580,7 @@ cfg(activation_monitoring_CFG, "monitoring", activation_CFG_SECTION, 0, CFG_TYPE
|
||||
"The --ignoremonitoring option overrides this setting.\n"
|
||||
"When enabled, LVM will ask dmeventd to monitor activated LVs.\n")
|
||||
|
||||
cfg(activation_polling_interval_CFG, "polling_interval", activation_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_INT, DEFAULT_INTERVAL, vsn(2, 2, 63), NULL, 0, NULL,
|
||||
cfg(activation_polling_interval_CFG, "polling_interval", activation_CFG_SECTION, 0, CFG_TYPE_INT, DEFAULT_INTERVAL, vsn(2, 2, 63), NULL, 0, NULL,
|
||||
"Check pvmove or lvconvert progress at this interval (seconds).\n"
|
||||
"When pvmove or lvconvert must wait for the kernel to finish\n"
|
||||
"synchronising or merging data, they check and report progress at\n"
|
||||
@@ -1702,7 +1671,7 @@ cfg(metadata_vgmetadatacopies_CFG, "vgmetadatacopies", metadata_CFG_SECTION, CFG
|
||||
"and allows you to control which metadata areas are used at the\n"
|
||||
"individual PV level using pvchange --metadataignore y|n.\n")
|
||||
|
||||
cfg_runtime(metadata_pvmetadatasize_CFG, "pvmetadatasize", metadata_CFG_SECTION, CFG_DEFAULT_UNDEFINED, CFG_TYPE_INT, vsn(1, 0, 0), 0, NULL,
|
||||
cfg_runtime(metadata_pvmetadatasize_CFG, "pvmetadatasize", metadata_CFG_SECTION, CFG_DEFAULT_COMMENTED | CFG_DEFAULT_UNDEFINED, CFG_TYPE_INT, vsn(1, 0, 0), 0, NULL,
|
||||
"The default size of the metadata area in units of 512 byte sectors.\n"
|
||||
"The metadata area begins at an offset of the page size from the start\n"
|
||||
"of the device. The first PE is by default at 1 MiB from the start of\n"
|
||||
@@ -2050,7 +2019,7 @@ cfg(report_two_word_unknown_device_CFG, "two_word_unknown_device", report_CFG_SE
|
||||
"Use the two words 'unknown device' in place of '[unknown]'.\n"
|
||||
"This is displayed when the device for a PV is not known.\n")
|
||||
|
||||
cfg(dmeventd_mirror_library_CFG, "mirror_library", dmeventd_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_DMEVENTD_MIRROR_LIB, vsn(1, 2, 3), NULL, 0, NULL,
|
||||
cfg(dmeventd_mirror_library_CFG, "mirror_library", dmeventd_CFG_SECTION, 0, CFG_TYPE_STRING, DEFAULT_DMEVENTD_MIRROR_LIB, vsn(1, 2, 3), NULL, 0, NULL,
|
||||
"The library dmeventd uses when monitoring a mirror device.\n"
|
||||
"libdevmapper-event-lvm2mirror.so attempts to recover from\n"
|
||||
"failures. It removes failed devices from a volume group and\n"
|
||||
@@ -2059,13 +2028,13 @@ cfg(dmeventd_mirror_library_CFG, "mirror_library", dmeventd_CFG_SECTION, CFG_DEF
|
||||
|
||||
cfg(dmeventd_raid_library_CFG, "raid_library", dmeventd_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_DMEVENTD_RAID_LIB, vsn(2, 2, 87), NULL, 0, NULL, NULL)
|
||||
|
||||
cfg(dmeventd_snapshot_library_CFG, "snapshot_library", dmeventd_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_DMEVENTD_SNAPSHOT_LIB, vsn(1, 2, 26), NULL, 0, NULL,
|
||||
cfg(dmeventd_snapshot_library_CFG, "snapshot_library", dmeventd_CFG_SECTION, 0, CFG_TYPE_STRING, DEFAULT_DMEVENTD_SNAPSHOT_LIB, vsn(1, 2, 26), NULL, 0, NULL,
|
||||
"The library dmeventd uses when monitoring a snapshot device.\n"
|
||||
"libdevmapper-event-lvm2snapshot.so monitors the filling of snapshots\n"
|
||||
"and emits a warning through syslog when the usage exceeds 80%. The\n"
|
||||
"warning is repeated when 85%, 90% and 95% of the snapshot is filled.\n")
|
||||
|
||||
cfg(dmeventd_thin_library_CFG, "thin_library", dmeventd_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_DMEVENTD_THIN_LIB, vsn(2, 2, 89), NULL, 0, NULL,
|
||||
cfg(dmeventd_thin_library_CFG, "thin_library", dmeventd_CFG_SECTION, 0, CFG_TYPE_STRING, DEFAULT_DMEVENTD_THIN_LIB, vsn(2, 2, 89), NULL, 0, NULL,
|
||||
"The library dmeventd uses when monitoring a thin device.\n"
|
||||
"libdevmapper-event-lvm2thin.so monitors the filling of a pool\n"
|
||||
"and emits a warning through syslog when the usage exceeds 80%. The\n"
|
||||
|
||||
@@ -129,7 +129,6 @@
#define DEFAULT_THIN_POOL_DISCARDS "passdown"
#define DEFAULT_THIN_POOL_ZERO 1
#define DEFAULT_POOL_METADATA_SPARE 1 /* thin + cache */
#define DEFAULT_ZERO_METADATA 1 /* thin + cache */

#ifdef CACHE_CHECK_NEEDS_CHECK
# define DEFAULT_CACHE_CHECK_OPTION1 "-q"
@@ -165,7 +164,7 @@
#define DEFAULT_VDO_INDEX_MEMORY_SIZE_MB (DM_VDO_INDEX_MEMORY_SIZE_MINIMUM_MB)
#define DEFAULT_VDO_SLAB_SIZE_MB (2 * 1024) // 2GiB ... 19 slabbits
#define DEFAULT_VDO_ACK_THREADS (1)
#define DEFAULT_VDO_BIO_THREADS (4)
#define DEFAULT_VDO_BIO_THREADS (1)
#define DEFAULT_VDO_BIO_ROTATION (64)
#define DEFAULT_VDO_CPU_THREADS (2)
#define DEFAULT_VDO_HASH_ZONE_THREADS (1)
@@ -319,6 +318,4 @@

#define DEFAULT_IO_MEMORY_SIZE_KB 8192

#define DEFAULT_MD_COMPONENT_CHECKS "auto"

#endif /* _LVM_DEFAULTS_H */

@@ -39,32 +39,32 @@ static uint64_t _min(uint64_t lhs, uint64_t rhs)
|
||||
|
||||
//----------------------------------------------------------------
|
||||
|
||||
void bcache_prefetch_bytes(struct bcache *cache, int di, uint64_t start, size_t len)
|
||||
void bcache_prefetch_bytes(struct bcache *cache, int fd, uint64_t start, size_t len)
|
||||
{
|
||||
block_address bb, be;
|
||||
|
||||
byte_range_to_block_range(cache, start, len, &bb, &be);
|
||||
while (bb < be) {
|
||||
bcache_prefetch(cache, di, bb);
|
||||
bcache_prefetch(cache, fd, bb);
|
||||
bb++;
|
||||
}
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------
|
||||
|
||||
bool bcache_read_bytes(struct bcache *cache, int di, uint64_t start, size_t len, void *data)
|
||||
bool bcache_read_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data)
|
||||
{
|
||||
struct block *b;
|
||||
block_address bb, be;
|
||||
uint64_t block_size = bcache_block_sectors(cache) << SECTOR_SHIFT;
|
||||
uint64_t block_offset = start % block_size;
|
||||
|
||||
bcache_prefetch_bytes(cache, di, start, len);
|
||||
bcache_prefetch_bytes(cache, fd, start, len);
|
||||
|
||||
byte_range_to_block_range(cache, start, len, &bb, &be);
|
||||
|
||||
for (; bb != be; bb++) {
|
||||
if (!bcache_get(cache, di, bb, 0, &b))
|
||||
if (!bcache_get(cache, fd, bb, 0, &b))
|
||||
return false;
|
||||
|
||||
size_t blen = _min(block_size - block_offset, len);
|
||||
@@ -79,21 +79,6 @@ bool bcache_read_bytes(struct bcache *cache, int di, uint64_t start, size_t len,
|
||||
return true;
|
||||
}
|
||||
|
||||
bool bcache_invalidate_bytes(struct bcache *cache, int di, uint64_t start, size_t len)
|
||||
{
|
||||
block_address bb, be;
|
||||
bool result = true;
|
||||
|
||||
byte_range_to_block_range(cache, start, len, &bb, &be);
|
||||
|
||||
for (; bb != be; bb++) {
|
||||
if (!bcache_invalidate(cache, di, bb))
|
||||
result = false;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------
|
||||
|
||||
// Writing bytes and zeroing bytes are very similar, so we factor out
|
||||
@@ -101,8 +86,8 @@ bool bcache_invalidate_bytes(struct bcache *cache, int di, uint64_t start, size_
|
||||
|
||||
struct updater;
|
||||
|
||||
typedef bool (*partial_update_fn)(struct updater *u, int di, block_address bb, uint64_t offset, size_t len);
|
||||
typedef bool (*whole_update_fn)(struct updater *u, int di, block_address bb, block_address be);
|
||||
typedef bool (*partial_update_fn)(struct updater *u, int fd, block_address bb, uint64_t offset, size_t len);
|
||||
typedef bool (*whole_update_fn)(struct updater *u, int fd, block_address bb, block_address be);
|
||||
|
||||
struct updater {
|
||||
struct bcache *cache;
|
||||
@@ -111,7 +96,7 @@ struct updater {
|
||||
void *data;
|
||||
};
|
||||
|
||||
static bool _update_bytes(struct updater *u, int di, uint64_t start, size_t len)
|
||||
static bool _update_bytes(struct updater *u, int fd, uint64_t start, size_t len)
|
||||
{
|
||||
struct bcache *cache = u->cache;
|
||||
block_address bb, be;
|
||||
@@ -124,12 +109,12 @@ static bool _update_bytes(struct updater *u, int di, uint64_t start, size_t len)
|
||||
// If the last block is partial, we will require a read, so let's
|
||||
// prefetch it.
|
||||
if ((start + len) % block_size)
|
||||
bcache_prefetch(cache, di, (start + len) / block_size);
|
||||
bcache_prefetch(cache, fd, (start + len) / block_size);
|
||||
|
||||
// First block may be partial
|
||||
if (block_offset) {
|
||||
size_t blen = _min(block_size - block_offset, len);
|
||||
if (!u->partial_fn(u, di, bb, block_offset, blen))
|
||||
if (!u->partial_fn(u, fd, bb, block_offset, blen))
|
||||
return false;
|
||||
|
||||
len -= blen;
|
||||
@@ -141,7 +126,7 @@ static bool _update_bytes(struct updater *u, int di, uint64_t start, size_t len)
|
||||
|
||||
// Now we write out a set of whole blocks
|
||||
nr_whole = len / block_size;
|
||||
if (!u->whole_fn(u, di, bb, bb + nr_whole))
|
||||
if (!u->whole_fn(u, fd, bb, bb + nr_whole))
|
||||
return false;
|
||||
|
||||
bb += nr_whole;
|
||||
@@ -151,17 +136,17 @@ static bool _update_bytes(struct updater *u, int di, uint64_t start, size_t len)
|
||||
return true;
|
||||
|
||||
// Finally we write a partial end block
|
||||
return u->partial_fn(u, di, bb, 0, len);
|
||||
return u->partial_fn(u, fd, bb, 0, len);
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------
|
||||
|
||||
static bool _write_partial(struct updater *u, int di, block_address bb,
|
||||
static bool _write_partial(struct updater *u, int fd, block_address bb,
|
||||
uint64_t offset, size_t len)
|
||||
{
|
||||
struct block *b;
|
||||
|
||||
if (!bcache_get(u->cache, di, bb, GF_DIRTY, &b))
|
||||
if (!bcache_get(u->cache, fd, bb, GF_DIRTY, &b))
|
||||
return false;
|
||||
|
||||
memcpy(((unsigned char *) b->data) + offset, u->data, len);
|
||||
@@ -171,7 +156,7 @@ static bool _write_partial(struct updater *u, int di, block_address bb,
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool _write_whole(struct updater *u, int di, block_address bb, block_address be)
|
||||
static bool _write_whole(struct updater *u, int fd, block_address bb, block_address be)
|
||||
{
|
||||
struct block *b;
|
||||
uint64_t block_size = bcache_block_sectors(u->cache) << SECTOR_SHIFT;
|
||||
@@ -179,7 +164,7 @@ static bool _write_whole(struct updater *u, int di, block_address bb, block_addr
|
||||
for (; bb != be; bb++) {
|
||||
// We don't need to read the block since we are overwriting
|
||||
// it completely.
|
||||
if (!bcache_get(u->cache, di, bb, GF_ZERO, &b))
|
||||
if (!bcache_get(u->cache, fd, bb, GF_ZERO, &b))
|
||||
return false;
|
||||
memcpy(b->data, u->data, block_size);
|
||||
u->data = ((unsigned char *) u->data) + block_size;
|
||||
@@ -189,7 +174,7 @@ static bool _write_whole(struct updater *u, int di, block_address bb, block_addr
|
||||
return true;
|
||||
}
|
||||
|
||||
bool bcache_write_bytes(struct bcache *cache, int di, uint64_t start, size_t len, void *data)
|
||||
bool bcache_write_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data)
|
||||
{
|
||||
struct updater u;
|
||||
|
||||
@@ -198,16 +183,16 @@ bool bcache_write_bytes(struct bcache *cache, int di, uint64_t start, size_t len
|
||||
u.whole_fn = _write_whole;
|
||||
u.data = data;
|
||||
|
||||
return _update_bytes(&u, di, start, len);
|
||||
return _update_bytes(&u, fd, start, len);
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------
|
||||
|
||||
static bool _zero_partial(struct updater *u, int di, block_address bb, uint64_t offset, size_t len)
|
||||
static bool _zero_partial(struct updater *u, int fd, block_address bb, uint64_t offset, size_t len)
|
||||
{
|
||||
struct block *b;
|
||||
|
||||
if (!bcache_get(u->cache, di, bb, GF_DIRTY, &b))
|
||||
if (!bcache_get(u->cache, fd, bb, GF_DIRTY, &b))
|
||||
return false;
|
||||
|
||||
memset(((unsigned char *) b->data) + offset, 0, len);
|
||||
@@ -216,12 +201,12 @@ static bool _zero_partial(struct updater *u, int di, block_address bb, uint64_t
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool _zero_whole(struct updater *u, int di, block_address bb, block_address be)
|
||||
static bool _zero_whole(struct updater *u, int fd, block_address bb, block_address be)
|
||||
{
|
||||
struct block *b;
|
||||
|
||||
for (; bb != be; bb++) {
|
||||
if (!bcache_get(u->cache, di, bb, GF_ZERO, &b))
|
||||
if (!bcache_get(u->cache, fd, bb, GF_ZERO, &b))
|
||||
return false;
|
||||
bcache_put(b);
|
||||
}
|
||||
@@ -229,7 +214,7 @@ static bool _zero_whole(struct updater *u, int di, block_address bb, block_addre
|
||||
return true;
|
||||
}
|
||||
|
||||
bool bcache_zero_bytes(struct bcache *cache, int di, uint64_t start, size_t len)
|
||||
bool bcache_zero_bytes(struct bcache *cache, int fd, uint64_t start, size_t len)
|
||||
{
|
||||
struct updater u;
|
||||
|
||||
@@ -238,17 +223,17 @@ bool bcache_zero_bytes(struct bcache *cache, int di, uint64_t start, size_t len)
|
||||
u.whole_fn = _zero_whole;
|
||||
u.data = NULL;
|
||||
|
||||
return _update_bytes(&u, di, start, len);
|
||||
return _update_bytes(&u, fd, start, len);
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------
|
||||
|
||||
static bool _set_partial(struct updater *u, int di, block_address bb, uint64_t offset, size_t len)
|
||||
static bool _set_partial(struct updater *u, int fd, block_address bb, uint64_t offset, size_t len)
|
||||
{
|
||||
struct block *b;
|
||||
uint8_t val = *((uint8_t *) u->data);
|
||||
|
||||
if (!bcache_get(u->cache, di, bb, GF_DIRTY, &b))
|
||||
if (!bcache_get(u->cache, fd, bb, GF_DIRTY, &b))
|
||||
return false;
|
||||
|
||||
memset(((unsigned char *) b->data) + offset, val, len);
|
||||
@@ -257,14 +242,14 @@ static bool _set_partial(struct updater *u, int di, block_address bb, uint64_t o
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool _set_whole(struct updater *u, int di, block_address bb, block_address be)
|
||||
static bool _set_whole(struct updater *u, int fd, block_address bb, block_address be)
|
||||
{
|
||||
struct block *b;
|
||||
uint8_t val = *((uint8_t *) u->data);
|
||||
uint64_t len = bcache_block_sectors(u->cache) * 512;
|
||||
|
||||
for (; bb != be; bb++) {
|
||||
if (!bcache_get(u->cache, di, bb, GF_ZERO, &b))
|
||||
if (!bcache_get(u->cache, fd, bb, GF_ZERO, &b))
|
||||
return false;
|
||||
memset((unsigned char *) b->data, val, len);
|
||||
bcache_put(b);
|
||||
@@ -273,7 +258,7 @@ static bool _set_whole(struct updater *u, int di, block_address bb, block_addres
|
||||
return true;
|
||||
}
|
||||
|
||||
bool bcache_set_bytes(struct bcache *cache, int di, uint64_t start, size_t len, uint8_t val)
|
||||
bool bcache_set_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, uint8_t val)
|
||||
{
|
||||
struct updater u;
|
||||
|
||||
@@ -282,6 +267,6 @@ bool bcache_set_bytes(struct bcache *cache, int di, uint64_t start, size_t len,
|
||||
u.whole_fn = _set_whole;
|
||||
u.data = &val;
|
||||
|
||||
return _update_bytes(&u, di, start, len);
|
||||
return _update_bytes(&u, fd, start, len);
|
||||
}
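/* Reader's sketch (not part of this patch): how a caller drives the
 * byte-level helpers defined above. 'cache', 'fd' and 'buf' are assumed
 * to be a live bcache, an open descriptor used with it, and a 512-byte
 * buffer; the offsets are arbitrary for the example. */
static bool _example_update_region(struct bcache *cache, int fd, void *buf)
{
        if (!bcache_zero_bytes(cache, fd, 0, 4096))        /* wipe the first 4 KiB */
                return false;
        if (!bcache_write_bytes(cache, fd, 512, 512, buf)) /* write one sector at offset 512 */
                return false;
        return bcache_flush(cache);                        /* push dirty blocks to disk */
}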
|
||||
|
||||
|
||||
@@ -33,11 +33,6 @@
|
||||
|
||||
#define SECTOR_SHIFT 9L
|
||||
|
||||
#define FD_TABLE_INC 1024
|
||||
static int _fd_table_size;
|
||||
static int *_fd_table;
|
||||
|
||||
|
||||
//----------------------------------------------------------------
|
||||
|
||||
static void log_sys_warn(const char *call)
|
||||
@@ -66,17 +61,23 @@ struct control_block {
|
||||
struct cb_set {
|
||||
struct dm_list free;
|
||||
struct dm_list allocated;
|
||||
struct control_block vec[];
|
||||
struct control_block *vec;
|
||||
} control_block_set;
|
||||
|
||||
static struct cb_set *_cb_set_create(unsigned nr)
|
||||
{
|
||||
unsigned i;
|
||||
struct cb_set *cbs = malloc(sizeof(*cbs) + nr * sizeof(*cbs->vec));
|
||||
int i;
|
||||
struct cb_set *cbs = malloc(sizeof(*cbs));
|
||||
|
||||
if (!cbs->vec)
|
||||
if (!cbs)
|
||||
return NULL;
|
||||
|
||||
cbs->vec = malloc(nr * sizeof(*cbs->vec));
|
||||
if (!cbs->vec) {
|
||||
free(cbs);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
dm_list_init(&cbs->free);
|
||||
dm_list_init(&cbs->allocated);
|
||||
|
||||
@@ -96,6 +97,7 @@ static void _cb_set_destroy(struct cb_set *cbs)
|
||||
return;
|
||||
}
|
||||
|
||||
free(cbs->vec);
|
||||
free(cbs);
|
||||
}
|
||||
|
||||
@@ -153,11 +155,11 @@ static void _async_destroy(struct io_engine *ioe)
|
||||
free(e);
|
||||
}
|
||||
|
||||
static int _last_byte_di;
|
||||
static int _last_byte_fd;
|
||||
static uint64_t _last_byte_offset;
|
||||
static int _last_byte_sector_size;
|
||||
|
||||
static bool _async_issue(struct io_engine *ioe, enum dir d, int di,
|
||||
static bool _async_issue(struct io_engine *ioe, enum dir d, int fd,
|
||||
sector_t sb, sector_t se, void *data, void *context)
|
||||
{
|
||||
int r;
|
||||
@@ -167,7 +169,6 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int di,
|
||||
sector_t offset;
|
||||
sector_t nbytes;
|
||||
sector_t limit_nbytes;
|
||||
sector_t orig_nbytes;
|
||||
sector_t extra_nbytes = 0;
|
||||
|
||||
if (((uintptr_t) data) & e->page_mask) {
|
||||
@@ -181,7 +182,7 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int di,
|
||||
/*
|
||||
* If bcache block goes past where lvm wants to write, then clamp it.
|
||||
*/
|
||||
if ((d == DIR_WRITE) && _last_byte_offset && (di == _last_byte_di)) {
|
||||
if ((d == DIR_WRITE) && _last_byte_offset && (fd == _last_byte_fd)) {
|
||||
if (offset > _last_byte_offset) {
|
||||
log_error("Limit write at %llu len %llu beyond last byte %llu",
|
||||
(unsigned long long)offset,
|
||||
@@ -190,41 +191,11 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int di,
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the bcache block offset+len goes beyond where lvm is
|
||||
* intending to write, then reduce the len being written
|
||||
* (which is the bcache block size) so we don't write past
|
||||
* the limit set by lvm. If after applying the limit, the
|
||||
* resulting size is not a multiple of the sector size (512
|
||||
* or 4096) then extend the reduced size to be a multiple of
|
||||
* the sector size (we don't want to write partial sectors.)
|
||||
*/
|
||||
if (offset + nbytes > _last_byte_offset) {
|
||||
limit_nbytes = _last_byte_offset - offset;
|
||||
|
||||
if (limit_nbytes % _last_byte_sector_size) {
|
||||
if (limit_nbytes % _last_byte_sector_size)
|
||||
extra_nbytes = _last_byte_sector_size - (limit_nbytes % _last_byte_sector_size);
|
||||
|
||||
/*
|
||||
* adding extra_nbytes to the reduced nbytes (limit_nbytes)
|
||||
* should make the final write size a multiple of the
|
||||
* sector size. This should never result in a final size
|
||||
* larger than the bcache block size (as long as the bcache
|
||||
* block size is a multiple of the sector size).
|
||||
*/
|
||||
if (limit_nbytes + extra_nbytes > nbytes) {
|
||||
log_warn("Skip extending write at %llu len %llu limit %llu extra %llu sector_size %llu",
|
||||
(unsigned long long)offset,
|
||||
(unsigned long long)nbytes,
|
||||
(unsigned long long)limit_nbytes,
|
||||
(unsigned long long)extra_nbytes,
|
||||
(unsigned long long)_last_byte_sector_size);
|
||||
extra_nbytes = 0;
|
||||
}
|
||||
}
|
||||
|
||||
orig_nbytes = nbytes;
|
||||
|
||||
if (extra_nbytes) {
|
||||
log_debug("Limit write at %llu len %llu to len %llu rounded to %llu",
|
||||
(unsigned long long)offset,
|
||||
@@ -239,22 +210,6 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int di,
|
||||
(unsigned long long)limit_nbytes);
|
||||
nbytes = limit_nbytes;
|
||||
}
|
||||
|
||||
/*
|
||||
* This shouldn't happen, the reduced+extended
|
||||
* nbytes value should never be larger than the
|
||||
* bcache block size.
|
||||
*/
|
||||
if (nbytes > orig_nbytes) {
|
||||
log_error("Invalid adjusted write at %llu len %llu adjusted %llu limit %llu extra %llu sector_size %llu",
|
||||
(unsigned long long)offset,
|
||||
(unsigned long long)orig_nbytes,
|
||||
(unsigned long long)nbytes,
|
||||
(unsigned long long)limit_nbytes,
|
||||
(unsigned long long)extra_nbytes,
|
||||
(unsigned long long)_last_byte_sector_size);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
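/* Aside for the reader (a standalone restatement of the clamping logic
 * described in the removed comment above; not the committed code):
 * clamp a write so it stops at 'last_byte', then pad the clamped length
 * back up to a whole sector without exceeding the original block length. */
static uint64_t _example_clamp_len(uint64_t offset, uint64_t nbytes,
                                   uint64_t last_byte, uint64_t sector_size)
{
        uint64_t limit = last_byte - offset;     /* bytes lvm still allows */
        uint64_t extra = 0;

        if (limit % sector_size)
                extra = sector_size - (limit % sector_size);
        if (limit + extra > nbytes)
                extra = 0;                       /* padding would overrun the bcache block */
        return limit + extra;
}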
|
||||
|
||||
@@ -266,7 +221,7 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int di,
|
||||
|
||||
memset(&cb->cb, 0, sizeof(cb->cb));
|
||||
|
||||
cb->cb.aio_fildes = (int) _fd_table[di];
|
||||
cb->cb.aio_fildes = (int) fd;
|
||||
cb->cb.u.c.buf = data;
|
||||
cb->cb.u.c.offset = offset;
|
||||
cb->cb.u.c.nbytes = nbytes;
|
||||
@@ -274,15 +229,13 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int di,
|
||||
|
||||
#if 0
|
||||
if (d == DIR_READ) {
|
||||
log_debug("io R off %llu bytes %llu di %d fd %d",
|
||||
log_debug("io R off %llu bytes %llu",
|
||||
(unsigned long long)cb->cb.u.c.offset,
|
||||
(unsigned long long)cb->cb.u.c.nbytes,
|
||||
di, _fd_table[di]);
|
||||
(unsigned long long)cb->cb.u.c.nbytes);
|
||||
} else {
|
||||
log_debug("io W off %llu bytes %llu di %d fd %d",
|
||||
log_debug("io W off %llu bytes %llu",
|
||||
(unsigned long long)cb->cb.u.c.offset,
|
||||
(unsigned long long)cb->cb.u.c.nbytes,
|
||||
di, _fd_table[di]);
|
||||
(unsigned long long)cb->cb.u.c.nbytes);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -318,7 +271,9 @@ static bool _async_wait(struct io_engine *ioe, io_complete_fn fn)
|
||||
struct async_engine *e = _to_async(ioe);
|
||||
|
||||
memset(&event, 0, sizeof(event));
|
||||
r = io_getevents(e->aio_context, 1, MAX_EVENT, event, NULL);
|
||||
do {
|
||||
r = io_getevents(e->aio_context, 1, MAX_EVENT, event, NULL);
|
||||
} while (r == -EINTR);
|
||||
|
||||
if (r < 0) {
|
||||
log_sys_warn("io_getevents");
|
||||
@@ -412,7 +367,7 @@ static void _sync_destroy(struct io_engine *ioe)
|
||||
free(e);
|
||||
}
|
||||
|
||||
static bool _sync_issue(struct io_engine *ioe, enum dir d, int di,
|
||||
static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,
|
||||
sector_t sb, sector_t se, void *data, void *context)
|
||||
{
|
||||
int rv;
|
||||
@@ -428,7 +383,7 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int di,
|
||||
}
|
||||
|
||||
where = sb * 512;
|
||||
off = lseek(_fd_table[di], where, SEEK_SET);
|
||||
off = lseek(fd, where, SEEK_SET);
|
||||
if (off == (off_t) -1) {
|
||||
log_warn("Device seek error %d for offset %llu", errno, (unsigned long long)where);
|
||||
free(io);
|
||||
@@ -443,12 +398,11 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int di,
|
||||
/*
|
||||
* If bcache block goes past where lvm wants to write, then clamp it.
|
||||
*/
|
||||
if ((d == DIR_WRITE) && _last_byte_offset && (di == _last_byte_di)) {
|
||||
if ((d == DIR_WRITE) && _last_byte_offset && (fd == _last_byte_fd)) {
|
||||
uint64_t offset = where;
|
||||
uint64_t nbytes = len;
|
||||
sector_t limit_nbytes = 0;
|
||||
sector_t extra_nbytes = 0;
|
||||
sector_t orig_nbytes = 0;
|
||||
|
||||
if (offset > _last_byte_offset) {
|
||||
log_error("Limit write at %llu len %llu beyond last byte %llu",
|
||||
@@ -461,30 +415,9 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int di,
|
||||
|
||||
if (offset + nbytes > _last_byte_offset) {
|
||||
limit_nbytes = _last_byte_offset - offset;
|
||||
|
||||
if (limit_nbytes % _last_byte_sector_size) {
|
||||
if (limit_nbytes % _last_byte_sector_size)
|
||||
extra_nbytes = _last_byte_sector_size - (limit_nbytes % _last_byte_sector_size);
|
||||
|
||||
/*
|
||||
* adding extra_nbytes to the reduced nbytes (limit_nbytes)
|
||||
* should make the final write size a multiple of the
|
||||
* sector size. This should never result in a final size
|
||||
* larger than the bcache block size (as long as the bcache
|
||||
* block size is a multiple of the sector size).
|
||||
*/
|
||||
if (limit_nbytes + extra_nbytes > nbytes) {
|
||||
log_warn("Skip extending write at %llu len %llu limit %llu extra %llu sector_size %llu",
|
||||
(unsigned long long)offset,
|
||||
(unsigned long long)nbytes,
|
||||
(unsigned long long)limit_nbytes,
|
||||
(unsigned long long)extra_nbytes,
|
||||
(unsigned long long)_last_byte_sector_size);
|
||||
extra_nbytes = 0;
|
||||
}
|
||||
}
|
||||
|
||||
orig_nbytes = nbytes;
|
||||
|
||||
if (extra_nbytes) {
|
||||
log_debug("Limit write at %llu len %llu to len %llu rounded to %llu",
|
||||
(unsigned long long)offset,
|
||||
@@ -499,23 +432,6 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int di,
|
||||
(unsigned long long)limit_nbytes);
|
||||
nbytes = limit_nbytes;
|
||||
}
|
||||
|
||||
/*
|
||||
* This shouldn't happen, the reduced+extended
|
||||
* nbytes value should never be larger than the
|
||||
* bcache block size.
|
||||
*/
|
||||
if (nbytes > orig_nbytes) {
|
||||
log_error("Invalid adjusted write at %llu len %llu adjusted %llu limit %llu extra %llu sector_size %llu",
|
||||
(unsigned long long)offset,
|
||||
(unsigned long long)orig_nbytes,
|
||||
(unsigned long long)nbytes,
|
||||
(unsigned long long)limit_nbytes,
|
||||
(unsigned long long)extra_nbytes,
|
||||
(unsigned long long)_last_byte_sector_size);
|
||||
free(io);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
where = offset;
|
||||
@@ -524,9 +440,9 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int di,
|
||||
|
||||
while (pos < len) {
|
||||
if (d == DIR_READ)
|
||||
rv = read(_fd_table[di], (char *)data + pos, len - pos);
|
||||
rv = read(fd, (char *)data + pos, len - pos);
|
||||
else
|
||||
rv = write(_fd_table[di], (char *)data + pos, len - pos);
|
||||
rv = write(fd, (char *)data + pos, len - pos);
|
||||
|
||||
if (rv == -1 && errno == EINTR)
|
||||
continue;
|
||||
@@ -686,7 +602,7 @@ struct bcache {
|
||||
//----------------------------------------------------------------
|
||||
|
||||
struct key_parts {
|
||||
uint32_t di;
|
||||
uint32_t fd;
|
||||
uint64_t b;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
@@ -695,12 +611,12 @@ union key {
|
||||
uint8_t bytes[12];
|
||||
};
|
||||
|
||||
static struct block *_block_lookup(struct bcache *cache, int di, uint64_t i)
|
||||
static struct block *_block_lookup(struct bcache *cache, int fd, uint64_t i)
|
||||
{
|
||||
union key k;
|
||||
union radix_value v;
|
||||
|
||||
k.parts.di = di;
|
||||
k.parts.fd = fd;
|
||||
k.parts.b = i;
|
||||
|
||||
if (radix_tree_lookup(cache->rtree, k.bytes, k.bytes + sizeof(k.bytes), &v))
|
||||
@@ -714,7 +630,7 @@ static bool _block_insert(struct block *b)
|
||||
union key k;
|
||||
union radix_value v;
|
||||
|
||||
k.parts.di = b->di;
|
||||
k.parts.fd = b->fd;
|
||||
k.parts.b = b->index;
|
||||
v.ptr = b;
|
||||
|
||||
@@ -725,7 +641,7 @@ static void _block_remove(struct block *b)
|
||||
{
|
||||
union key k;
|
||||
|
||||
k.parts.di = b->di;
|
||||
k.parts.fd = b->fd;
|
||||
k.parts.b = b->index;
|
||||
|
||||
radix_tree_remove(b->cache->rtree, k.bytes, k.bytes + sizeof(k.bytes));
|
||||
@@ -867,7 +783,7 @@ static void _issue_low_level(struct block *b, enum dir d)
|
||||
|
||||
dm_list_move(&cache->io_pending, &b->list);
|
||||
|
||||
if (!cache->engine->issue(cache->engine, d, b->di, sb, se, b->data, b)) {
|
||||
if (!cache->engine->issue(cache->engine, d, b->fd, sb, se, b->data, b)) {
|
||||
/* FIXME: if io_submit() set an errno, return that instead of EIO? */
|
||||
_complete_io(b, -EIO);
|
||||
return;
|
||||
@@ -943,26 +859,21 @@ static struct block *_find_unused_clean_block(struct bcache *cache)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct block *_new_block(struct bcache *cache, int di, block_address i, bool can_wait)
|
||||
static struct block *_new_block(struct bcache *cache, int fd, block_address i, bool can_wait)
|
||||
{
|
||||
struct block *b;
|
||||
|
||||
b = _alloc_block(cache);
|
||||
while (!b) {
|
||||
while (!b && !dm_list_empty(&cache->clean)) {
|
||||
b = _find_unused_clean_block(cache);
|
||||
if (!b) {
|
||||
if (can_wait) {
|
||||
if (dm_list_empty(&cache->io_pending))
|
||||
_writeback(cache, 16); // FIXME: magic number
|
||||
_wait_all(cache);
|
||||
if (dm_list_size(&cache->errored) >= cache->max_io) {
|
||||
log_debug("bcache no new blocks for di %d index %u with >%d errors.",
|
||||
di, (uint32_t) i, cache->max_io);
|
||||
return NULL;
|
||||
}
|
||||
_wait_io(cache);
|
||||
} else {
|
||||
log_debug("bcache no new blocks for di %d index %u",
|
||||
di, (uint32_t) i);
|
||||
log_error("bcache no new blocks for fd %d index %u",
|
||||
fd, (uint32_t) i);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
@@ -971,7 +882,7 @@ static struct block *_new_block(struct bcache *cache, int di, block_address i, b
|
||||
if (b) {
|
||||
dm_list_init(&b->list);
|
||||
b->flags = 0;
|
||||
b->di = di;
|
||||
b->fd = fd;
|
||||
b->index = i;
|
||||
b->ref_count = 0;
|
||||
b->error = 0;
|
||||
@@ -1017,10 +928,10 @@ static void _miss(struct bcache *cache, unsigned flags)
|
||||
}
|
||||
|
||||
static struct block *_lookup_or_read_block(struct bcache *cache,
|
||||
int di, block_address i,
|
||||
int fd, block_address i,
|
||||
unsigned flags)
|
||||
{
|
||||
struct block *b = _block_lookup(cache, di, i);
|
||||
struct block *b = _block_lookup(cache, fd, i);
|
||||
|
||||
if (b) {
|
||||
// FIXME: this is insufficient. We need to also catch a read
|
||||
@@ -1045,7 +956,7 @@ static struct block *_lookup_or_read_block(struct bcache *cache,
|
||||
} else {
|
||||
_miss(cache, flags);
|
||||
|
||||
b = _new_block(cache, di, i, true);
|
||||
b = _new_block(cache, fd, i, true);
|
||||
if (b) {
|
||||
if (flags & GF_ZERO)
|
||||
_zero_block(b);
|
||||
@@ -1090,7 +1001,6 @@ struct bcache *bcache_create(sector_t block_sectors, unsigned nr_cache_blocks,
|
||||
struct bcache *cache;
|
||||
unsigned max_io = engine->max_io(engine);
|
||||
long pgsize = sysconf(_SC_PAGESIZE);
|
||||
int i;
|
||||
|
||||
if (pgsize < 0) {
|
||||
log_warn("WARNING: _SC_PAGESIZE returns negative value.");
|
||||
@@ -1151,18 +1061,6 @@ struct bcache *bcache_create(sector_t block_sectors, unsigned nr_cache_blocks,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
_fd_table_size = FD_TABLE_INC;
|
||||
|
||||
if (!(_fd_table = malloc(sizeof(int) * _fd_table_size))) {
|
||||
cache->engine->destroy(cache->engine);
|
||||
radix_tree_destroy(cache->rtree);
|
||||
free(cache);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for (i = 0; i < _fd_table_size; i++)
|
||||
_fd_table[i] = -1;
|
||||
|
||||
return cache;
|
||||
}
|
||||
|
||||
@@ -1178,9 +1076,6 @@ void bcache_destroy(struct bcache *cache)
|
||||
radix_tree_destroy(cache->rtree);
|
||||
cache->engine->destroy(cache->engine);
|
||||
free(cache);
|
||||
free(_fd_table);
|
||||
_fd_table = NULL;
|
||||
_fd_table_size = 0;
|
||||
}
|
||||
|
||||
sector_t bcache_block_sectors(struct bcache *cache)
|
||||
@@ -1198,13 +1093,13 @@ unsigned bcache_max_prefetches(struct bcache *cache)
|
||||
return cache->max_io;
|
||||
}
|
||||
|
||||
void bcache_prefetch(struct bcache *cache, int di, block_address i)
|
||||
void bcache_prefetch(struct bcache *cache, int fd, block_address i)
|
||||
{
|
||||
struct block *b = _block_lookup(cache, di, i);
|
||||
struct block *b = _block_lookup(cache, fd, i);
|
||||
|
||||
if (!b) {
|
||||
if (cache->nr_io_pending < cache->max_io) {
|
||||
b = _new_block(cache, di, i, false);
|
||||
b = _new_block(cache, fd, i, false);
|
||||
if (b) {
|
||||
cache->prefetches++;
|
||||
_issue_read(b);
|
||||
@@ -1222,15 +1117,12 @@ static void _recycle_block(struct bcache *cache, struct block *b)
|
||||
_free_block(b);
|
||||
}
|
||||
|
||||
bool bcache_get(struct bcache *cache, int di, block_address i,
|
||||
bool bcache_get(struct bcache *cache, int fd, block_address i,
|
||||
unsigned flags, struct block **result)
|
||||
{
|
||||
struct block *b;
|
||||
|
||||
if (di >= _fd_table_size)
|
||||
goto bad;
|
||||
|
||||
b = _lookup_or_read_block(cache, di, i, flags);
|
||||
b = _lookup_or_read_block(cache, fd, i, flags);
|
||||
if (b) {
|
||||
if (b->error) {
|
||||
if (b->io_dir == DIR_READ) {
|
||||
@@ -1249,10 +1141,10 @@ bool bcache_get(struct bcache *cache, int di, block_address i,
|
||||
*result = b;
|
||||
return true;
|
||||
}
|
||||
bad:
|
||||
|
||||
*result = NULL;
|
||||
|
||||
log_error("bcache failed to get block %u di %d", (uint32_t) i, di);
|
||||
log_error("bcache failed to get block %u fd %d", (uint32_t) i, fd);
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -1316,7 +1208,7 @@ static bool _invalidate_block(struct bcache *cache, struct block *b)
|
||||
|
||||
if (b->ref_count) {
|
||||
log_warn("bcache_invalidate: block (%d, %llu) still held",
|
||||
b->di, (unsigned long long) b->index);
|
||||
b->fd, (unsigned long long) b->index);
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -1333,9 +1225,9 @@ static bool _invalidate_block(struct bcache *cache, struct block *b)
|
||||
return true;
|
||||
}
|
||||
|
||||
bool bcache_invalidate(struct bcache *cache, int di, block_address i)
|
||||
bool bcache_invalidate(struct bcache *cache, int fd, block_address i)
|
||||
{
|
||||
return _invalidate_block(cache, _block_lookup(cache, di, i));
|
||||
return _invalidate_block(cache, _block_lookup(cache, fd, i));
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------
|
||||
@@ -1364,14 +1256,14 @@ static bool _invalidate_v(struct radix_tree_iterator *it,
|
||||
|
||||
if (b->error || _test_flags(b, BF_DIRTY)) {
|
||||
log_warn("bcache_invalidate: block (%d, %llu) still dirty",
|
||||
b->di, (unsigned long long) b->index);
|
||||
b->fd, (unsigned long long) b->index);
|
||||
iit->success = false;
|
||||
return true;
|
||||
}
|
||||
|
||||
if (b->ref_count) {
|
||||
log_warn("bcache_invalidate: block (%d, %llu) still held",
|
||||
b->di, (unsigned long long) b->index);
|
||||
b->fd, (unsigned long long) b->index);
|
||||
iit->success = false;
|
||||
return true;
|
||||
}
|
||||
@@ -1384,138 +1276,42 @@ static bool _invalidate_v(struct radix_tree_iterator *it,
|
||||
return true;
|
||||
}
|
||||
|
||||
bool bcache_invalidate_di(struct bcache *cache, int di)
|
||||
bool bcache_invalidate_fd(struct bcache *cache, int fd)
|
||||
{
|
||||
union key k;
|
||||
struct invalidate_iterator it;
|
||||
|
||||
k.parts.di = di;
|
||||
k.parts.fd = fd;
|
||||
|
||||
it.it.visit = _writeback_v;
|
||||
radix_tree_iterate(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.di), &it.it);
|
||||
radix_tree_iterate(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.fd), &it.it);
|
||||
|
||||
_wait_all(cache);
|
||||
|
||||
it.success = true;
|
||||
it.it.visit = _invalidate_v;
|
||||
radix_tree_iterate(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.di), &it.it);
|
||||
|
||||
if (it.success)
|
||||
radix_tree_remove_prefix(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.di));
|
||||
|
||||
radix_tree_iterate(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.fd), &it.it);
|
||||
radix_tree_remove_prefix(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.fd));
|
||||
return it.success;
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------
|
||||
|
||||
static bool _abort_v(struct radix_tree_iterator *it,
|
||||
uint8_t *kb, uint8_t *ke, union radix_value v)
|
||||
void bcache_set_last_byte(struct bcache *cache, int fd, uint64_t offset, int sector_size)
|
||||
{
|
||||
struct block *b = v.ptr;
|
||||
|
||||
if (b->ref_count) {
|
||||
log_fatal("bcache_abort: block (%d, %llu) still held",
|
||||
b->di, (unsigned long long) b->index);
|
||||
return true;
|
||||
}
|
||||
|
||||
_unlink_block(b);
|
||||
_free_block(b);
|
||||
|
||||
// We can't remove the block from the radix tree yet because
|
||||
// we're in the middle of an iteration.
|
||||
return true;
|
||||
}
|
||||
|
||||
void bcache_abort_di(struct bcache *cache, int di)
|
||||
{
|
||||
union key k;
|
||||
struct radix_tree_iterator it;
|
||||
|
||||
k.parts.di = di;
|
||||
|
||||
it.visit = _abort_v;
|
||||
radix_tree_iterate(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.di), &it);
|
||||
radix_tree_remove_prefix(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.di));
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------
|
||||
|
||||
void bcache_set_last_byte(struct bcache *cache, int di, uint64_t offset, int sector_size)
|
||||
{
|
||||
_last_byte_di = di;
|
||||
_last_byte_fd = fd;
|
||||
_last_byte_offset = offset;
|
||||
_last_byte_sector_size = sector_size;
|
||||
if (!sector_size)
|
||||
_last_byte_sector_size = 512;
|
||||
}
|
||||
|
||||
void bcache_unset_last_byte(struct bcache *cache, int di)
|
||||
void bcache_unset_last_byte(struct bcache *cache, int fd)
|
||||
{
|
||||
if (_last_byte_di == di) {
|
||||
_last_byte_di = 0;
|
||||
if (_last_byte_fd == fd) {
|
||||
_last_byte_fd = 0;
|
||||
_last_byte_offset = 0;
|
||||
_last_byte_sector_size = 0;
|
||||
}
|
||||
}
|
||||
|
||||
int bcache_set_fd(int fd)
|
||||
{
|
||||
int *new_table = NULL;
|
||||
int new_size = 0;
|
||||
int i;
|
||||
|
||||
retry:
|
||||
for (i = 0; i < _fd_table_size; i++) {
|
||||
if (_fd_table[i] == -1) {
|
||||
_fd_table[i] = fd;
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
/* already tried once, shouldn't happen */
|
||||
if (new_size)
|
||||
return -1;
|
||||
|
||||
new_size = _fd_table_size + FD_TABLE_INC;
|
||||
|
||||
new_table = realloc(_fd_table, sizeof(int) * new_size);
|
||||
if (!new_table) {
|
||||
log_error("Cannot extend bcache fd table");
|
||||
return -1;
|
||||
}
|
||||
|
||||
for (i = _fd_table_size; i < new_size; i++)
|
||||
new_table[i] = -1;
|
||||
|
||||
_fd_table = new_table;
|
||||
_fd_table_size = new_size;
|
||||
|
||||
goto retry;
|
||||
}

|
||||
|
||||
/*
|
||||
 * Should we check for unflushed or in-progress io on an fd
|
||||
* prior to doing clear_fd or change_fd? (To catch mistakes;
|
||||
* the caller should be smart enough to not do that.)
|
||||
*/
|
||||
|
||||
void bcache_clear_fd(int di)
|
||||
{
|
||||
if (di >= _fd_table_size)
|
||||
return;
|
||||
_fd_table[di] = -1;
|
||||
}
|
||||
|
||||
int bcache_change_fd(int di, int fd)
|
||||
{
|
||||
if (di >= _fd_table_size)
|
||||
return 0;
|
||||
if (di < 0) {
|
||||
log_error(INTERNAL_ERROR "Cannot change not openned DI with FD:%d", fd);
|
||||
return 0;
|
||||
}
|
||||
_fd_table[di] = fd;
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
@@ -16,12 +16,19 @@
|
||||
#define BCACHE_H
|
||||
|
||||
#include "device_mapper/all.h"
|
||||
#include "base/memory/container_of.h"
|
||||
|
||||
#include <linux/fs.h>
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
|
||||
/*----------------------------------------------------------------*/
|
||||
|
||||
// FIXME: move somewhere more sensible
|
||||
#define container_of(v, t, head) \
|
||||
((t *)((const char *)(v) - (const char *)&((t *) 0)->head))
|
||||
|
||||
/*----------------------------------------------------------------*/
|
||||
|
||||
enum dir {
|
||||
DIR_READ,
|
||||
DIR_WRITE
|
||||
@@ -34,7 +41,7 @@ typedef void io_complete_fn(void *context, int io_error);
|
||||
|
||||
struct io_engine {
|
||||
void (*destroy)(struct io_engine *e);
|
||||
bool (*issue)(struct io_engine *e, enum dir d, int di,
|
||||
bool (*issue)(struct io_engine *e, enum dir d, int fd,
|
||||
sector_t sb, sector_t se, void *data, void *context);
|
||||
bool (*wait)(struct io_engine *e, io_complete_fn fn);
|
||||
unsigned (*max_io)(struct io_engine *e);
|
||||
@@ -48,7 +55,7 @@ struct io_engine *create_sync_io_engine(void);
|
||||
struct bcache;
|
||||
struct block {
|
||||
/* clients may only access these three fields */
|
||||
int di;
|
||||
int fd;
|
||||
uint64_t index;
|
||||
void *data;
|
||||
|
||||
@@ -106,12 +113,12 @@ unsigned bcache_max_prefetches(struct bcache *cache);
|
||||
* they complete. But we're talking a very small difference, and it's worth it
|
||||
* to keep callbacks out of this interface.
|
||||
*/
|
||||
void bcache_prefetch(struct bcache *cache, int di, block_address index);
|
||||
void bcache_prefetch(struct bcache *cache, int fd, block_address index);
|
||||
|
||||
/*
|
||||
* Returns true on success.
|
||||
*/
|
||||
bool bcache_get(struct bcache *cache, int di, block_address index,
|
||||
bool bcache_get(struct bcache *cache, int fd, block_address index,
|
||||
unsigned flags, struct block **result);
|
||||
void bcache_put(struct block *b);
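/* Usage note (illustrative, not part of the header): a minimal
 * get/modify/put round trip on block 0 of an open descriptor; needs
 * <string.h> for memcpy and assumes 'buf' holds at least 512 bytes. */
static bool _example_dirty_block0(struct bcache *cache, int fd, const void *buf)
{
        struct block *b;

        if (!bcache_get(cache, fd, 0, GF_DIRTY, &b))
                return false;
        memcpy(b->data, buf, 512);   /* edit the cached copy */
        bcache_put(b);               /* release; written back on flush */
        return true;
}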
|
||||
|
||||
@@ -129,42 +136,30 @@ bool bcache_flush(struct bcache *cache);
|
||||
*
|
||||
* If the block is currently held false will be returned.
|
||||
*/
|
||||
bool bcache_invalidate(struct bcache *cache, int di, block_address index);
|
||||
bool bcache_invalidate(struct bcache *cache, int fd, block_address index);
|
||||
|
||||
/*
|
||||
* Invalidates all blocks on the given descriptor. Call this before closing
|
||||
* the descriptor to make sure everything is written back.
|
||||
*/
|
||||
bool bcache_invalidate_di(struct bcache *cache, int di);
|
||||
bool bcache_invalidate_fd(struct bcache *cache, int fd);
|
||||
|
||||
/*
|
||||
* Call this function if flush, or invalidate fail and you do not
|
||||
* wish to retry the writes. This will throw away any dirty data
|
||||
* not written. If any blocks for di are held, then it will call
|
||||
* abort().
|
||||
*/
|
||||
void bcache_abort_di(struct bcache *cache, int di);
|
||||
|
||||
//----------------------------------------------------------------
|
||||
// The next four functions are utilities written in terms of the above api.
|
||||
|
||||
// Prefetches the blocks necessary to satisfy a byte range.
|
||||
void bcache_prefetch_bytes(struct bcache *cache, int di, uint64_t start, size_t len);
|
||||
void bcache_prefetch_bytes(struct bcache *cache, int fd, uint64_t start, size_t len);
|
||||
|
||||
// Reads, writes and zeroes bytes. Returns false if errors occur.
|
||||
bool bcache_read_bytes(struct bcache *cache, int di, uint64_t start, size_t len, void *data);
|
||||
bool bcache_write_bytes(struct bcache *cache, int di, uint64_t start, size_t len, void *data);
|
||||
bool bcache_zero_bytes(struct bcache *cache, int di, uint64_t start, size_t len);
|
||||
bool bcache_set_bytes(struct bcache *cache, int di, uint64_t start, size_t len, uint8_t val);
|
||||
bool bcache_invalidate_bytes(struct bcache *cache, int di, uint64_t start, size_t len);
|
||||
bool bcache_read_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data);
|
||||
bool bcache_write_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data);
|
||||
bool bcache_zero_bytes(struct bcache *cache, int fd, uint64_t start, size_t len);
|
||||
bool bcache_set_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, uint8_t val);
|
||||
|
||||
void bcache_set_last_byte(struct bcache *cache, int di, uint64_t offset, int sector_size);
|
||||
void bcache_unset_last_byte(struct bcache *cache, int di);
|
||||
void bcache_set_last_byte(struct bcache *cache, int fd, uint64_t offset, int sector_size);
|
||||
void bcache_unset_last_byte(struct bcache *cache, int fd);
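/* Illustrative only: a write bracketed by the last-byte limit, so the
 * final bcache block is clipped at 'dev_size' (an assumed device size
 * in bytes) instead of running past the end of the device. */
static bool _example_limited_write(struct bcache *cache, int fd,
                                   uint64_t start, size_t len, void *data,
                                   uint64_t dev_size)
{
        bool ok;

        bcache_set_last_byte(cache, fd, dev_size, 512);
        ok = bcache_write_bytes(cache, fd, start, len, data);
        bcache_unset_last_byte(cache, fd);
        return ok;
}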
|
||||
|
||||
//----------------------------------------------------------------
|
||||
|
||||
int bcache_set_fd(int fd); /* returns di */
|
||||
void bcache_clear_fd(int di);
|
||||
int bcache_change_fd(int di, int fd);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
|
||||
#include "base/memory/zalloc.h"
|
||||
#include "lib/misc/lib.h"
|
||||
#include "lib/device/dev-type.h"
|
||||
#include "lib/datastruct/btree.h"
|
||||
#include "lib/config/config.h"
|
||||
#include "lib/commands/toolcontext.h"
|
||||
@@ -35,7 +34,7 @@ struct dev_iter {
|
||||
|
||||
struct dir_list {
|
||||
struct dm_list list;
|
||||
char dir[];
|
||||
char dir[0];
|
||||
};
|
||||
|
||||
static struct {
|
||||
@@ -64,9 +63,9 @@ static int _insert(const char *path, const struct stat *info,
|
||||
/* Setup non-zero members of passed zeroed 'struct device' */
|
||||
static void _dev_init(struct device *dev)
|
||||
{
|
||||
dev->phys_block_size = -1;
|
||||
dev->block_size = -1;
|
||||
dev->fd = -1;
|
||||
dev->bcache_fd = -1;
|
||||
dev->bcache_di = -1;
|
||||
dev->read_ahead = -1;
|
||||
|
||||
dev->ext.enabled = 0;
|
||||
@@ -359,14 +358,12 @@ static int _get_sysfs_value(const char *path, char *buf, size_t buf_size, int er
|
||||
int r = 0;
|
||||
|
||||
if (!(fp = fopen(path, "r"))) {
|
||||
if (error_if_no_value)
|
||||
log_sys_error("fopen", path);
|
||||
log_sys_error("fopen", path);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!fgets(buf, buf_size, fp)) {
|
||||
if (error_if_no_value)
|
||||
log_sys_error("fgets", path);
|
||||
log_sys_error("fgets", path);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -1091,7 +1088,7 @@ out:
|
||||
static void _insert_dirs(struct dm_list *dirs)
|
||||
{
|
||||
struct dir_list *dl;
|
||||
struct udev *udev = NULL;
|
||||
struct udev *udev;
|
||||
int with_udev;
|
||||
|
||||
with_udev = obtain_device_list_from_udev() &&
|
||||
@@ -1158,13 +1155,13 @@ static int _insert(const char *path, const struct stat *info,
|
||||
}
|
||||
|
||||
if (rec && !_insert_dir(path))
|
||||
return 0;
|
||||
return_0;
|
||||
} else { /* add a device */
|
||||
if (!S_ISBLK(info->st_mode))
|
||||
return 1;
|
||||
|
||||
if (!_insert_dev(path, info->st_rdev))
|
||||
return 0;
|
||||
return_0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
@@ -1423,9 +1420,17 @@ const char *dev_name_confirmed(struct device *dev, int quiet)
|
||||
return dev_name(dev);
|
||||
}
|
||||
|
||||
struct device *dev_hash_get(const char *name)
|
||||
/* Provide a custom reason when a device is ignored */
|
||||
const char *dev_cache_filtered_reason(const char *name)
|
||||
{
|
||||
return (struct device *) dm_hash_lookup(_cache.names, name);
|
||||
const char *reason = "not found";
|
||||
struct device *d = (struct device *) dm_hash_lookup(_cache.names, name);
|
||||
|
||||
if (d)
|
||||
/* FIXME Record which filter caused the exclusion */
|
||||
reason = "excluded by a filter";
|
||||
|
||||
return reason;
|
||||
}
|
||||
|
||||
struct device *dev_cache_get(struct cmd_context *cmd, const char *name, struct dev_filter *f)
|
||||
@@ -1456,7 +1461,6 @@ struct device *dev_cache_get(struct cmd_context *cmd, const char *name, struct d
|
||||
_insert(name, info_available ? &buf : NULL, 0, obtain_device_list_from_udev());
|
||||
d = (struct device *) dm_hash_lookup(_cache.names, name);
|
||||
if (!d) {
|
||||
log_debug_devs("Device name not found in dev_cache repeat dev_cache_scan for %s", name);
|
||||
dev_cache_scan();
|
||||
d = (struct device *) dm_hash_lookup(_cache.names, name);
|
||||
}
|
||||
@@ -1519,7 +1523,7 @@ struct device *dev_cache_get_by_devt(struct cmd_context *cmd, dev_t dev, struct
|
||||
sysfs_dir = dm_sysfs_dir();
|
||||
if (sysfs_dir && *sysfs_dir) {
|
||||
/* First check if dev is sysfs to avoid useless scan */
|
||||
if (dm_snprintf(path, sizeof(path), "%sdev/block/%d:%d",
|
||||
if (dm_snprintf(path, sizeof(path), "%s/dev/block/%d:%d",
|
||||
sysfs_dir, (int)MAJOR(dev), (int)MINOR(dev)) < 0) {
|
||||
log_error("dm_snprintf partition failed.");
|
||||
return NULL;
|
||||
@@ -1532,8 +1536,6 @@ struct device *dev_cache_get_by_devt(struct cmd_context *cmd, dev_t dev, struct
|
||||
}
|
||||
}
|
||||
|
||||
log_debug_devs("Device num not found in dev_cache repeat dev_cache_scan for %d:%d",
|
||||
(int)MAJOR(dev), (int)MINOR(dev));
|
||||
dev_cache_scan();
|
||||
d = _dev_cache_seek_devt(dev);
|
||||
}
|
||||
@@ -1632,20 +1634,3 @@ const char *dev_name(const struct device *dev)
|
||||
return (dev && dev->aliases.n) ? dm_list_item(dev->aliases.n, struct dm_str_list)->str :
|
||||
unknown_device_name();
|
||||
}
|
||||
|
||||
bool dev_cache_has_md_with_end_superblock(struct dev_types *dt)
|
||||
{
|
||||
struct btree_iter *iter = btree_first(_cache.devices);
|
||||
struct device *dev;
|
||||
|
||||
while (iter) {
|
||||
dev = btree_get_data(iter);
|
||||
|
||||
if (dev_is_md_with_end_superblock(dt, dev))
|
||||
return true;
|
||||
|
||||
iter = btree_next(iter);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -17,7 +17,6 @@
|
||||
#define _LVM_DEV_CACHE_H
|
||||
|
||||
#include "lib/device/device.h"
|
||||
#include "lib/device/dev-type.h"
|
||||
#include "lib/misc/lvm-wrappers.h"
|
||||
|
||||
struct cmd_context;
|
||||
@@ -28,7 +27,7 @@ struct cmd_context;
|
||||
struct dev_filter {
|
||||
int (*passes_filter) (struct cmd_context *cmd, struct dev_filter *f, struct device *dev, const char *use_filter_name);
|
||||
void (*destroy) (struct dev_filter *f);
|
||||
void (*wipe) (struct cmd_context *cmd, struct dev_filter *f, struct device *dev, const char *use_filter_name);
|
||||
void (*wipe) (struct dev_filter *f);
|
||||
void *private;
|
||||
unsigned use_count;
|
||||
const char *name;
|
||||
@@ -54,11 +53,10 @@ int dev_cache_has_scanned(void);
|
||||
|
||||
int dev_cache_add_dir(const char *path);
|
||||
struct device *dev_cache_get(struct cmd_context *cmd, const char *name, struct dev_filter *f);
|
||||
const char *dev_cache_filtered_reason(const char *name);
|
||||
|
||||
struct device *dev_cache_get_by_devt(struct cmd_context *cmd, dev_t device, struct dev_filter *f, int *filtered);
|
||||
|
||||
struct device *dev_hash_get(const char *name);
|
||||
|
||||
void dev_set_preferred_name(struct dm_str_list *sl, struct device *dev);
|
||||
|
||||
/*
|
||||
@@ -73,6 +71,4 @@ void dev_reset_error_count(struct cmd_context *cmd);
|
||||
|
||||
void dev_cache_failed_path(struct device *dev, const char *path);
|
||||
|
||||
bool dev_cache_has_md_with_end_superblock(struct dev_types *dt);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -53,6 +53,77 @@
|
||||
|
||||
static unsigned _dev_size_seqno = 1;
|
||||
|
||||
/*
|
||||
* Get the physical and logical block size for a device.
|
||||
*/
|
||||
int dev_get_block_size(struct device *dev, unsigned int *physical_block_size, unsigned int *block_size)
|
||||
{
|
||||
const char *name = dev_name(dev);
|
||||
int fd = dev->bcache_fd;
|
||||
int do_close = 0;
|
||||
int r = 1;
|
||||
|
||||
if ((dev->phys_block_size > 0) && (dev->block_size > 0)) {
|
||||
*physical_block_size = (unsigned int)dev->phys_block_size;
|
||||
*block_size = (unsigned int)dev->block_size;
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (fd <= 0) {
|
||||
if (!dev->open_count) {
|
||||
if (!dev_open_readonly(dev))
|
||||
return_0;
|
||||
do_close = 1;
|
||||
}
|
||||
fd = dev_fd(dev);
|
||||
}
|
||||
|
||||
if (dev->block_size == -1) {
|
||||
if (ioctl(fd, BLKBSZGET, &dev->block_size) < 0) {
|
||||
log_sys_error("ioctl BLKBSZGET", name);
|
||||
r = 0;
|
||||
goto out;
|
||||
}
|
||||
log_debug_devs("%s: Block size is %u bytes", name, dev->block_size);
|
||||
}
|
||||
|
||||
#ifdef BLKPBSZGET
|
||||
/* BLKPBSZGET is available in kernel >= 2.6.32 only */
|
||||
if (dev->phys_block_size == -1) {
|
||||
if (ioctl(fd, BLKPBSZGET, &dev->phys_block_size) < 0) {
|
||||
log_sys_error("ioctl BLKPBSZGET", name);
|
||||
r = 0;
|
||||
goto out;
|
||||
}
|
||||
log_debug_devs("%s: Physical block size is %u bytes", name, dev->phys_block_size);
|
||||
}
|
||||
#elif defined (BLKSSZGET)
|
||||
/* if we can't get physical block size, just use logical block size instead */
|
||||
if (dev->phys_block_size == -1) {
|
||||
if (ioctl(fd, BLKSSZGET, &dev->phys_block_size) < 0) {
|
||||
log_sys_error("ioctl BLKSSZGET", name);
|
||||
r = 0;
|
||||
goto out;
|
||||
}
|
||||
log_debug_devs("%s: Physical block size can't be determined: Using logical block size of %u bytes", name, dev->phys_block_size);
|
||||
}
|
||||
#else
|
||||
/* if even BLKSSZGET is not available, use default 512b */
|
||||
if (dev->phys_block_size == -1) {
|
||||
dev->phys_block_size = 512;
|
||||
log_debug_devs("%s: Physical block size can't be determined: Using block size of %u bytes instead", name, dev->phys_block_size);
|
||||
}
|
||||
#endif
|
||||
|
||||
*physical_block_size = (unsigned int) dev->phys_block_size;
|
||||
*block_size = (unsigned int) dev->block_size;
|
||||
out:
|
||||
if (do_close && !dev_close_immediate(dev))
|
||||
stack;
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static int _dev_get_size_file(struct device *dev, uint64_t *size)
|
||||
{
|
||||
const char *name = dev_name(dev);
|
||||
@@ -86,9 +157,6 @@ static int _dev_get_size_dev(struct device *dev, uint64_t *size)
|
||||
int fd = dev->bcache_fd;
|
||||
int do_close = 0;
|
||||
|
||||
if (dm_list_empty(&dev->aliases))
|
||||
return 0;
|
||||
|
||||
if (dev->size_seqno == _dev_size_seqno) {
|
||||
log_very_verbose("%s: using cached size %" PRIu64 " sectors",
|
||||
name, dev->size);
|
||||
@@ -97,7 +165,7 @@ static int _dev_get_size_dev(struct device *dev, uint64_t *size)
|
||||
}
|
||||
|
||||
if (fd <= 0) {
|
||||
if (!dev_open_readonly_quiet(dev))
|
||||
if (!dev_open_readonly(dev))
|
||||
return_0;
|
||||
fd = dev_fd(dev);
|
||||
do_close = 1;
|
||||
@@ -106,7 +174,7 @@ static int _dev_get_size_dev(struct device *dev, uint64_t *size)
|
||||
if (ioctl(fd, BLKGETSIZE64, size) < 0) {
|
||||
log_sys_error("ioctl BLKGETSIZE64", name);
|
||||
if (do_close && !dev_close_immediate(dev))
|
||||
stack;
|
||||
log_sys_error("close", name);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -117,7 +185,7 @@ static int _dev_get_size_dev(struct device *dev, uint64_t *size)
|
||||
log_very_verbose("%s: size is %" PRIu64 " sectors", name, *size);
|
||||
|
||||
if (do_close && !dev_close_immediate(dev))
|
||||
stack;
|
||||
log_sys_error("close", name);
|
||||
|
||||
return 1;
|
||||
}
|
||||
@@ -131,10 +199,8 @@ static int _dev_read_ahead_dev(struct device *dev, uint32_t *read_ahead)
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (!dev_open_readonly_quiet(dev)) {
|
||||
log_error("Failed to open to get readahead %s", dev_name(dev));
|
||||
return 0;
|
||||
}
|
||||
if (!dev_open_readonly(dev))
|
||||
return_0;
|
||||
|
||||
if (ioctl(dev->fd, BLKRAGET, &read_ahead_long) < 0) {
|
||||
log_sys_error("ioctl BLKRAGET", dev_name(dev));
|
||||
@@ -184,60 +250,6 @@ static int _dev_discard_blocks(struct device *dev, uint64_t offset_bytes, uint64
|
||||
return 1;
|
||||
}
|
||||
|
||||
int dev_get_direct_block_sizes(struct device *dev, unsigned int *physical_block_size,
|
||||
unsigned int *logical_block_size)
|
||||
{
|
||||
int fd = dev->bcache_fd;
|
||||
int do_close = 0;
|
||||
unsigned int pbs = 0;
|
||||
unsigned int lbs = 0;
|
||||
|
||||
if (dev->physical_block_size || dev->logical_block_size) {
|
||||
*physical_block_size = dev->physical_block_size;
|
||||
*logical_block_size = dev->logical_block_size;
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (fd <= 0) {
|
||||
if (!dev_open_readonly_quiet(dev))
|
||||
return 0;
|
||||
fd = dev_fd(dev);
|
||||
do_close = 1;
|
||||
}
|
||||
|
||||
#ifdef BLKPBSZGET /* not defined before kernel version 2.6.32 (e.g. rhel5) */
|
||||
/*
|
||||
* BLKPBSZGET from kernel comment for blk_queue_physical_block_size:
|
||||
* "the lowest possible sector size that the hardware can operate on
|
||||
* without reverting to read-modify-write operations"
|
||||
*/
|
||||
if (ioctl(fd, BLKPBSZGET, &pbs)) {
|
||||
stack;
|
||||
pbs = 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* BLKSSZGET from kernel comment for blk_queue_logical_block_size:
|
||||
* "the lowest possible block size that the storage device can address."
|
||||
*/
|
||||
if (ioctl(fd, BLKSSZGET, &lbs)) {
|
||||
stack;
|
||||
lbs = 0;
|
||||
}
|
||||
|
||||
dev->physical_block_size = pbs;
|
||||
dev->logical_block_size = lbs;
|
||||
|
||||
*physical_block_size = pbs;
|
||||
*logical_block_size = lbs;
|
||||
|
||||
if (do_close && !dev_close_immediate(dev))
|
||||
stack;
|
||||
|
||||
return 1;
|
||||
}
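dev_get_direct_block_sizes() caches both values on the struct device, so repeated callers are cheap. A typical use is spotting 512e drives, where IO smaller than the physical block size forces read-modify-write cycles; a hedged sketch of such a caller (the message text and the comparison policy are illustrative, not from the tree):

static void _warn_if_sub_physical_io(struct device *dev)
{
	unsigned int pbs = 0, lbs = 0;

	if (!dev_get_direct_block_sizes(dev, &pbs, &lbs)) {
		stack;
		return;
	}

	/* e.g. 512e drives report logical 512 and physical 4096 */
	if (pbs && lbs && pbs > lbs)
		log_debug_devs("%s: IO below %u bytes may trigger read-modify-write.",
			       dev_name(dev), pbs);
}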
|
||||
|
||||
/*-----------------------------------------------------------------
|
||||
* Public functions
|
||||
*---------------------------------------------------------------*/
|
||||
@@ -449,8 +461,10 @@ int dev_open_readonly_quiet(struct device *dev)
|
||||
static void _close(struct device *dev)
|
||||
{
|
||||
if (close(dev->fd))
|
||||
log_sys_debug("close", dev_name(dev));
|
||||
log_sys_error("close", dev_name(dev));
|
||||
dev->fd = -1;
|
||||
dev->phys_block_size = -1;
|
||||
dev->block_size = -1;
|
||||
|
||||
log_debug_devs("Closed %s", dev_name(dev));
|
||||
|
||||
|
||||
@@ -16,7 +16,6 @@
|
||||
#include "lib/misc/lib.h"
|
||||
#include "lib/device/dev-type.h"
|
||||
#include "lib/mm/xlate.h"
|
||||
#include "lib/misc/crc.h"
|
||||
#ifdef UDEV_SYNC_SUPPORT
|
||||
#include <libudev.h> /* for MD detection using udev db records */
|
||||
#include "lib/device/dev-ext-udev-constants.h"
|
||||
@@ -49,106 +48,48 @@ static int _dev_has_md_magic(struct device *dev, uint64_t sb_offset)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define IMSM_SIGNATURE "Intel Raid ISM Cfg Sig. "
|
||||
#define IMSM_SIG_LEN (sizeof(IMSM_SIGNATURE) - 1)
|
||||
|
||||
static int _dev_has_imsm_magic(struct device *dev, uint64_t devsize_sectors)
|
||||
{
|
||||
char imsm_signature[IMSM_SIG_LEN];
|
||||
uint64_t off = (devsize_sectors * 512) - 1024;
|
||||
|
||||
if (!dev_read_bytes(dev, off, IMSM_SIG_LEN, imsm_signature))
|
||||
return_0;
|
||||
|
||||
if (!memcmp(imsm_signature, IMSM_SIGNATURE, IMSM_SIG_LEN))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define DDF_MAGIC 0xDE11DE11
|
||||
struct ddf_header {
|
||||
uint32_t magic;
|
||||
uint32_t crc;
|
||||
char guid[24];
|
||||
char revision[8];
|
||||
char padding[472];
|
||||
};
|
||||
|
||||
static int _dev_has_ddf_magic(struct device *dev, uint64_t devsize_sectors, uint64_t *sb_offset)
|
||||
{
|
||||
struct ddf_header hdr;
|
||||
uint32_t crc, our_crc;
|
||||
uint64_t off;
|
||||
uint64_t devsize_bytes = devsize_sectors * 512;
|
||||
|
||||
if (devsize_bytes < 0x30000)
|
||||
return 0;
|
||||
|
||||
/* 512 bytes before the end of device (from libblkid) */
|
||||
off = ((devsize_bytes / 0x200) - 1) * 0x200;
|
||||
|
||||
if (!dev_read_bytes(dev, off, 512, &hdr))
|
||||
return_0;
|
||||
|
||||
if ((hdr.magic == cpu_to_be32(DDF_MAGIC)) ||
|
||||
(hdr.magic == cpu_to_le32(DDF_MAGIC))) {
|
||||
crc = hdr.crc;
|
||||
hdr.crc = 0xffffffff;
|
||||
our_crc = calc_crc(0, (const uint8_t *)&hdr, 512);
|
||||
|
||||
if ((cpu_to_be32(our_crc) == crc) ||
|
||||
(cpu_to_le32(our_crc) == crc)) {
|
||||
*sb_offset = off;
|
||||
return 1;
|
||||
} else {
|
||||
log_debug_devs("Found md ddf magic at %llu wrong crc %x disk %x %s",
|
||||
(unsigned long long)off, our_crc, crc, dev_name(dev));
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* 128KB before the end of device (from libblkid) */
|
||||
off = ((devsize_bytes / 0x200) - 257) * 0x200;
|
||||
|
||||
if (!dev_read_bytes(dev, off, 512, &hdr))
|
||||
return_0;
|
||||
|
||||
if ((hdr.magic == cpu_to_be32(DDF_MAGIC)) ||
|
||||
(hdr.magic == cpu_to_le32(DDF_MAGIC))) {
|
||||
crc = hdr.crc;
|
||||
hdr.crc = 0xffffffff;
|
||||
our_crc = calc_crc(0, (const uint8_t *)&hdr, 512);
|
||||
|
||||
if ((cpu_to_be32(our_crc) == crc) ||
|
||||
(cpu_to_le32(our_crc) == crc)) {
|
||||
*sb_offset = off;
|
||||
return 1;
|
||||
} else {
|
||||
log_debug_devs("Found md ddf magic at %llu wrong crc %x disk %x %s",
|
||||
(unsigned long long)off, our_crc, crc, dev_name(dev));
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * _udev_dev_is_md_component() only works if
 * external_device_info_source="udev"
 *
 * but
 *
 * udev_dev_is_md_component() in dev-type.c only works if
 * obtain_device_list_from_udev=1
 *
 * and neither of those config settings matches very well
 * with what we're doing here.
 *
 * Calculate the position of the superblock.
 * It is always aligned to a 4K boundary and
 * depending on minor_version, it can be:
 * 0: At least 8K, but less than 12K, from end of device
 * 1: At start of device
 * 2: 4K from start of device.
 */
typedef enum {
|
||||
MD_MINOR_VERSION_MIN,
|
||||
MD_MINOR_V0 = MD_MINOR_VERSION_MIN,
|
||||
MD_MINOR_V1,
|
||||
MD_MINOR_V2,
|
||||
MD_MINOR_VERSION_MAX = MD_MINOR_V2
|
||||
} md_minor_version_t;
|
||||
|
||||
static uint64_t _v1_sb_offset(uint64_t size, md_minor_version_t minor_version)
|
||||
{
|
||||
uint64_t sb_offset;
|
||||
|
||||
switch(minor_version) {
|
||||
case MD_MINOR_V0:
|
||||
sb_offset = (size - 8 * 2) & ~(4 * 2 - 1ULL);
|
||||
break;
|
||||
case MD_MINOR_V1:
|
||||
sb_offset = 0;
|
||||
break;
|
||||
case MD_MINOR_V2:
|
||||
sb_offset = 4 * 2;
|
||||
break;
|
||||
default:
|
||||
log_warn(INTERNAL_ERROR "WARNING: Unknown minor version %d.",
|
||||
minor_version);
|
||||
return 0;
|
||||
}
|
||||
sb_offset <<= SECTOR_SHIFT;
|
||||
|
||||
return sb_offset;
|
||||
}
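The offsets _v1_sb_offset() returns line up with the comment above; worked out by hand for a 1 GiB device (2097152 sectors), with SECTOR_SHIFT assumed to be 9, purely as an illustration:

/*
 * size = 2097152 sectors:
 *
 *   MD_MINOR_V0: (2097152 - 16) & ~7ULL = 2097136 sectors
 *                -> 2097136 << 9 = 1073733632 bytes, i.e. 8 KiB from the end
 *   MD_MINOR_V1: 0 -> superblock at the very start of the device
 *   MD_MINOR_V2: 8 sectors -> 8 << 9 = 4096 bytes, i.e. 4 KiB from the start
 */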
|
||||
|
||||
#ifdef UDEV_SYNC_SUPPORT
|
||||
static int _udev_dev_is_md_component(struct device *dev)
|
||||
static int _udev_dev_is_md(struct device *dev)
|
||||
{
|
||||
const char *value;
|
||||
struct dev_ext *ext;
|
||||
@@ -156,17 +97,14 @@ static int _udev_dev_is_md_component(struct device *dev)
|
||||
if (!(ext = dev_ext_get(dev)))
|
||||
return_0;
|
||||
|
||||
if (!(value = udev_device_get_property_value((struct udev_device *)ext->handle, DEV_EXT_UDEV_BLKID_TYPE))) {
|
||||
dev->flags |= DEV_UDEV_INFO_MISSING;
|
||||
if (!(value = udev_device_get_property_value((struct udev_device *)ext->handle, DEV_EXT_UDEV_BLKID_TYPE)))
|
||||
return 0;
|
||||
}
|
||||
|
||||
return !strcmp(value, DEV_EXT_UDEV_BLKID_TYPE_SW_RAID);
|
||||
}
|
||||
#else
|
||||
static int _udev_dev_is_md_component(struct device *dev)
|
||||
static int _udev_dev_is_md(struct device *dev)
|
||||
{
|
||||
dev->flags |= DEV_UDEV_INFO_MISSING;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
@@ -174,9 +112,10 @@ static int _udev_dev_is_md_component(struct device *dev)
|
||||
/*
|
||||
* Returns -1 on error
|
||||
*/
|
||||
static int _native_dev_is_md_component(struct device *dev, uint64_t *offset_found, int full)
|
||||
static int _native_dev_is_md(struct device *dev, uint64_t *offset_found, int full)
|
||||
{
|
||||
uint64_t size, sb_offset = 0;
|
||||
md_minor_version_t minor;
|
||||
uint64_t size, sb_offset;
|
||||
int ret;
|
||||
|
||||
if (!scan_bcache)
|
||||
@@ -191,9 +130,9 @@ static int _native_dev_is_md_component(struct device *dev, uint64_t *offset_foun
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Some md versions locate the magic number at the end of the device.
|
||||
* Those checks can't be satisfied with the initial scan data, and
|
||||
* require an extra read i/o at the end of every device. Issuing
|
||||
* Old md versions locate the magic number at the end of the device.
|
||||
* Those checks can't be satisfied with the initial bcache data, and
|
||||
* would require an extra read i/o at the end of every device. Issuing
|
||||
* an extra read to every device in every command, just to check for
|
||||
* the old md format is a bad tradeoff.
|
||||
*
|
||||
@@ -204,81 +143,42 @@ static int _native_dev_is_md_component(struct device *dev, uint64_t *offset_foun
|
||||
* and set it for commands that could possibly write to an md dev
|
||||
* (pvcreate/vgcreate/vgextend).
|
||||
*/
|
||||
|
||||
/*
|
||||
* md superblock version 1.1 at offset 0 from start
|
||||
*/
|
||||
|
||||
if (_dev_has_md_magic(dev, 0)) {
|
||||
log_debug_devs("Found md magic number at offset 0 of %s.", dev_name(dev));
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* md superblock version 1.2 at offset 4KB from start
|
||||
*/
|
||||
|
||||
if (_dev_has_md_magic(dev, 4096)) {
|
||||
log_debug_devs("Found md magic number at offset 4096 of %s.", dev_name(dev));
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!full) {
|
||||
sb_offset = 0;
|
||||
if (_dev_has_md_magic(dev, sb_offset)) {
|
||||
log_debug_devs("Found md magic number at offset 0 of %s.", dev_name(dev));
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
sb_offset = 8 << SECTOR_SHIFT;
|
||||
if (_dev_has_md_magic(dev, sb_offset)) {
|
||||
log_debug_devs("Found md magic number at offset %d of %s.", (int)sb_offset, dev_name(dev));
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle superblocks at the end of the device.
|
||||
*/
|
||||
|
||||
/*
|
||||
* md superblock version 0 at 64KB from end of device
|
||||
* (after end is aligned to 64KB)
|
||||
*/
|
||||
|
||||
/* Check if it is an md component device. */
|
||||
/* Version 0.90.0 */
|
||||
sb_offset = MD_NEW_SIZE_SECTORS(size) << SECTOR_SHIFT;
|
||||
|
||||
if (_dev_has_md_magic(dev, sb_offset)) {
|
||||
log_debug_devs("Found md magic number at offset %llu of %s.", (unsigned long long)sb_offset, dev_name(dev));
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* md superblock version 1.0 at 8KB from end of device
|
||||
*/
|
||||
|
||||
sb_offset = ((size - 8 * 2) & ~(4 * 2 - 1ULL)) << SECTOR_SHIFT;
|
||||
|
||||
if (_dev_has_md_magic(dev, sb_offset)) {
|
||||
log_debug_devs("Found md magic number at offset %llu of %s.", (unsigned long long)sb_offset, dev_name(dev));
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* md imsm superblock 1K from end of device
|
||||
*/
|
||||
|
||||
if (_dev_has_imsm_magic(dev, size)) {
|
||||
log_debug_devs("Found md imsm magic number at offset %llu of %s.", (unsigned long long)sb_offset, dev_name(dev));
|
||||
sb_offset = 1024;
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* md ddf superblock 512 bytes from end, or 128KB from end
|
||||
*/
|
||||
|
||||
if (_dev_has_ddf_magic(dev, size, &sb_offset)) {
|
||||
log_debug_devs("Found md ddf magic number at offset %llu of %s.", (unsigned long long)sb_offset, dev_name(dev));
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
minor = MD_MINOR_VERSION_MIN;
|
||||
/* Version 1, try v1.0 -> v1.2 */
|
||||
do {
|
||||
sb_offset = _v1_sb_offset(size, minor);
|
||||
if (_dev_has_md_magic(dev, sb_offset)) {
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
} while (++minor <= MD_MINOR_VERSION_MAX);
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
@@ -288,7 +188,7 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int dev_is_md_component(struct device *dev, uint64_t *offset_found, int full)
|
||||
int dev_is_md(struct device *dev, uint64_t *offset_found, int full)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@@ -298,25 +198,19 @@ int dev_is_md_component(struct device *dev, uint64_t *offset_found, int full)
|
||||
* information is not in udev db.
|
||||
*/
|
||||
if ((dev->ext.src == DEV_EXT_NONE) || offset_found) {
|
||||
ret = _native_dev_is_md_component(dev, offset_found, full);
|
||||
ret = _native_dev_is_md(dev, offset_found, full);
|
||||
|
||||
if (!full) {
|
||||
if (!ret || (ret == -EAGAIN)) {
|
||||
if (udev_dev_is_md_component(dev))
|
||||
ret = 1;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
if (ret && (ret != -EAGAIN))
|
||||
dev->flags |= DEV_IS_MD_COMPONENT;
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (dev->ext.src == DEV_EXT_UDEV) {
|
||||
ret = _udev_dev_is_md_component(dev);
|
||||
if (ret && (ret != -EAGAIN))
|
||||
dev->flags |= DEV_IS_MD_COMPONENT;
|
||||
return ret;
|
||||
}
|
||||
if (dev->ext.src == DEV_EXT_UDEV)
|
||||
return _udev_dev_is_md(dev);
|
||||
|
||||
log_error(INTERNAL_ERROR "Missing hook for MD device recognition "
|
||||
"using external device info source %s", dev_ext_name(dev));
|
||||
@@ -386,12 +280,12 @@ static int _md_sysfs_attribute_scanf(struct dev_types *dt,
|
||||
return ret;
|
||||
|
||||
if (!(fp = fopen(path, "r"))) {
|
||||
log_debug("_md_sysfs_attribute_scanf fopen failed %s", path);
|
||||
log_sys_error("fopen", path);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!fgets(buffer, sizeof(buffer), fp)) {
|
||||
log_debug("_md_sysfs_attribute_scanf fgets failed %s", path);
|
||||
log_sys_error("fgets", path);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -533,7 +427,7 @@ int dev_is_md_with_end_superblock(struct dev_types *dt, struct device *dev)
|
||||
|
||||
if (_md_sysfs_attribute_scanf(dt, dev, attribute,
|
||||
"%s", &version_string) != 1)
|
||||
return 0;
|
||||
return -1;
|
||||
|
||||
log_very_verbose("Device %s %s is %s.",
|
||||
dev_name(dev), attribute, version_string);
|
||||
@@ -545,7 +439,7 @@ int dev_is_md_with_end_superblock(struct dev_types *dt, struct device *dev)
|
||||
|
||||
#else
|
||||
|
||||
int dev_is_md_component(struct device *dev __attribute__((unused)),
|
||||
int dev_is_md(struct device *dev __attribute__((unused)),
|
||||
uint64_t *sb __attribute__((unused)))
|
||||
{
|
||||
return 0;
|
||||
|
||||
@@ -42,6 +42,7 @@ int dev_is_pmem(struct device *dev)
|
||||
{
|
||||
FILE *fp;
|
||||
char path[PATH_MAX];
|
||||
char buffer[64];
|
||||
int is_pmem = 0;
|
||||
|
||||
if (dm_snprintf(path, sizeof(path), "%sdev/block/%d:%d/queue/dax",
|
||||
@@ -55,16 +56,27 @@ int dev_is_pmem(struct device *dev)
|
||||
if (!(fp = fopen(path, "r")))
|
||||
return 0;
|
||||
|
||||
if (fscanf(fp, "%d", &is_pmem) != 1)
|
||||
log_warn("Failed to parse DAX %s.", path);
|
||||
|
||||
if (is_pmem)
|
||||
log_debug("%s is pmem", dev_name(dev));
|
||||
if (!fgets(buffer, sizeof(buffer), fp)) {
|
||||
log_warn("Failed to read %s.", path);
|
||||
if (fclose(fp))
|
||||
log_sys_debug("fclose", path);
|
||||
return 0;
|
||||
} else if (sscanf(buffer, "%d", &is_pmem) != 1) {
|
||||
log_warn("Failed to parse %s '%s'.", path, buffer);
|
||||
if (fclose(fp))
|
||||
log_sys_debug("fclose", path);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (fclose(fp))
|
||||
log_sys_debug("fclose", path);
|
||||
|
||||
return is_pmem ? 1 : 0;
|
||||
if (is_pmem) {
|
||||
log_debug("%s is pmem", dev_name(dev));
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int dev_is_lv(struct device *dev)
|
||||
@@ -72,7 +84,6 @@ int dev_is_lv(struct device *dev)
|
||||
FILE *fp;
|
||||
char path[PATH_MAX];
|
||||
char buffer[64];
|
||||
int ret = 0;
|
||||
|
||||
if (dm_snprintf(path, sizeof(path), "%sdev/block/%d:%d/dm/uuid",
|
||||
dm_sysfs_dir(),
|
||||
@@ -85,15 +96,17 @@ int dev_is_lv(struct device *dev)
|
||||
if (!(fp = fopen(path, "r")))
|
||||
return 0;
|
||||
|
||||
if (!fgets(buffer, sizeof(buffer), fp))
|
||||
log_debug("Failed to read %s.", path);
|
||||
else if (!strncmp(buffer, "LVM-", 4))
|
||||
ret = 1;
|
||||
if (!fgets(buffer, sizeof(buffer), fp)) {
|
||||
log_warn("Failed to read %s.", path);
|
||||
fclose(fp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (fclose(fp))
|
||||
log_sys_debug("fclose", path);
|
||||
fclose(fp);
|
||||
|
||||
return ret;
|
||||
if (!strncmp(buffer, "LVM-", 4))
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct dev_types *create_dev_types(const char *proc_dir,
|
||||
@@ -646,31 +659,6 @@ out:
	return ret;
}

#ifdef BLKID_WIPING_SUPPORT
int get_fs_block_size(struct device *dev, uint32_t *fs_block_size)
{
	char *block_size_str = NULL;

	if ((block_size_str = blkid_get_tag_value(NULL, "BLOCK_SIZE", dev_name(dev)))) {
		*fs_block_size = (uint32_t)atoi(block_size_str);
		free(block_size_str);
		log_debug("Found blkid BLOCK_SIZE %u for fs on %s", *fs_block_size, dev_name(dev));
		return 1;
	} else {
		log_debug("No blkid BLOCK_SIZE for fs on %s", dev_name(dev));
		*fs_block_size = 0;
		return 0;
	}
}
#else
int get_fs_block_size(struct device *dev, uint32_t *fs_block_size)
{
	log_debug("Disabled blkid BLOCK_SIZE for fs.");
	*fs_block_size = 0;
	return 0;
}
#endif
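Callers treat a zero *fs_block_size from get_fs_block_size() as "unknown" rather than a hard error, since blkid may simply have no BLOCK_SIZE tag for the filesystem. A hedged sketch of that calling convention (the 4096-byte fallback is illustrative, not from the tree):

static uint32_t _fs_block_size_or_default(struct device *dev)
{
	uint32_t fs_block_size = 0;

	/* Returns 0 (and zeroes fs_block_size) when blkid has no BLOCK_SIZE
	 * tag or when BLKID_WIPING_SUPPORT is compiled out. */
	if (!get_fs_block_size(dev, &fs_block_size) || !fs_block_size)
		return 4096;	/* assumed fallback for this example */

	return fs_block_size;
}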
|
||||
|
||||
#ifdef BLKID_WIPING_SUPPORT
|
||||
|
||||
static inline int _type_in_flag_list(const char *type, uint32_t flag_list)
|
||||
@@ -878,7 +866,7 @@ static int _wipe_known_signatures_with_lvm(struct device *dev, const char *name,
|
||||
wiped = &wiped_tmp;
|
||||
*wiped = 0;
|
||||
|
||||
if (!_wipe_signature(dev, "software RAID md superblock", name, 4, yes, force, wiped, dev_is_md_component) ||
|
||||
if (!_wipe_signature(dev, "software RAID md superblock", name, 4, yes, force, wiped, dev_is_md) ||
|
||||
!_wipe_signature(dev, "swap signature", name, 10, yes, force, wiped, dev_is_swap) ||
|
||||
!_wipe_signature(dev, "LUKS signature", name, 8, yes, force, wiped, dev_is_luks))
|
||||
return 0;
|
||||
@@ -1130,9 +1118,6 @@ static struct udev_device *_udev_get_dev(struct device *dev)
|
||||
i + 1, UDEV_DEV_IS_COMPONENT_ITERATION_COUNT,
|
||||
i * UDEV_DEV_IS_COMPONENT_USLEEP);
|
||||
|
||||
if (!udev_sleeping())
|
||||
break;
|
||||
|
||||
usleep(UDEV_DEV_IS_COMPONENT_USLEEP);
|
||||
i++;
|
||||
}
|
||||
@@ -1185,10 +1170,8 @@ int udev_dev_is_md_component(struct device *dev)
|
||||
const char *value;
|
||||
int ret = 0;
|
||||
|
||||
if (!obtain_device_list_from_udev()) {
|
||||
dev->flags |= DEV_UDEV_INFO_MISSING;
|
||||
if (!obtain_device_list_from_udev())
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!(udev_device = _udev_get_dev(dev)))
|
||||
return 0;
|
||||
@@ -1197,7 +1180,6 @@ int udev_dev_is_md_component(struct device *dev)
|
||||
if (value && !strcmp(value, DEV_EXT_UDEV_BLKID_TYPE_SW_RAID)) {
|
||||
log_debug("Device %s is md raid component based on blkid variable in udev db (%s=\"%s\").",
|
||||
dev_name(dev), DEV_EXT_UDEV_BLKID_TYPE, value);
|
||||
dev->flags |= DEV_IS_MD_COMPONENT;
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
@@ -1215,7 +1197,6 @@ int udev_dev_is_mpath_component(struct device *dev)
|
||||
|
||||
int udev_dev_is_md_component(struct device *dev)
|
||||
{
|
||||
dev->flags |= DEV_UDEV_INFO_MISSING;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -57,7 +57,7 @@ const char *dev_subsystem_name(struct dev_types *dt, struct device *dev);
|
||||
int major_is_scsi_device(struct dev_types *dt, int major);
|
||||
|
||||
/* Signature/superblock recognition with position returned where found. */
|
||||
int dev_is_md_component(struct device *dev, uint64_t *sb, int full);
|
||||
int dev_is_md(struct device *dev, uint64_t *sb, int full);
|
||||
int dev_is_swap(struct device *dev, uint64_t *signature, int full);
|
||||
int dev_is_luks(struct device *dev, uint64_t *signature, int full);
|
||||
int dasd_is_cdl_formatted(struct device *dev);
|
||||
@@ -97,6 +97,4 @@ int dev_is_pmem(struct device *dev);
|
||||
|
||||
int dev_is_lv(struct device *dev);
|
||||
|
||||
int get_fs_block_size(struct device *dev, uint32_t *fs_block_size);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -36,8 +36,6 @@
|
||||
#define DEV_FILTER_OUT_SCAN 0x00004000 /* filtered out during label scan */
|
||||
#define DEV_BCACHE_WRITE 0x00008000 /* bcache_fd is open with RDWR */
|
||||
#define DEV_SCAN_FOUND_LABEL 0x00010000 /* label scan read dev and found label */
|
||||
#define DEV_IS_MD_COMPONENT 0x00020000 /* device is an md component */
|
||||
#define DEV_UDEV_INFO_MISSING 0x00040000 /* we have no udev info for this device */
|
||||
|
||||
/*
|
||||
* Support for external device info.
|
||||
@@ -67,13 +65,11 @@ struct device {
|
||||
/* private */
|
||||
int fd;
|
||||
int open_count;
|
||||
int physical_block_size; /* From BLKPBSZGET: lowest possible sector size that the hardware can operate on without reverting to read-modify-write operations */
|
||||
int logical_block_size; /* From BLKSSZGET: lowest possible block size that the storage device can address */
|
||||
int phys_block_size;
|
||||
int block_size;
|
||||
int read_ahead;
|
||||
int bcache_fd;
|
||||
int bcache_di;
|
||||
uint32_t flags;
|
||||
uint32_t filtered_flags;
|
||||
unsigned size_seqno;
|
||||
uint64_t size;
|
||||
uint64_t end;
|
||||
@@ -133,8 +129,7 @@ void dev_size_seqno_inc(void);
|
||||
/*
|
||||
* All io should use these routines.
|
||||
*/
|
||||
int dev_get_direct_block_sizes(struct device *dev, unsigned int *physical_block_size,
|
||||
unsigned int *logical_block_size);
|
||||
int dev_get_block_size(struct device *dev, unsigned int *phys_block_size, unsigned int *block_size);
|
||||
int dev_get_size(struct device *dev, uint64_t *size);
|
||||
int dev_get_read_ahead(struct device *dev, uint32_t *read_ahead);
|
||||
int dev_discard_blocks(struct device *dev, uint64_t offset_bytes, uint64_t size_bytes);
|
||||
|
||||
@@ -406,12 +406,10 @@ int lvdisplay_full(struct cmd_context *cmd,
|
||||
struct lv_segment *seg = NULL;
|
||||
int lvm1compat;
|
||||
dm_percent_t snap_percent;
|
||||
int thin_pool_active = 0;
|
||||
dm_percent_t thin_data_percent = 0, thin_metadata_percent = 0;
|
||||
int thin_data_active = 0, thin_metadata_active = 0;
|
||||
dm_percent_t thin_data_percent, thin_metadata_percent;
|
||||
int thin_active = 0;
|
||||
dm_percent_t thin_percent = 0;
|
||||
struct lv_status_thin *thin_status = NULL;
|
||||
struct lv_status_thin_pool *thin_pool_status = NULL;
|
||||
dm_percent_t thin_percent;
|
||||
struct lv_status_cache *cache_status = NULL;
|
||||
struct lv_status_vdo *vdo_status = NULL;
|
||||
|
||||
@@ -505,18 +503,15 @@ int lvdisplay_full(struct cmd_context *cmd,
|
||||
if (seg->merge_lv)
|
||||
log_print("LV merging to %s",
|
||||
seg->merge_lv->name);
|
||||
if (inkernel && (thin_active = lv_thin_status(lv, 0, &thin_status))) {
|
||||
thin_percent = thin_status->usage;
|
||||
dm_pool_destroy(thin_status->mem);
|
||||
}
|
||||
if (inkernel)
|
||||
thin_active = lv_thin_percent(lv, 0, &thin_percent);
|
||||
if (lv_is_merging_origin(lv))
|
||||
log_print("LV merged with %s",
|
||||
find_snapshot(lv)->lv->name);
|
||||
} else if (lv_is_thin_pool(lv)) {
|
||||
if ((thin_pool_active = lv_thin_pool_status(lv, 0, &thin_pool_status))) {
|
||||
thin_data_percent = thin_pool_status->data_usage;
|
||||
thin_metadata_percent = thin_pool_status->metadata_usage;
|
||||
dm_pool_destroy(thin_pool_status->mem);
|
||||
if (lv_info(cmd, lv, 1, &info, 1, 1) && info.exists) {
|
||||
thin_data_active = lv_thin_pool_percent(lv, 0, &thin_data_percent);
|
||||
thin_metadata_active = lv_thin_pool_percent(lv, 1, &thin_metadata_percent);
|
||||
}
|
||||
/* FIXME: display thin_pool targets transid for activated LV as well */
|
||||
seg = first_seg(lv);
|
||||
@@ -527,10 +522,10 @@ int lvdisplay_full(struct cmd_context *cmd,
|
||||
log_print("LV origin of Cache LV %s", seg->lv->name);
|
||||
} else if (lv_is_cache(lv)) {
|
||||
seg = first_seg(lv);
|
||||
if (inkernel && lv_cache_status(lv, &cache_status)) {
|
||||
log_print("LV Cache pool name %s", seg->pool_lv->name);
|
||||
log_print("LV Cache origin name %s", seg_lv(seg, 0)->name);
|
||||
}
|
||||
if (inkernel && !lv_cache_status(lv, &cache_status))
|
||||
return_0;
|
||||
log_print("LV Cache pool name %s", seg->pool_lv->name);
|
||||
log_print("LV Cache origin name %s", seg_lv(seg, 0)->name);
|
||||
} else if (lv_is_cache_pool(lv)) {
|
||||
seg = first_seg(lv);
|
||||
log_print("LV Pool metadata %s", seg->metadata_lv->name);
|
||||
@@ -538,7 +533,7 @@ int lvdisplay_full(struct cmd_context *cmd,
|
||||
} else if (lv_is_vdo_pool(lv)) {
|
||||
seg = first_seg(lv);
|
||||
log_print("LV VDO Pool data %s", seg_lv(seg, 0)->name);
|
||||
if (lv_vdo_pool_status(lv, 0, &vdo_status)) { /* FIXME: flush option? */
|
||||
if (inkernel && lv_vdo_pool_status(lv, 0, &vdo_status)) { /* FIXME: flush option? */
|
||||
log_print("LV VDO Pool usage %s%%",
|
||||
display_percent(cmd, vdo_status->usage));
|
||||
log_print("LV VDO Pool saving %s%%",
|
||||
@@ -596,12 +591,13 @@ int lvdisplay_full(struct cmd_context *cmd,
|
||||
dm_pool_destroy(cache_status->mem);
|
||||
}
|
||||
|
||||
if (thin_pool_active) {
|
||||
if (thin_data_active)
|
||||
log_print("Allocated pool data %s%%",
|
||||
display_percent(cmd, thin_data_percent));
|
||||
|
||||
if (thin_metadata_active)
|
||||
log_print("Allocated metadata %s%%",
|
||||
display_percent(cmd, thin_metadata_percent));
|
||||
}
|
||||
|
||||
if (thin_active)
|
||||
log_print("Mapped size %s%%",
|
||||
|
||||
@@ -60,16 +60,13 @@ static void _composite_destroy(struct dev_filter *f)
|
||||
free(f);
|
||||
}
|
||||
|
||||
static void _wipe(struct cmd_context *cmd, struct dev_filter *f, struct device *dev, const char *use_filter_name)
|
||||
static void _wipe(struct dev_filter *f)
|
||||
{
|
||||
struct dev_filter **filters;
|
||||
|
||||
for (filters = (struct dev_filter **) f->private; *filters; ++filters) {
|
||||
if (use_filter_name && strcmp((*filters)->name, use_filter_name))
|
||||
continue;
|
||||
for (filters = (struct dev_filter **) f->private; *filters; ++filters)
|
||||
if ((*filters)->wipe)
|
||||
(*filters)->wipe(cmd, *filters, dev, use_filter_name);
|
||||
}
|
||||
(*filters)->wipe(*filters);
|
||||
}
|
||||
|
||||
struct dev_filter *composite_filter_create(int n, int use_dev_ext_info, struct dev_filter **filters)
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
#include "base/memory/zalloc.h"
|
||||
#include "lib/misc/lib.h"
|
||||
#include "lib/filters/filter.h"
|
||||
#include "lib/commands/toolcontext.h"
|
||||
|
||||
#ifdef UDEV_SYNC_SUPPORT
|
||||
#include <libudev.h>
|
||||
@@ -70,11 +69,6 @@ static int _ignore_fwraid(struct cmd_context *cmd, struct dev_filter *f __attrib
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (cmd->filter_nodata_only)
|
||||
return 1;
|
||||
|
||||
dev->filtered_flags &= ~DEV_FILTERED_FWRAID;
|
||||
|
||||
if (!fwraid_filtering())
|
||||
return 1;
|
||||
|
||||
@@ -86,14 +80,12 @@ static int _ignore_fwraid(struct cmd_context *cmd, struct dev_filter *f __attrib
|
||||
else
|
||||
log_debug_devs(MSG_SKIPPING " [%s:%p]", dev_name(dev),
|
||||
dev_ext_name(dev), dev->ext.handle);
|
||||
dev->filtered_flags |= DEV_FILTERED_FWRAID;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
log_debug_devs("%s: Skipping: error in firmware RAID component detection",
|
||||
dev_name(dev));
|
||||
dev->filtered_flags |= DEV_FILTERED_FWRAID;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -42,8 +42,6 @@ static int _passes_internal(struct cmd_context *cmd, struct dev_filter *f __attr
|
||||
{
|
||||
struct device_list *devl;
|
||||
|
||||
dev->filtered_flags &= ~DEV_FILTERED_INTERNAL;
|
||||
|
||||
if (!internal_filtering())
|
||||
return 1;
|
||||
|
||||
@@ -52,7 +50,6 @@ static int _passes_internal(struct cmd_context *cmd, struct dev_filter *f __attr
|
||||
return 1;
|
||||
}
|
||||
|
||||
dev->filtered_flags |= DEV_FILTERED_INTERNAL;
|
||||
log_debug_devs("%s: Skipping for internal filtering.", dev_name(dev));
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -86,11 +86,6 @@ static int _passes_md_filter(struct cmd_context *cmd, struct dev_filter *f __att
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (cmd->filter_nodata_only)
|
||||
return 1;
|
||||
|
||||
dev->filtered_flags &= ~DEV_FILTERED_MD_COMPONENT;
|
||||
|
||||
/*
|
||||
* When md_component_detection=0, don't even try to skip md
|
||||
* components.
|
||||
@@ -98,7 +93,7 @@ static int _passes_md_filter(struct cmd_context *cmd, struct dev_filter *f __att
|
||||
if (!md_filtering())
|
||||
return 1;
|
||||
|
||||
ret = dev_is_md_component(dev, NULL, cmd->use_full_md_check);
|
||||
ret = dev_is_md(dev, NULL, cmd->use_full_md_check);
|
||||
|
||||
if (ret == -EAGAIN) {
|
||||
/* let pass, call again after scan */
|
||||
@@ -117,14 +112,12 @@ static int _passes_md_filter(struct cmd_context *cmd, struct dev_filter *f __att
|
||||
else
|
||||
log_debug_devs(MSG_SKIPPING " [%s:%p]", dev_name(dev),
|
||||
dev_ext_name(dev), dev->ext.handle);
|
||||
dev->filtered_flags |= DEV_FILTERED_MD_COMPONENT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
log_debug_devs("%s: Skipping: error in md component detection",
|
||||
dev_name(dev));
|
||||
dev->filtered_flags |= DEV_FILTERED_MD_COMPONENT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -274,15 +274,12 @@ static int _dev_is_mpath(struct dev_filter *f, struct device *dev)
|
||||
|
||||
static int _ignore_mpath(struct cmd_context *cmd, struct dev_filter *f, struct device *dev, const char *use_filter_name)
|
||||
{
|
||||
dev->filtered_flags &= ~DEV_FILTERED_MPATH_COMPONENT;
|
||||
|
||||
if (_dev_is_mpath(f, dev) == 1) {
|
||||
if (dev->ext.src == DEV_EXT_NONE)
|
||||
log_debug_devs(MSG_SKIPPING, dev_name(dev));
|
||||
else
|
||||
log_debug_devs(MSG_SKIPPING " [%s:%p]", dev_name(dev),
|
||||
dev_ext_name(dev), dev->ext.handle);
|
||||
dev->filtered_flags |= DEV_FILTERED_MPATH_COMPONENT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -16,7 +16,6 @@
|
||||
#include "base/memory/zalloc.h"
|
||||
#include "lib/misc/lib.h"
|
||||
#include "lib/filters/filter.h"
|
||||
#include "lib/commands/toolcontext.h"
|
||||
|
||||
#define MSG_SKIPPING "%s: Skipping: Partition table signature found"
|
||||
|
||||
@@ -25,11 +24,6 @@ static int _passes_partitioned_filter(struct cmd_context *cmd, struct dev_filter
|
||||
struct dev_types *dt = (struct dev_types *) f->private;
|
||||
int ret;
|
||||
|
||||
if (cmd->filter_nodata_only)
|
||||
return 1;
|
||||
|
||||
dev->filtered_flags &= ~DEV_FILTERED_PARTITIONED;
|
||||
|
||||
ret = dev_is_partitioned(dt, dev);
|
||||
|
||||
if (ret == -EAGAIN) {
|
||||
@@ -45,7 +39,6 @@ static int _passes_partitioned_filter(struct cmd_context *cmd, struct dev_filter
|
||||
else
|
||||
log_debug_devs(MSG_SKIPPING " [%s:%p]", dev_name(dev),
|
||||
dev_ext_name(dev), dev->ext.handle);
|
||||
dev->filtered_flags |= DEV_FILTERED_PARTITIONED;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -64,17 +64,11 @@ static int _init_hash(struct pfilter *pf)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void _persistent_filter_wipe(struct cmd_context *cmd, struct dev_filter *f, struct device *dev, const char *use_filter_name)
|
||||
static void _persistent_filter_wipe(struct dev_filter *f)
|
||||
{
|
||||
struct pfilter *pf = (struct pfilter *) f->private;
|
||||
struct dm_str_list *sl;
|
||||
|
||||
if (!dev) {
|
||||
dm_hash_wipe(pf->devices);
|
||||
} else {
|
||||
dm_list_iterate_items(sl, &dev->aliases)
|
||||
dm_hash_remove(pf->devices, sl->str);
|
||||
}
|
||||
dm_hash_wipe(pf->devices);
|
||||
}
|
||||
|
||||
static int _lookup_p(struct cmd_context *cmd, struct dev_filter *f, struct device *dev, const char *use_filter_name)
|
||||
|
||||
@@ -151,8 +151,6 @@ static int _accept_p(struct cmd_context *cmd, struct dev_filter *f, struct devic
|
||||
struct rfilter *rf = (struct rfilter *) f->private;
|
||||
struct dm_str_list *sl;
|
||||
|
||||
dev->filtered_flags &= ~DEV_FILTERED_REGEX;
|
||||
|
||||
dm_list_iterate_items(sl, &dev->aliases) {
|
||||
m = dm_regex_match(rf->engine, sl->str);
|
||||
|
||||
@@ -170,10 +168,8 @@ static int _accept_p(struct cmd_context *cmd, struct dev_filter *f, struct devic
|
||||
first = 0;
|
||||
}
|
||||
|
||||
if (rejected) {
|
||||
dev->filtered_flags |= DEV_FILTERED_REGEX;
|
||||
if (rejected)
|
||||
log_debug_devs("%s: Skipping (regex)", dev_name(dev));
|
||||
}
|
||||
|
||||
/*
|
||||
* pass everything that doesn't match
|
||||
|
||||
@@ -16,7 +16,6 @@
|
||||
#include "base/memory/zalloc.h"
|
||||
#include "lib/misc/lib.h"
|
||||
#include "lib/filters/filter.h"
|
||||
#include "lib/commands/toolcontext.h"
|
||||
|
||||
#ifdef __linux__
|
||||
|
||||
@@ -28,11 +27,6 @@ static int _ignore_signature(struct cmd_context *cmd, struct dev_filter *f __att
|
||||
char buf[BUFSIZE];
|
||||
int ret = 0;
|
||||
|
||||
if (cmd->filter_nodata_only)
|
||||
return 1;
|
||||
|
||||
dev->filtered_flags &= ~DEV_FILTERED_SIGNATURE;
|
||||
|
||||
if (!scan_bcache) {
|
||||
/* let pass, call again after scan */
|
||||
log_debug_devs("filter signature deferred %s", dev_name(dev));
|
||||
@@ -46,21 +40,18 @@ static int _ignore_signature(struct cmd_context *cmd, struct dev_filter *f __att
|
||||
log_debug_devs("%s: Skipping: error in signature detection",
|
||||
dev_name(dev));
|
||||
ret = 0;
|
||||
dev->filtered_flags |= DEV_FILTERED_SIGNATURE;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (dev_is_lvm1(dev, buf, BUFSIZE)) {
|
||||
log_debug_devs("%s: Skipping lvm1 device", dev_name(dev));
|
||||
ret = 0;
|
||||
dev->filtered_flags |= DEV_FILTERED_SIGNATURE;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (dev_is_pool(dev, buf, BUFSIZE)) {
|
||||
log_debug_devs("%s: Skipping gfs-pool device", dev_name(dev));
|
||||
ret = 0;
|
||||
dev->filtered_flags |= DEV_FILTERED_SIGNATURE;
|
||||
goto out;
|
||||
}
|
||||
ret = 1;
|
||||
|
||||
@@ -264,8 +264,6 @@ static int _accept_p(struct cmd_context *cmd, struct dev_filter *f, struct devic
|
||||
{
|
||||
struct dev_set *ds = (struct dev_set *) f->private;
|
||||
|
||||
dev->filtered_flags &= ~DEV_FILTERED_SYSFS;
|
||||
|
||||
if (!ds->initialised)
|
||||
_init_devs(ds);
|
||||
|
||||
@@ -275,7 +273,6 @@ static int _accept_p(struct cmd_context *cmd, struct dev_filter *f, struct devic
|
||||
|
||||
if (!_set_lookup(ds, dev->dev)) {
|
||||
log_debug_devs("%s: Skipping (sysfs)", dev_name(dev));
|
||||
dev->filtered_flags |= DEV_FILTERED_SYSFS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -22,13 +22,10 @@ static int _passes_lvm_type_device_filter(struct cmd_context *cmd, struct dev_fi
|
||||
struct dev_types *dt = (struct dev_types *) f->private;
|
||||
const char *name = dev_name(dev);
|
||||
|
||||
dev->filtered_flags &= ~DEV_FILTERED_DEVTYPE;
|
||||
|
||||
/* Is this a recognised device type? */
|
||||
if (!dt->dev_type_array[MAJOR(dev->dev)].max_partitions) {
|
||||
log_debug_devs("%s: Skipping: Unrecognised LVM device type %"
|
||||
PRIu64, name, (uint64_t) MAJOR(dev->dev));
|
||||
dev->filtered_flags |= DEV_FILTERED_DEVTYPE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -113,9 +113,6 @@ static int _passes_usable_filter(struct cmd_context *cmd, struct dev_filter *f,
|
||||
struct dev_usable_check_params ucp = {0};
|
||||
int r = 1;
|
||||
|
||||
dev->filtered_flags &= ~DEV_FILTERED_MINSIZE;
|
||||
dev->filtered_flags &= ~DEV_FILTERED_UNUSABLE;
|
||||
|
||||
/* further checks are done on dm devices only */
|
||||
if (dm_is_dm_major(MAJOR(dev->dev))) {
|
||||
switch (mode) {
|
||||
@@ -145,10 +142,8 @@ static int _passes_usable_filter(struct cmd_context *cmd, struct dev_filter *f,
|
||||
break;
|
||||
}
|
||||
|
||||
if (!(r = device_is_usable(dev, ucp))) {
|
||||
dev->filtered_flags |= DEV_FILTERED_UNUSABLE;
|
||||
if (!(r = device_is_usable(dev, ucp)))
|
||||
log_debug_devs("%s: Skipping unusable device.", dev_name(dev));
|
||||
}
|
||||
}
|
||||
|
||||
if (r) {
|
||||
@@ -158,8 +153,6 @@ static int _passes_usable_filter(struct cmd_context *cmd, struct dev_filter *f,
|
||||
/* fall through */
|
||||
case FILTER_MODE_PRE_LVMETAD:
|
||||
r = _check_pv_min_size(dev);
|
||||
if (!r)
|
||||
dev->filtered_flags |= DEV_FILTERED_MINSIZE;
|
||||
break;
|
||||
case FILTER_MODE_POST_LVMETAD:
|
||||
/* nothing to do here */
|
||||
|
||||
@@ -52,16 +52,4 @@ typedef enum {
|
||||
} filter_mode_t;
|
||||
struct dev_filter *usable_filter_create(struct cmd_context *cmd, struct dev_types *dt, filter_mode_t mode);
|
||||
|
||||
#define DEV_FILTERED_FWRAID 0x00000001
|
||||
#define DEV_FILTERED_INTERNAL 0x00000002
|
||||
#define DEV_FILTERED_MD_COMPONENT 0x00000004
|
||||
#define DEV_FILTERED_MPATH_COMPONENT 0x00000008
|
||||
#define DEV_FILTERED_PARTITIONED 0x00000010
|
||||
#define DEV_FILTERED_REGEX 0x00000020
|
||||
#define DEV_FILTERED_SIGNATURE 0x00000040
|
||||
#define DEV_FILTERED_SYSFS 0x00000080
|
||||
#define DEV_FILTERED_DEVTYPE 0x00000100
|
||||
#define DEV_FILTERED_MINSIZE 0x00000200
|
||||
#define DEV_FILTERED_UNUSABLE 0x00000400
|
||||
|
||||
#endif /* _LVM_FILTER_H */
|
||||
|
||||
@@ -315,13 +315,13 @@ struct volume_group *backup_read_vg(struct cmd_context *cmd,
|
||||
}
|
||||
|
||||
dm_list_iterate_items(mda, &tf->metadata_areas_in_use) {
|
||||
if (!(vg = mda->ops->vg_read(cmd, tf, vg_name, mda, NULL, NULL)))
|
||||
if (!(vg = mda->ops->vg_read(tf, vg_name, mda, NULL, NULL)))
|
||||
stack;
|
||||
break;
|
||||
}
|
||||
|
||||
if (vg)
|
||||
set_pv_devices(tf, vg, NULL);
|
||||
set_pv_devices(tf, vg);
|
||||
|
||||
if (!vg)
|
||||
tf->fmt->ops->destroy_instance(tf);
|
||||
|
||||
@@ -129,7 +129,6 @@ static int _extend_buffer(struct formatter *f)
|
||||
log_error("Buffer reallocation failed.");
|
||||
return 0;
|
||||
}
|
||||
memset(newbuf + f->data.buf.size, 0, f->data.buf.size);
|
||||
f->data.buf.start = newbuf;
|
||||
f->data.buf.size *= 2;
|
||||
|
||||
@@ -1053,7 +1052,7 @@ int text_vg_export_file(struct volume_group *vg, const char *desc, FILE *fp)
|
||||
}
|
||||
|
||||
/* Returns amount of buffer used incl. terminating NUL */
|
||||
size_t text_vg_export_raw(struct volume_group *vg, const char *desc, char **buf, uint32_t *buf_size)
|
||||
size_t text_vg_export_raw(struct volume_group *vg, const char *desc, char **buf)
|
||||
{
|
||||
struct formatter *f;
|
||||
size_t r = 0;
|
||||
@@ -1064,7 +1063,7 @@ size_t text_vg_export_raw(struct volume_group *vg, const char *desc, char **buf,
|
||||
return_0;
|
||||
|
||||
f->data.buf.size = 65536; /* Initial metadata limit */
|
||||
if (!(f->data.buf.start = zalloc(f->data.buf.size))) {
|
||||
if (!(f->data.buf.start = malloc(f->data.buf.size))) {
|
||||
log_error("text_export buffer allocation failed");
|
||||
goto out;
|
||||
}
|
||||
@@ -1082,17 +1081,14 @@ size_t text_vg_export_raw(struct volume_group *vg, const char *desc, char **buf,
|
||||
r = f->data.buf.used + 1;
|
||||
*buf = f->data.buf.start;
|
||||
|
||||
if (buf_size)
|
||||
*buf_size = f->data.buf.size;
|
||||
|
||||
out:
|
||||
free(f);
|
||||
return r;
|
||||
}
|
||||
|
||||
static size_t _export_vg_to_buffer(struct volume_group *vg, char **buf)
|
||||
size_t export_vg_to_buffer(struct volume_group *vg, char **buf)
|
||||
{
|
||||
return text_vg_export_raw(vg, "", buf, NULL);
|
||||
return text_vg_export_raw(vg, "", buf);
|
||||
}
|
||||
|
||||
struct dm_config_tree *export_vg_to_config_tree(struct volume_group *vg)
|
||||
@@ -1100,7 +1096,7 @@ struct dm_config_tree *export_vg_to_config_tree(struct volume_group *vg)
|
||||
char *buf = NULL;
|
||||
struct dm_config_tree *vg_cft;
|
||||
|
||||
if (!_export_vg_to_buffer(vg, &buf)) {
|
||||
if (!export_vg_to_buffer(vg, &buf)) {
|
||||
log_error("Could not format metadata for VG %s.", vg->name);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -72,8 +72,7 @@ static const struct flag _lv_flags[] = {
|
||||
{LV_ACTIVATION_SKIP, "ACTIVATION_SKIP", COMPATIBLE_FLAG},
|
||||
{LV_ERROR_WHEN_FULL, "ERROR_WHEN_FULL", COMPATIBLE_FLAG},
|
||||
{LV_METADATA_FORMAT, "METADATA_FORMAT", SEGTYPE_FLAG},
|
||||
{LV_CACHE_VOL, "CACHE_VOL", COMPATIBLE_FLAG},
|
||||
{LV_CACHE_USES_CACHEVOL, "CACHE_USES_CACHEVOL", SEGTYPE_FLAG},
|
||||
{LV_CACHE_VOL, "CACHE_VOL", STATUS_FLAG},
|
||||
{LV_NOSCAN, NULL, 0},
|
||||
{LV_TEMPORARY, NULL, 0},
|
||||
{POOL_METADATA_SPARE, NULL, 0},
|
||||
@@ -104,8 +103,6 @@ static const struct flag _lv_flags[] = {
|
||||
{LV_VDO_POOL, NULL, 0},
|
||||
{LV_VDO_POOL_DATA, NULL, 0},
|
||||
{WRITECACHE, NULL, 0},
|
||||
{INTEGRITY, NULL, 0},
|
||||
{INTEGRITY_METADATA, NULL, 0},
|
||||
{LV_PENDING_DELETE, NULL, 0}, /* FIXME Display like COMPATIBLE_FLAG */
|
||||
{LV_REMOVED, NULL, 0},
|
||||
{0, NULL, 0}
|
||||
@@ -195,21 +192,12 @@ int read_flags(uint64_t *status, enum pv_vg_lv_e type, int mask, const struct dm
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* For a short time CACHE_VOL was a STATUS_FLAG, then it
|
||||
* was changed to COMPATIBLE_FLAG, so we want to read it
|
||||
* from either place.
|
||||
*/
|
||||
if (type == LV_FLAGS && !strcmp(cv->v.str, "CACHE_VOL"))
|
||||
mask = (STATUS_FLAG | COMPATIBLE_FLAG);
|
||||
|
||||
for (f = 0; flags[f].description; f++) {
|
||||
for (f = 0; flags[f].description; f++)
|
||||
if ((flags[f].kind & mask) &&
|
||||
!strcmp(flags[f].description, cv->v.str)) {
|
||||
s |= flags[f].mask;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (type == VG_FLAGS && !strcmp(cv->v.str, "PARTIAL")) {
|
||||
/*
|
||||
|
||||
File diff suppressed because it is too large
@@ -80,7 +80,4 @@ struct data_area_list {
|
||||
int text_wipe_outdated_pv_mda(struct cmd_context *cmd, struct device *dev,
|
||||
struct metadata_area *mda);
|
||||
|
||||
void preserve_text_fidtc(struct volume_group *vg);
|
||||
void free_text_fidtc(struct volume_group *vg);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -47,15 +47,11 @@ enum pv_vg_lv_e {
|
||||
|
||||
struct text_vg_version_ops {
|
||||
int (*check_version) (const struct dm_config_tree * cf);
|
||||
|
||||
struct volume_group *(*read_vg) (struct cmd_context *cmd,
|
||||
const struct format_type *fmt,
|
||||
struct format_instance *fid,
|
||||
const struct dm_config_tree *cft);
|
||||
|
||||
struct volume_group *(*read_vg) (struct format_instance * fid,
|
||||
const struct dm_config_tree *cf,
|
||||
unsigned allow_lvmetad_extensions);
|
||||
void (*read_desc) (struct dm_pool * mem, const struct dm_config_tree *cf,
|
||||
time_t *when, char **desc);
|
||||
|
||||
int (*read_vgsummary) (const struct format_type *fmt,
|
||||
const struct dm_config_tree *cft,
|
||||
struct lvmcache_vgsummary *vgsummary);
|
||||
@@ -70,7 +66,7 @@ int print_segtype_lvflags(char *buffer, size_t size, uint64_t status);
|
||||
int read_segtype_lvflags(uint64_t *status, char *segtype_str);
|
||||
|
||||
int text_vg_export_file(struct volume_group *vg, const char *desc, FILE *fp);
|
||||
size_t text_vg_export_raw(struct volume_group *vg, const char *desc, char **buf, uint32_t *alloc_size);
|
||||
size_t text_vg_export_raw(struct volume_group *vg, const char *desc, char **buf);
|
||||
struct volume_group *text_read_metadata_file(struct format_instance *fid,
|
||||
const char *file,
|
||||
time_t *when, char **desc);
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
|
||||
#include "lib/misc/lib.h"
|
||||
#include "lib/metadata/metadata.h"
|
||||
#include "lib/commands/toolcontext.h"
|
||||
#include "import-export.h"
|
||||
|
||||
/* FIXME Use tidier inclusion method */
|
||||
@@ -182,7 +181,7 @@ struct volume_group *text_read_metadata(struct format_instance *fid,
|
||||
if (!(*vsn)->check_version(cft))
|
||||
continue;
|
||||
|
||||
if (!(vg = (*vsn)->read_vg(fid->fmt->cmd, fid->fmt, fid, cft)))
|
||||
if (!(vg = (*vsn)->read_vg(fid, cft, 0)))
|
||||
goto_out;
|
||||
|
||||
(*vsn)->read_desc(vg->vgmem, cft, when, desc);
|
||||
@@ -211,9 +210,9 @@ struct volume_group *text_read_metadata_file(struct format_instance *fid,
|
||||
when, desc);
|
||||
}
|
||||
|
||||
static struct volume_group *_import_vg_from_config_tree(struct cmd_context *cmd,
|
||||
static struct volume_group *_import_vg_from_config_tree(const struct dm_config_tree *cft,
|
||||
struct format_instance *fid,
|
||||
const struct dm_config_tree *cft)
|
||||
unsigned allow_lvmetad_extensions)
|
||||
{
|
||||
struct volume_group *vg = NULL;
|
||||
struct text_vg_version_ops **vsn;
|
||||
@@ -228,10 +227,10 @@ static struct volume_group *_import_vg_from_config_tree(struct cmd_context *cmd,
|
||||
* The only path to this point uses cached vgmetadata,
|
||||
* so it can use cached PV state too.
|
||||
*/
|
||||
if (!(vg = (*vsn)->read_vg(cmd, fid->fmt, fid, cft)))
|
||||
if (!(vg = (*vsn)->read_vg(fid, cft, allow_lvmetad_extensions)))
|
||||
stack;
|
||||
else {
|
||||
set_pv_devices(fid, vg, NULL);
|
||||
set_pv_devices(fid, vg);
|
||||
|
||||
if ((vg_missing = vg_missing_pv_count(vg)))
|
||||
log_verbose("There are %d physical volumes missing.", vg_missing);
|
||||
@@ -244,21 +243,8 @@ static struct volume_group *_import_vg_from_config_tree(struct cmd_context *cmd,
|
||||
return vg;
|
||||
}
|
||||
|
||||
struct volume_group *import_vg_from_config_tree(struct cmd_context *cmd,
|
||||
struct format_instance *fid,
|
||||
const struct dm_config_tree *cft)
|
||||
struct volume_group *import_vg_from_config_tree(const struct dm_config_tree *cft,
|
||||
struct format_instance *fid)
|
||||
{
|
||||
return _import_vg_from_config_tree(cmd, fid, cft);
|
||||
return _import_vg_from_config_tree(cft, fid, 0);
|
||||
}
|
||||
|
||||
struct volume_group *vg_from_config_tree(struct cmd_context *cmd, const struct dm_config_tree *cft)
|
||||
{
|
||||
static struct text_vg_version_ops *ops;
|
||||
|
||||
_init_text_import();
|
||||
|
||||
ops = _text_vsn_list[0];
|
||||
|
||||
return ops->read_vg(cmd, cmd->fmt, NULL, cft);
|
||||
}
|
||||
|
||||
|
||||
@@ -27,16 +27,11 @@
#include "lib/config/defaults.h"
#include "lib/datastruct/str_list.h"

typedef int (*section_fn) (struct cmd_context *cmd,
struct format_type *fmt,
struct format_instance *fid,
struct dm_pool *mem,
struct volume_group *vg,
struct lvmcache_vgsummary *vgsummary,
const struct dm_config_node *pvn,
const struct dm_config_node *vgn,
struct dm_hash_table *pv_hash,
struct dm_hash_table *lv_hash);
typedef int (*section_fn) (struct format_instance * fid,
struct volume_group * vg, const struct dm_config_node * pvn,
const struct dm_config_node * vgn,
struct dm_hash_table * pv_hash,
struct dm_hash_table * lv_hash);

#define _read_int32(root, path, result) \
dm_config_get_uint32(root, path, (uint32_t *) (result))
@@ -174,21 +169,16 @@ static int _read_str_list(struct dm_pool *mem, struct dm_list *list, const struc
return 1;
}

static int _read_pv(struct cmd_context *cmd,
struct format_type *fmt,
struct format_instance *fid,
struct dm_pool *mem,
struct volume_group *vg,
struct lvmcache_vgsummary *vgsummary,
const struct dm_config_node *pvn,
static int _read_pv(struct format_instance *fid,
struct volume_group *vg, const struct dm_config_node *pvn,
const struct dm_config_node *vgn __attribute__((unused)),
struct dm_hash_table *pv_hash,
struct dm_hash_table *lv_hash __attribute__((unused)))
{
struct dm_pool *mem = vg->vgmem;
struct physical_volume *pv;
struct pv_list *pvl;
const struct dm_config_value *cv;
const char *device_hint;
uint64_t size, ba_start;

if (!(pvl = dm_pool_zalloc(mem, sizeof(*pvl))) ||
@@ -233,11 +223,6 @@ static int _read_pv(struct cmd_context *cmd,
return 0;
}

if (dm_config_get_str(pvn, "device", &device_hint)) {
if (!(pv->device_hint = dm_pool_strdup(mem, device_hint)))
log_error("Failed to allocate memory for device hint in read_pv.");
}

if (!_read_uint64(pvn, "pe_start", &pv->pe_start)) {
log_error("Couldn't read extent start value (pe_start) "
"for physical volume.");
@@ -281,7 +266,7 @@ static int _read_pv(struct cmd_context *cmd,

pv->pe_alloc_count = 0;
pv->pe_align = 0;
pv->fmt = fmt;
pv->fmt = fid->fmt;

if (!alloc_pv_segment_whole_pv(mem, pv))
return_0;
@@ -293,49 +278,6 @@ static int _read_pv(struct cmd_context *cmd,
return 1;
}

static int _read_pvsummary(struct cmd_context *cmd,
struct format_type *fmt,
struct format_instance *fid,
struct dm_pool *mem,
struct volume_group *vg,
struct lvmcache_vgsummary *vgsummary,
const struct dm_config_node *pvn,
const struct dm_config_node *vgn __attribute__((unused)),
struct dm_hash_table *pv_hash __attribute__((unused)),
struct dm_hash_table *lv_hash __attribute__((unused)))
{
struct physical_volume *pv;
struct pv_list *pvl;
const char *device_hint;

if (!(pvl = dm_pool_zalloc(mem, sizeof(*pvl))) ||
!(pvl->pv = dm_pool_zalloc(mem, sizeof(*pvl->pv))))
return_0;

pv = pvl->pv;

if (!(pvn = pvn->child)) {
log_error("Empty pv section.");
return 0;
}

if (!_read_id(&pv->id, pvn, "id"))
log_warn("Couldn't read uuid for physical volume.");

if (dm_config_has_node(pvn, "dev_size") &&
!_read_uint64(pvn, "dev_size", &pv->size))
log_warn("Couldn't read dev size for physical volume.");

if (dm_config_get_str(pvn, "device", &device_hint)) {
if (!(pv->device_hint = dm_pool_strdup(mem, device_hint)))
log_error("Failed to allocate memory for device hint in read_pv.");
}

dm_list_add(&vgsummary->pvsummaries, &pvl->list);

return 1;
}

static void _insert_segment(struct logical_volume *lv, struct lv_segment *seg)
{
struct lv_segment *comp;
@@ -351,13 +293,10 @@ static void _insert_segment(struct logical_volume *lv, struct lv_segment *seg)
dm_list_add(&lv->segments, &seg->list);
}

static int _read_segment(struct cmd_context *cmd,
struct format_type *fmt,
struct format_instance *fid,
struct dm_pool *mem,
struct logical_volume *lv, const struct dm_config_node *sn,
static int _read_segment(struct logical_volume *lv, const struct dm_config_node *sn,
struct dm_hash_table *pv_hash)
{
struct dm_pool *mem = lv->vg->vgmem;
uint32_t area_count = 0u;
struct lv_segment *seg;
const struct dm_config_node *sn_child = sn->child;
@@ -409,7 +348,7 @@ static int _read_segment(struct cmd_context *cmd,
return 0;
}

if (!(segtype = get_segtype_from_string(cmd, segtype_with_flags)))
if (!(segtype = get_segtype_from_string(lv->vg->cmd, segtype_with_flags)))
return_0;

/* Can drop temporary string here as nothing has allocated from VGMEM meanwhile */
@@ -526,11 +465,7 @@ int text_import_areas(struct lv_segment *seg, const struct dm_config_node *sn,
return 1;
}

static int _read_segments(struct cmd_context *cmd,
struct format_type *fmt,
struct format_instance *fid,
struct dm_pool *mem,
struct logical_volume *lv, const struct dm_config_node *lvn,
static int _read_segments(struct logical_volume *lv, const struct dm_config_node *lvn,
struct dm_hash_table *pv_hash)
{
const struct dm_config_node *sn;
@@ -542,7 +477,7 @@ static int _read_segments(struct cmd_context *cmd,
* All sub-sections are assumed to be segments.
*/
if (!sn->v) {
if (!_read_segment(cmd, fmt, fid, mem, lv, sn, pv_hash))
if (!_read_segment(lv, sn, pv_hash))
return_0;

count++;
@@ -581,17 +516,13 @@ static int _read_segments(struct cmd_context *cmd,
return 1;
}

static int _read_lvnames(struct cmd_context *cmd,
struct format_type *fmt,
struct format_instance *fid __attribute__((unused)),
struct dm_pool *mem,
struct volume_group *vg,
struct lvmcache_vgsummary *vgsummary,
const struct dm_config_node *lvn,
static int _read_lvnames(struct format_instance *fid __attribute__((unused)),
struct volume_group *vg, const struct dm_config_node *lvn,
const struct dm_config_node *vgn __attribute__((unused)),
struct dm_hash_table *pv_hash __attribute__((unused)),
struct dm_hash_table *lv_hash)
{
struct dm_pool *mem = vg->vgmem;
struct logical_volume *lv;
const char *str;
const struct dm_config_value *cv;
@@ -681,7 +612,7 @@ static int _read_lvnames(struct cmd_context *cmd,
if (dm_config_get_str(lvn, "profile", &str)) {
log_debug_metadata("Adding profile configuration %s for LV %s.",
str, display_lvname(lv));
if (!(lv->profile = add_profile(cmd, str, CONFIG_PROFILE_METADATA))) {
if (!(lv->profile = add_profile(vg->cmd, str, CONFIG_PROFILE_METADATA))) {
log_error("Failed to add configuration profile %s for LV %s.",
str, display_lvname(lv));
return 0;
@@ -690,7 +621,7 @@ static int _read_lvnames(struct cmd_context *cmd,

if (!_read_int32(lvn, "read_ahead", &lv->read_ahead))
/* If not present, choice of auto or none is configurable */
lv->read_ahead = cmd->default_settings.read_ahead;
lv->read_ahead = vg->cmd->default_settings.read_ahead;
else {
switch (lv->read_ahead) {
case 0:
@@ -740,17 +671,13 @@ static int _read_lvnames(struct cmd_context *cmd,
return 1;
}

static int _read_historical_lvnames(struct cmd_context *cmd,
struct format_type *fmt,
struct format_instance *fid __attribute__((unused)),
struct dm_pool *mem,
struct volume_group *vg,
struct lvmcache_vgsummary *vgsummary,
const struct dm_config_node *hlvn,
static int _read_historical_lvnames(struct format_instance *fid __attribute__((unused)),
struct volume_group *vg, const struct dm_config_node *hlvn,
const struct dm_config_node *vgn __attribute__((unused)),
struct dm_hash_table *pv_hash __attribute__((unused)),
struct dm_hash_table *lv_hash __attribute__((unused)))
{
struct dm_pool *mem = vg->vgmem;
struct generic_logical_volume *glv;
struct glv_list *glvl;
const char *str;
@@ -813,17 +740,13 @@ bad:
return 0;
}

static int _read_historical_lvnames_interconnections(struct cmd_context *cmd,
struct format_type *fmt,
struct format_instance *fid __attribute__((unused)),
struct dm_pool *mem,
struct volume_group *vg,
struct lvmcache_vgsummary *vgsummary,
const struct dm_config_node *hlvn,
static int _read_historical_lvnames_interconnections(struct format_instance *fid __attribute__((unused)),
struct volume_group *vg, const struct dm_config_node *hlvn,
const struct dm_config_node *vgn __attribute__((unused)),
struct dm_hash_table *pv_hash __attribute__((unused)),
struct dm_hash_table *lv_hash __attribute__((unused)))
{
struct dm_pool *mem = vg->vgmem;
const char *historical_lv_name, *origin_name = NULL;
struct generic_logical_volume *glv, *origin_glv, *descendant_glv;
struct logical_volume *tmp_lv;
@@ -927,13 +850,8 @@ bad:
return 0;
}

static int _read_lvsegs(struct cmd_context *cmd,
struct format_type *fmt,
struct format_instance *fid,
struct dm_pool *mem,
struct volume_group *vg,
struct lvmcache_vgsummary *vgsummary,
const struct dm_config_node *lvn,
static int _read_lvsegs(struct format_instance *fid,
struct volume_group *vg, const struct dm_config_node *lvn,
const struct dm_config_node *vgn __attribute__((unused)),
struct dm_hash_table *pv_hash,
struct dm_hash_table *lv_hash)
@@ -959,7 +877,7 @@ static int _read_lvsegs(struct cmd_context *cmd,

memcpy(&lv->lvid.id[0], &lv->vg->id, sizeof(lv->lvid.id[0]));

if (!_read_segments(cmd, fmt, fid, mem, lv, lvn, pv_hash))
if (!_read_segments(lv, lvn, pv_hash))
return_0;

lv->size = (uint64_t) lv->le_count * (uint64_t) vg->extent_size;
@@ -975,14 +893,14 @@ static int _read_lvsegs(struct cmd_context *cmd,

if (!dm_config_has_node(lvn, "major"))
/* If major is missing, pick default */
lv->major = cmd->dev_types->device_mapper_major;
lv->major = vg->cmd->dev_types->device_mapper_major;
else if (!_read_int32(lvn, "major", &lv->major)) {
log_warn("WARNING: Couldn't read major number for logical "
"volume %s.", display_lvname(lv));
lv->major = cmd->dev_types->device_mapper_major;
lv->major = vg->cmd->dev_types->device_mapper_major;
}

if (!validate_major_minor(cmd, fmt, lv->major, lv->minor)) {
if (!validate_major_minor(vg->cmd, fid->fmt, lv->major, lv->minor)) {
log_warn("WARNING: Ignoring invalid major, minor number for "
"logical volume %s.", display_lvname(lv));
lv->major = lv->minor = -1;
@@ -992,14 +910,9 @@ static int _read_lvsegs(struct cmd_context *cmd,
return 1;
}

static int _read_sections(struct cmd_context *cmd,
const struct format_type *fmt,
struct format_instance *fid,
struct dm_pool *mem,
static int _read_sections(struct format_instance *fid,
const char *section, section_fn fn,
struct volume_group *vg,
struct lvmcache_vgsummary *vgsummary,
const struct dm_config_node *vgn,
struct volume_group *vg, const struct dm_config_node *vgn,
struct dm_hash_table *pv_hash,
struct dm_hash_table *lv_hash,
int optional)
@@ -1016,19 +929,17 @@ static int _read_sections(struct cmd_context *cmd,
}

for (n = n->child; n; n = n->sib) {
if (!fn(cmd, (struct format_type *)fmt, fid, mem, vg, vgsummary, n, vgn, pv_hash, lv_hash))
if (!fn(fid, vg, n, vgn, pv_hash, lv_hash))
return_0;
}

return 1;
}

static struct volume_group *_read_vg(struct cmd_context *cmd,
const struct format_type *fmt,
struct format_instance *fid,
const struct dm_config_tree *cft)
static struct volume_group *_read_vg(struct format_instance *fid,
const struct dm_config_tree *cft,
unsigned allow_lvmetad_extensions)
{
struct dm_pool *mem;
const struct dm_config_node *vgn;
const struct dm_config_value *cv;
const char *str, *format_str, *system_id;
@@ -1045,11 +956,9 @@ static struct volume_group *_read_vg(struct cmd_context *cmd,
return NULL;
}

if (!(vg = alloc_vg("read_vg", cmd, vgn->key)))
if (!(vg = alloc_vg("read_vg", fid->fmt->cmd, vgn->key)))
return_NULL;

mem = vg->vgmem;

/*
* The pv hash memorises the pv section names -> pv
* structures.
@@ -1072,13 +981,13 @@ static struct volume_group *_read_vg(struct cmd_context *cmd,

/* A backup file might be a backup of a different format */
if (dm_config_get_str(vgn, "format", &format_str) &&
!(vg->original_fmt = get_format_by_name(cmd, format_str))) {
!(vg->original_fmt = get_format_by_name(fid->fmt->cmd, format_str))) {
log_error("Unrecognised format %s for volume group %s.", format_str, vg->name);
goto bad;
}

if (dm_config_get_str(vgn, "lock_type", &str)) {
if (!(vg->lock_type = dm_pool_strdup(mem, str)))
if (!(vg->lock_type = dm_pool_strdup(vg->vgmem, str)))
goto bad;
}

@@ -1104,7 +1013,7 @@ static struct volume_group *_read_vg(struct cmd_context *cmd,
* the lock_args before using it to access the lock manager.
*/
if (dm_config_get_str(vgn, "lock_args", &str)) {
if (!(vg->lock_args = dm_pool_strdup(mem, str)))
if (!(vg->lock_args = dm_pool_strdup(vg->vgmem, str)))
goto bad;
}

@@ -1126,7 +1035,7 @@ static struct volume_group *_read_vg(struct cmd_context *cmd,
}

if (dm_config_get_str(vgn, "system_id", &system_id)) {
if (!(vg->system_id = dm_pool_strdup(mem, system_id))) {
if (!(vg->system_id = dm_pool_strdup(vg->vgmem, system_id))) {
log_error("Failed to allocate memory for system_id in _read_vg.");
goto bad;
}
@@ -1171,7 +1080,7 @@ static struct volume_group *_read_vg(struct cmd_context *cmd,

if (dm_config_get_str(vgn, "profile", &str)) {
log_debug_metadata("Adding profile configuration %s for VG %s.", str, vg->name);
vg->profile = add_profile(cmd, str, CONFIG_PROFILE_METADATA);
vg->profile = add_profile(vg->cmd, str, CONFIG_PROFILE_METADATA);
if (!vg->profile) {
log_error("Failed to add configuration profile %s for VG %s", str, vg->name);
goto bad;
@@ -1182,7 +1091,7 @@ static struct volume_group *_read_vg(struct cmd_context *cmd,
vg->mda_copies = DEFAULT_VGMETADATACOPIES;
}

if (!_read_sections(cmd, fmt, fid, mem, "physical_volumes", _read_pv, vg, NULL,
if (!_read_sections(fid, "physical_volumes", _read_pv, vg,
vgn, pv_hash, lv_hash, 0)) {
log_error("Couldn't find all physical volumes for volume "
"group %s.", vg->name);
@@ -1191,34 +1100,34 @@ static struct volume_group *_read_vg(struct cmd_context *cmd,

/* Optional tags */
if (dm_config_get_list(vgn, "tags", &cv) &&
!(_read_str_list(mem, &vg->tags, cv))) {
!(_read_str_list(vg->vgmem, &vg->tags, cv))) {
log_error("Couldn't read tags for volume group %s.", vg->name);
goto bad;
}

if (!_read_sections(cmd, fmt, fid, mem, "logical_volumes", _read_lvnames, vg, NULL,
if (!_read_sections(fid, "logical_volumes", _read_lvnames, vg,
vgn, pv_hash, lv_hash, 1)) {
log_error("Couldn't read all logical volume names for volume "
"group %s.", vg->name);
goto bad;
}

if (!_read_sections(cmd, fmt, fid, mem, "historical_logical_volumes", _read_historical_lvnames, vg, NULL,
if (!_read_sections(fid, "historical_logical_volumes", _read_historical_lvnames, vg,
vgn, pv_hash, lv_hash, 1)) {
log_error("Couldn't read all historical logical volumes for volume "
"group %s.", vg->name);
goto bad;
}

if (!_read_sections(cmd, fmt, fid, mem, "logical_volumes", _read_lvsegs, vg, NULL,
if (!_read_sections(fid, "logical_volumes", _read_lvsegs, vg,
vgn, pv_hash, lv_hash, 1)) {
log_error("Couldn't read all logical volumes for "
"volume group %s.", vg->name);
goto bad;
}

if (!_read_sections(cmd, fmt, fid, mem, "historical_logical_volumes", _read_historical_lvnames_interconnections,
vg, NULL, vgn, pv_hash, lv_hash, 1)) {
if (!_read_sections(fid, "historical_logical_volumes", _read_historical_lvnames_interconnections,
vg, vgn, pv_hash, lv_hash, 1)) {
log_error("Couldn't read all removed logical volume interconnections "
"for volume group %s.", vg->name);
goto bad;
@@ -1233,8 +1142,7 @@ static struct volume_group *_read_vg(struct cmd_context *cmd,
dm_hash_destroy(pv_hash);
dm_hash_destroy(lv_hash);

if (fid)
vg_set_fid(vg, fid);
vg_set_fid(vg, fid);

/*
* Finished.
@@ -1327,11 +1235,6 @@ static int _read_vgsummary(const struct format_type *fmt, const struct dm_config
return 0;
}

if (!_read_sections(fmt->cmd, NULL, NULL, mem, "physical_volumes", _read_pvsummary, NULL, vgsummary,
vgn, NULL, NULL, 0)) {
log_debug("Couldn't read pv summaries");
}

return 1;
}

@@ -18,10 +18,11 @@

#include "lib/config/config.h"
#include "lib/metadata/metadata.h"
#include "lib/format_text/format-text.h"
#include "lib/cache/lvmcache.h"
#include "lib/uuid/uuid.h"

/* disk_locn and data_area_list are defined in format-text.h */

/*
* PV header extension versions:
* - version 1: bootloader area support
@@ -33,7 +34,7 @@ struct pv_header_extension {
uint32_t version;
uint32_t flags;
/* NULL-terminated list of bootloader areas */
struct disk_locn bootloader_areas_xl[];
struct disk_locn bootloader_areas_xl[0];
} __attribute__ ((packed));

/* Fields with the suffix _xl should be xlate'd wherever they appear */
@@ -46,7 +47,7 @@ struct pv_header {

/* NULL-terminated list of data areas followed by */
/* NULL-terminated list of metadata area headers */
struct disk_locn disk_areas_xl[]; /* Two lists */
struct disk_locn disk_areas_xl[0]; /* Two lists */
} __attribute__ ((packed));

/*
@@ -76,7 +77,7 @@ struct mda_header {
uint64_t start; /* Absolute start byte of mda_header */
uint64_t size; /* Size of metadata area */

struct raw_locn raw_locns[]; /* NULL-terminated list */
struct raw_locn raw_locns[0]; /* NULL-terminated list */
} __attribute__ ((packed));

struct mda_header *raw_read_mda_header(const struct format_type *fmt,
@@ -103,8 +104,7 @@ struct mda_context {
#define MDA_SIZE_MIN (8 * (unsigned) lvm_getpagesize())
#define MDA_ORIGINAL_ALIGNMENT 512 /* Original alignment used for start of VG metadata content */

int read_metadata_location_summary(const struct format_type *fmt,
struct metadata_area *mda, struct mda_header *mdah, int primary_mda,
int read_metadata_location_summary(const struct format_type *fmt, struct mda_header *mdah, int primary_mda,
struct device_area *dev_area, struct lvmcache_vgsummary *vgsummary,
uint64_t *mda_free_sectors);

Some files were not shown because too many files have changed in this diff.