Mirror of git://sourceware.org/git/lvm2.git

Compare commits


1 commit

Author:  Marian Csontos
Commit:  4ae4285805  make: Fix typo in M_INSTALL_SCRIPT
         - M_INSTALL_SCRIPT is unused, should be M_INSTALL_PROGRAM
Date:    2017-12-14 16:46:08 +01:00
417 changed files with 13852 additions and 14850 deletions

.gitignore (vendored): 101 changed lines

@@ -1,7 +1,6 @@
*.5
*.7
*.8
*.8_gen
*.a
*.d
*.o
@@ -31,103 +30,3 @@ make.tmpl
/cscope.out
/tags
/tmp/
tools/man-generator
tools/man-generator.c
test/lib/lvchange
test/lib/lvconvert
test/lib/lvcreate
test/lib/lvdisplay
test/lib/lvextend
test/lib/lvmconfig
test/lib/lvmdiskscan
test/lib/lvmsadc
test/lib/lvmsar
test/lib/lvreduce
test/lib/lvremove
test/lib/lvrename
test/lib/lvresize
test/lib/lvs
test/lib/lvscan
test/lib/pvchange
test/lib/pvck
test/lib/pvcreate
test/lib/pvdisplay
test/lib/pvmove
test/lib/pvremove
test/lib/pvresize
test/lib/pvs
test/lib/pvscan
test/lib/vgcfgbackup
test/lib/vgcfgrestore
test/lib/vgchange
test/lib/vgck
test/lib/vgconvert
test/lib/vgcreate
test/lib/vgdisplay
test/lib/vgexport
test/lib/vgextend
test/lib/vgimport
test/lib/vgimportclone
test/lib/vgmerge
test/lib/vgmknodes
test/lib/vgreduce
test/lib/vgremove
test/lib/vgrename
test/lib/vgs
test/lib/vgscan
test/lib/vgsplit
test/api/lvtest.t
test/api/pe_start.t
test/api/percent.t
test/api/python_lvm_unit.py
test/api/test
test/api/thin_percent.t
test/api/vglist.t
test/api/vgtest.t
test/lib/aux
test/lib/check
test/lib/clvmd
test/lib/dm-version-expected
test/lib/dmeventd
test/lib/dmsetup
test/lib/dmstats
test/lib/fail
test/lib/flavour-ndev-cluster
test/lib/flavour-ndev-cluster-lvmpolld
test/lib/flavour-ndev-lvmetad
test/lib/flavour-ndev-lvmetad-lvmpolld
test/lib/flavour-ndev-lvmpolld
test/lib/flavour-ndev-vanilla
test/lib/flavour-udev-cluster
test/lib/flavour-udev-cluster-lvmpolld
test/lib/flavour-udev-lvmetad
test/lib/flavour-udev-lvmetad-lvmpolld
test/lib/flavour-udev-lvmlockd-dlm
test/lib/flavour-udev-lvmlockd-sanlock
test/lib/flavour-udev-lvmlockd-test
test/lib/flavour-udev-lvmpolld
test/lib/flavour-udev-vanilla
test/lib/fsadm
test/lib/get
test/lib/inittest
test/lib/invalid
test/lib/lvm
test/lib/lvm-wrapper
test/lib/lvmchange
test/lib/lvmdbusd.profile
test/lib/lvmetad
test/lib/lvmpolld
test/lib/not
test/lib/paths
test/lib/paths-common
test/lib/runner
test/lib/should
test/lib/test
test/lib/thin-performance.profile
test/lib/utils
test/lib/version-expected
test/unit/dmraid_t.c
test/unit/unit-test


@@ -1,6 +1,6 @@
#
# Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
# Copyright (C) 2004-2018 Red Hat, Inc. All rights reserved.
# Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
@@ -18,7 +18,7 @@ top_builddir = @top_builddir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
SUBDIRS = conf daemons include lib libdaemon libdm man scripts device_mapper tools
SUBDIRS = conf daemons include lib libdaemon libdm man scripts tools
ifeq ("@UDEV_RULES@", "yes")
SUBDIRS += udev
@@ -43,7 +43,8 @@ endif
ifeq ($(MAKECMDGOALS),distclean)
SUBDIRS = conf include man test scripts \
libdaemon lib tools daemons libdm \
udev po liblvm python device_mapper
udev po liblvm python \
unit-tests/datastruct unit-tests/mm unit-tests/regex
tools.distclean: test.distclean
endif
DISTCLEAN_DIRS += lcov_reports*
@@ -61,9 +62,6 @@ po: tools daemons
man: tools
all_man: tools
scripts: liblvm libdm
test: tools daemons
unit-test: lib
run-unit-test: unit-test
lib.device-mapper: include.device-mapper
libdm.device-mapper: include.device-mapper
@@ -99,7 +97,7 @@ endif
DISTCLEAN_TARGETS += cscope.out
CLEAN_DIRS += autom4te.cache
check check_system check_cluster check_local check_lvmetad check_lvmpolld check_lvmlockd_test check_lvmlockd_dlm check_lvmlockd_sanlock unit-test run-unit-test: test
check check_system check_cluster check_local check_lvmetad check_lvmpolld check_lvmlockd_test check_lvmlockd_dlm check_lvmlockd_sanlock unit: all
$(MAKE) -C test $(@)
conf.generate man.generate: tools
@@ -148,7 +146,7 @@ install_system_dirs:
$(INSTALL_ROOT_DIR) $(DESTDIR)$(DEFAULT_RUN_DIR)
$(INSTALL_ROOT_DATA) /dev/null $(DESTDIR)$(DEFAULT_CACHE_DIR)/.cache
install_initscripts:
install_initscripts:
$(MAKE) -C scripts install_initscripts
install_systemd_generators:
@@ -171,7 +169,6 @@ install_tmpfiles_configuration:
LCOV_TRACES = libdm.info lib.info liblvm.info tools.info \
libdaemon/client.info libdaemon/server.info \
test/unit.info \
daemons/clvmd.info \
daemons/dmeventd.info \
daemons/lvmetad.info \
@@ -214,6 +211,31 @@ endif
endif
ifeq ("$(TESTING)", "yes")
# testing and report generation
RUBY=ruby1.9 -Ireport-generators/lib -Ireport-generators/test
.PHONY: unit-test ruby-test test-programs
# FIXME: put dependencies on libdm and liblvm
# FIXME: Should be handled by Makefiles in subdirs, not here at top level.
test-programs:
cd unit-tests/regex && $(MAKE)
cd unit-tests/datastruct && $(MAKE)
cd unit-tests/mm && $(MAKE)
unit-test: test-programs
$(RUBY) report-generators/unit_test.rb $(shell find . -name TESTS)
$(RUBY) report-generators/title_page.rb
memcheck: test-programs
$(RUBY) report-generators/memcheck.rb $(shell find . -name TESTS)
$(RUBY) report-generators/title_page.rb
ruby-test:
$(RUBY) report-generators/test/ts.rb
endif
ifneq ($(shell which ctags),)
.PHONY: tags
tags:


@@ -1 +1 @@
2.02.178(2) (2018-06-13)
2.02.177(2)-git (2017-11-03)


@@ -1 +1 @@
1.02.147 (2018-06-13)
1.02.146-git (2017-11-03)


@@ -1,70 +1,5 @@
Version 2.02.178 - 13th June 2018
Version 2.02.177 -
====================================
Add libaio dependency for build.
Remove lvm1 and pool format handling and add filter to ignore them.
Move some filter checks to after disks are read.
Rework disk scanning and when it is used.
Add new io layer and shift code to using it.
Fix lvconvert's return code on degraded -m raid1 conversion.
--enable-testing switch for ./configure has been removed.
--with-snapshots switch for ./configure has been removed.
--with-mirrors switch for ./configure has been removed.
--with-raid switch for ./configure has been removed.
--with-thin switch for ./configure has been removed.
--with-cache switch for ./configure has been removed.
Include new unit-test framework and unit tests.
Extend validation of region_size for mirror segment.
Reload whole device stack when reinitilizing mirror log.
Mirrors without monitoring are WARNING and not blocking on error.
Detect too big region_size with clustered mirrors.
Fix evaluation of maximal region size for mirror log.
Enhance mirror log size estimation and use smaller size when possible.
Fix incorrect mirror log size calculation on 32bit arch.
Enhance preloading tree creating.
Fix regression on acceptance of any LV on lvconvert.
Restore usability of thin LV to be again external origin for another thin.
Keep systemd vars on change event in 69-dm-lvm-metad.rules for systemd reload.
Write systemd and non-systemd rule in 69-dm-lvm-metad.rules, GOTO active one.
Add test for activation/volume_list (Sub)LV remnants.
Disallow usage of cache format 2 with mq cache policy.
Again accept striped LV as COW LV with lvconvert -s (2.02.169).
Fix raid target version testing for supported features.
Allow activation of pools when thin/cache_check tool is missing.
Remove RaidLV on creation failure when rmeta devices can't be activated.
Add prioritized_section() to restore cookie boundaries (2.02.177).
Enhance error messages when read error happens.
Enhance mirror log initialization for old mirror target.
Skip private crypto and stratis devices.
Skip frozen raid devices from scanning.
Activate RAID SubLVs on read_only_volume_list readwrite.
Offer convenience type raid5_n converting to raid10.
Automatically avoid reading invalid snapshots during device scan.
Ensure COW device is writable even for read-only thick snapshots.
Support activation of component LVs in read-only mode.
Extend internal library to recognize and work with component LV.
Skip duplicate check for active LV when prompting for its removal.
Activate correct lock holding LV when it is cached.
Do not modify archived metadata when removing striped raid.
Fix memleak on error path when obtaining lv_raid_data_offset.
Fix compatibility size test of extended external origin.
Add external_origin visiting in for_each_sub_lv().
Ensure cluster commands drop their device cache before locking VG.
Do not report LV as remotely active when it's locally exclusive in cluster.
Add deprecate messages for usage of mirrors with mirrorlog.
Separate reporting of monitoring status and error status.
Improve validation of created strings in vgimportclone.
Add missing initialisation of mem pool in systemd generator.
Do not reopen output streams for multithreaded users of liblvm.
Configure ensures /usr/bin dir is checked for dmpd tools.
Restore pvmove support for wide-clustered active volumes (2.02.177).
Avoid non-exclusive activation of exclusive segment types.
Fix trimming sibling PVs when doing a pvmove of raid subLVs.
Preserve exclusive activation during thin snaphost merge.
Avoid exceeding array bounds in allocation tag processing.
Add --lockopt to common options and add option to skip selected locks.
Version 2.02.177 - 18th December 2017
=====================================
When writing text metadata content, use complete 4096 byte blocks.
Change text format metadata alignment from 512 to 4096 bytes.
When writing metadata, consistently skip mdas marked as failed.


@@ -1,17 +1,5 @@
Version 1.02.147 - 13th June 2018
Version 1.02.146 -
====================================
Reuse uname() result for mirror target.
Recognize also mounted btrfs through dm_device_has_mounted_fs().
Add missing log_error() into dm_stats_populate() returning 0.
Avoid calling dm_stats_populat() for DM devices without any stats regions.
Support DM_DEBUG_WITH_LINE_NUMBERS envvar for debug msg with source:line.
Configured command for thin pool threshold handling gets whole environment.
Fix tests for failing dm_snprintf() in stats code.
Parsing mirror status accepts 'userspace' keyword in status.
Introduce dm_malloc_aligned for page alignment of buffers.
Version 1.02.146 - 18th December 2017
=====================================
Activation tree of thin pool skips duplicated check of pool status.
Remove code supporting replicator target.
Do not ignore failure of _info_by_dev().


@@ -155,7 +155,7 @@ AC_DEFUN([AC_TRY_LDFLAGS],
# and this notice are preserved. This file is offered as-is, without any
# warranty.
serial 3
#serial 3
AC_DEFUN([AX_GCC_BUILTIN], [
AS_VAR_PUSHDEF([ac_var], [ax_cv_have_$1])

aclocal.m4 (vendored): 210 changed lines

@@ -69,63 +69,32 @@ AC_DEFUN([AX_PYTHON_MODULE],[
fi
])
dnl pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*-
dnl serial 11 (pkg-config-0.29)
dnl
dnl Copyright © 2004 Scott James Remnant <scott@netsplit.com>.
dnl Copyright © 2012-2015 Dan Nicholson <dbn.lists@gmail.com>
dnl
dnl This program is free software; you can redistribute it and/or modify
dnl it under the terms of the GNU General Public License as published by
dnl the Free Software Foundation; either version 2 of the License, or
dnl (at your option) any later version.
dnl
dnl This program is distributed in the hope that it will be useful, but
dnl WITHOUT ANY WARRANTY; without even the implied warranty of
dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
dnl General Public License for more details.
dnl
dnl You should have received a copy of the GNU General Public License
dnl along with this program; if not, write to the Free Software
dnl Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
dnl 02111-1307, USA.
dnl
dnl As a special exception to the GNU General Public License, if you
dnl distribute this file as part of a program that contains a
dnl configuration script generated by Autoconf, you may include it under
dnl the same distribution terms that you use for the rest of that
dnl program.
# pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*-
# serial 1 (pkg-config-0.24)
#
# Copyright © 2004 Scott James Remnant <scott@netsplit.com>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
dnl PKG_PREREQ(MIN-VERSION)
dnl -----------------------
dnl Since: 0.29
dnl
dnl Verify that the version of the pkg-config macros are at least
dnl MIN-VERSION. Unlike PKG_PROG_PKG_CONFIG, which checks the user's
dnl installed version of pkg-config, this checks the developer's version
dnl of pkg.m4 when generating configure.
dnl
dnl To ensure that this macro is defined, also add:
dnl m4_ifndef([PKG_PREREQ],
dnl [m4_fatal([must install pkg-config 0.29 or later before running autoconf/autogen])])
dnl
dnl See the "Since" comment for each macro you use to see what version
dnl of the macros you require.
m4_defun([PKG_PREREQ],
[m4_define([PKG_MACROS_VERSION], [0.29])
m4_if(m4_version_compare(PKG_MACROS_VERSION, [$1]), -1,
[m4_fatal([pkg.m4 version $1 or higher is required but ]PKG_MACROS_VERSION[ found])])
])dnl PKG_PREREQ
dnl PKG_PROG_PKG_CONFIG([MIN-VERSION])
dnl ----------------------------------
dnl Since: 0.16
dnl
dnl Search for the pkg-config tool and set the PKG_CONFIG variable to
dnl first found in the path. Checks that the version of pkg-config found
dnl is at least MIN-VERSION. If MIN-VERSION is not specified, 0.9.0 is
dnl used since that's the first version where most current features of
dnl pkg-config existed.
# PKG_PROG_PKG_CONFIG([MIN-VERSION])
# ----------------------------------
AC_DEFUN([PKG_PROG_PKG_CONFIG],
[m4_pattern_forbid([^_?PKG_[A-Z_]+$])
m4_pattern_allow([^PKG_CONFIG(_(PATH|LIBDIR|SYSROOT_DIR|ALLOW_SYSTEM_(CFLAGS|LIBS)))?$])
@@ -147,19 +116,18 @@ if test -n "$PKG_CONFIG"; then
PKG_CONFIG=""
fi
fi[]dnl
])dnl PKG_PROG_PKG_CONFIG
])# PKG_PROG_PKG_CONFIG
dnl PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
dnl -------------------------------------------------------------------
dnl Since: 0.18
dnl
dnl Check to see whether a particular set of modules exists. Similar to
dnl PKG_CHECK_MODULES(), but does not set variables or print errors.
dnl
dnl Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG])
dnl only at the first occurence in configure.ac, so if the first place
dnl it's called might be skipped (such as if it is within an "if", you
dnl have to call PKG_CHECK_EXISTS manually
# PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
#
# Check to see whether a particular set of modules exists. Similar
# to PKG_CHECK_MODULES(), but does not set variables or print errors.
#
# Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG])
# only at the first occurence in configure.ac, so if the first place
# it's called might be skipped (such as if it is within an "if", you
# have to call PKG_CHECK_EXISTS manually
# --------------------------------------------------------------
AC_DEFUN([PKG_CHECK_EXISTS],
[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
if test -n "$PKG_CONFIG" && \
@@ -169,10 +137,8 @@ m4_ifvaln([$3], [else
$3])dnl
fi])
dnl _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES])
dnl ---------------------------------------------
dnl Internal wrapper calling pkg-config via PKG_CONFIG and setting
dnl pkg_failed based on the result.
# _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES])
# ---------------------------------------------
m4_define([_PKG_CONFIG],
[if test -n "$$1"; then
pkg_cv_[]$1="$$1"
@@ -184,11 +150,10 @@ m4_define([_PKG_CONFIG],
else
pkg_failed=untried
fi[]dnl
])dnl _PKG_CONFIG
])# _PKG_CONFIG
dnl _PKG_SHORT_ERRORS_SUPPORTED
dnl ---------------------------
dnl Internal check to see if pkg-config supports short errors.
# _PKG_SHORT_ERRORS_SUPPORTED
# -----------------------------
AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED],
[AC_REQUIRE([PKG_PROG_PKG_CONFIG])
if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
@@ -196,17 +161,19 @@ if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
else
_pkg_short_errors_supported=no
fi[]dnl
])dnl _PKG_SHORT_ERRORS_SUPPORTED
])# _PKG_SHORT_ERRORS_SUPPORTED
dnl PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND],
dnl [ACTION-IF-NOT-FOUND])
dnl --------------------------------------------------------------
dnl Since: 0.4.0
dnl
dnl Note that if there is a possibility the first call to
dnl PKG_CHECK_MODULES might not happen, you should be sure to include an
dnl explicit call to PKG_PROG_PKG_CONFIG in your configure.ac
# PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND],
# [ACTION-IF-NOT-FOUND])
#
#
# Note that if there is a possibility the first call to
# PKG_CHECK_MODULES might not happen, you should be sure to include an
# explicit call to PKG_PROG_PKG_CONFIG in your configure.ac
#
#
# --------------------------------------------------------------
AC_DEFUN([PKG_CHECK_MODULES],
[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl
@@ -260,40 +227,16 @@ else
AC_MSG_RESULT([yes])
$3
fi[]dnl
])dnl PKG_CHECK_MODULES
])# PKG_CHECK_MODULES
dnl PKG_CHECK_MODULES_STATIC(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND],
dnl [ACTION-IF-NOT-FOUND])
dnl ---------------------------------------------------------------------
dnl Since: 0.29
dnl
dnl Checks for existence of MODULES and gathers its build flags with
dnl static libraries enabled. Sets VARIABLE-PREFIX_CFLAGS from --cflags
dnl and VARIABLE-PREFIX_LIBS from --libs.
dnl
dnl Note that if there is a possibility the first call to
dnl PKG_CHECK_MODULES_STATIC might not happen, you should be sure to
dnl include an explicit call to PKG_PROG_PKG_CONFIG in your
dnl configure.ac.
AC_DEFUN([PKG_CHECK_MODULES_STATIC],
[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
_save_PKG_CONFIG=$PKG_CONFIG
PKG_CONFIG="$PKG_CONFIG --static"
PKG_CHECK_MODULES($@)
PKG_CONFIG=$_save_PKG_CONFIG[]dnl
])dnl PKG_CHECK_MODULES_STATIC
dnl PKG_INSTALLDIR([DIRECTORY])
dnl -------------------------
dnl Since: 0.27
dnl
dnl Substitutes the variable pkgconfigdir as the location where a module
dnl should install pkg-config .pc files. By default the directory is
dnl $libdir/pkgconfig, but the default can be changed by passing
dnl DIRECTORY. The user can override through the --with-pkgconfigdir
dnl parameter.
# PKG_INSTALLDIR(DIRECTORY)
# -------------------------
# Substitutes the variable pkgconfigdir as the location where a module
# should install pkg-config .pc files. By default the directory is
# $libdir/pkgconfig, but the default can be changed by passing
# DIRECTORY. The user can override through the --with-pkgconfigdir
# parameter.
AC_DEFUN([PKG_INSTALLDIR],
[m4_pushdef([pkg_default], [m4_default([$1], ['${libdir}/pkgconfig'])])
m4_pushdef([pkg_description],
@@ -304,18 +247,16 @@ AC_ARG_WITH([pkgconfigdir],
AC_SUBST([pkgconfigdir], [$with_pkgconfigdir])
m4_popdef([pkg_default])
m4_popdef([pkg_description])
])dnl PKG_INSTALLDIR
]) dnl PKG_INSTALLDIR
dnl PKG_NOARCH_INSTALLDIR([DIRECTORY])
dnl --------------------------------
dnl Since: 0.27
dnl
dnl Substitutes the variable noarch_pkgconfigdir as the location where a
dnl module should install arch-independent pkg-config .pc files. By
dnl default the directory is $datadir/pkgconfig, but the default can be
dnl changed by passing DIRECTORY. The user can override through the
dnl --with-noarch-pkgconfigdir parameter.
# PKG_NOARCH_INSTALLDIR(DIRECTORY)
# -------------------------
# Substitutes the variable noarch_pkgconfigdir as the location where a
# module should install arch-independent pkg-config .pc files. By
# default the directory is $datadir/pkgconfig, but the default can be
# changed by passing DIRECTORY. The user can override through the
# --with-noarch-pkgconfigdir parameter.
AC_DEFUN([PKG_NOARCH_INSTALLDIR],
[m4_pushdef([pkg_default], [m4_default([$1], ['${datadir}/pkgconfig'])])
m4_pushdef([pkg_description],
@@ -326,15 +267,13 @@ AC_ARG_WITH([noarch-pkgconfigdir],
AC_SUBST([noarch_pkgconfigdir], [$with_noarch_pkgconfigdir])
m4_popdef([pkg_default])
m4_popdef([pkg_description])
])dnl PKG_NOARCH_INSTALLDIR
]) dnl PKG_NOARCH_INSTALLDIR
dnl PKG_CHECK_VAR(VARIABLE, MODULE, CONFIG-VARIABLE,
dnl [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
dnl -------------------------------------------
dnl Since: 0.28
dnl
dnl Retrieves the value of the pkg-config variable for the given module.
# PKG_CHECK_VAR(VARIABLE, MODULE, CONFIG-VARIABLE,
# [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
# -------------------------------------------
# Retrieves the value of the pkg-config variable for the given module.
AC_DEFUN([PKG_CHECK_VAR],
[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
AC_ARG_VAR([$1], [value of $3 for $2, overriding pkg-config])dnl
@@ -343,7 +282,7 @@ _PKG_CONFIG([$1], [variable="][$3]["], [$2])
AS_VAR_COPY([$1], [pkg_cv_][$1])
AS_VAR_IF([$1], [""], [$5], [$4])dnl
])dnl PKG_CHECK_VAR
])# PKG_CHECK_VAR
# Copyright (C) 1999-2014 Free Software Foundation, Inc.
#
@@ -597,4 +536,5 @@ AC_DEFUN([AM_RUN_LOG],
echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
(exit $ac_status); }])
m4_include([acinclude.m4])


@@ -1,570 +0,0 @@
// Copyright (C) 2018 Red Hat, Inc. All rights reserved.
//
// This file is part of LVM2.
//
// This copyrighted material is made available to anyone wishing to use,
// modify, copy, or redistribute it subject to the terms and conditions
// of the GNU Lesser General Public License v.2.1.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#include "radix-tree.h"
#include "base/memory/container_of.h"
#include "base/memory/zalloc.h"
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
//----------------------------------------------------------------
enum node_type {
UNSET = 0,
VALUE,
VALUE_CHAIN,
PREFIX_CHAIN,
NODE4,
NODE16,
NODE48,
NODE256
};
struct value {
enum node_type type;
union radix_value value;
};
// This is used for entries that have a key which is a prefix of another key.
struct value_chain {
union radix_value value;
struct value child;
};
struct prefix_chain {
struct value child;
unsigned len;
uint8_t prefix[0];
};
struct node4 {
uint32_t nr_entries;
uint8_t keys[4];
struct value values[4];
};
struct node16 {
uint32_t nr_entries;
uint8_t keys[16];
struct value values[16];
};
struct node48 {
uint32_t nr_entries;
uint8_t keys[256];
struct value values[48];
};
struct node256 {
struct value values[256];
};
struct radix_tree {
unsigned nr_entries;
struct value root;
};
//----------------------------------------------------------------
struct radix_tree *radix_tree_create(void)
{
struct radix_tree *rt = malloc(sizeof(*rt));
if (rt) {
rt->nr_entries = 0;
rt->root.type = UNSET;
}
return rt;
}
static void _free_node(struct value v, radix_value_dtr dtr, void *context)
{
unsigned i;
struct value_chain *vc;
struct prefix_chain *pc;
struct node4 *n4;
struct node16 *n16;
struct node48 *n48;
struct node256 *n256;
switch (v.type) {
case UNSET:
break;
case VALUE:
if (dtr)
dtr(context, v.value);
break;
case VALUE_CHAIN:
vc = v.value.ptr;
if (dtr)
dtr(context, vc->value);
_free_node(vc->child, dtr, context);
free(vc);
break;
case PREFIX_CHAIN:
pc = v.value.ptr;
_free_node(pc->child, dtr, context);
free(pc);
break;
case NODE4:
n4 = (struct node4 *) v.value.ptr;
for (i = 0; i < n4->nr_entries; i++)
_free_node(n4->values[i], dtr, context);
free(n4);
break;
case NODE16:
n16 = (struct node16 *) v.value.ptr;
for (i = 0; i < n16->nr_entries; i++)
_free_node(n16->values[i], dtr, context);
free(n16);
break;
case NODE48:
n48 = (struct node48 *) v.value.ptr;
for (i = 0; i < n48->nr_entries; i++)
_free_node(n48->values[i], dtr, context);
free(n48);
break;
case NODE256:
n256 = (struct node256 *) v.value.ptr;
for (i = 0; i < 256; i++)
_free_node(n256->values[i], dtr, context);
free(n256);
break;
}
}
void radix_tree_destroy(struct radix_tree *rt, radix_value_dtr dtr, void *context)
{
_free_node(rt->root, dtr, context);
free(rt);
}
static bool _insert(struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv);
static bool _insert_unset(struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
{
unsigned len = ke - kb;
if (!len) {
// value
v->type = VALUE;
v->value = rv;
} else {
// prefix -> value
struct prefix_chain *pc = zalloc(sizeof(*pc) + len);
if (!pc)
return false;
pc->child.type = VALUE;
pc->child.value = rv;
pc->len = len;
memcpy(pc->prefix, kb, len);
v->type = PREFIX_CHAIN;
v->value.ptr = pc;
}
return true;
}
static bool _insert_value(struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
{
unsigned len = ke - kb;
if (!len)
// overwrite
v->value = rv;
else {
// value_chain -> value
struct value_chain *vc = zalloc(sizeof(*vc));
if (!vc)
return false;
vc->value = v->value;
if (!_insert(&vc->child, kb, ke, rv)) {
free(vc);
return false;
}
v->type = VALUE_CHAIN;
v->value.ptr = vc;
}
return true;
}
static bool _insert_value_chain(struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
{
struct value_chain *vc = v->value.ptr;
return _insert(&vc->child, kb, ke, rv);
}
static unsigned min(unsigned lhs, unsigned rhs)
{
if (lhs <= rhs)
return lhs;
else
return rhs;
}
static bool _insert_prefix_chain(struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
{
struct prefix_chain *pc = v->value.ptr;
if (*kb == pc->prefix[0]) {
// There's a common prefix let's split the chain into two and
// recurse.
struct prefix_chain *pc2;
unsigned i, len = min(pc->len, ke - kb);
for (i = 0; i < len; i++)
if (kb[i] != pc->prefix[i])
break;
pc2 = zalloc(sizeof(*pc2) + pc->len - i);
pc2->len = pc->len - i;
memmove(pc2->prefix, pc->prefix + i, pc2->len);
pc2->child = pc->child;
// FIXME: this trashes pc so we can't back out
pc->child.type = PREFIX_CHAIN;
pc->child.value.ptr = pc2;
pc->len = i;
if (!_insert(&pc->child, kb + i, ke, rv)) {
free(pc2);
return false;
}
} else {
// Stick an n4 in front.
struct node4 *n4 = zalloc(sizeof(*n4));
if (!n4)
return false;
n4->keys[0] = *kb;
if (!_insert(n4->values, kb + 1, ke, rv)) {
free(n4);
return false;
}
if (pc->len) {
n4->keys[1] = pc->prefix[0];
if (pc->len == 1) {
n4->values[1] = pc->child;
free(pc);
} else {
memmove(pc->prefix, pc->prefix + 1, pc->len - 1);
pc->len--;
n4->values[1] = *v;
}
n4->nr_entries = 2;
} else
n4->nr_entries = 1;
v->type = NODE4;
v->value.ptr = n4;
}
return true;
}
static bool _insert_node4(struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
{
struct node4 *n4 = v->value.ptr;
if (n4->nr_entries == 4) {
struct node16 *n16 = zalloc(sizeof(*n16));
if (!n16)
return false;
n16->nr_entries = 5;
memcpy(n16->keys, n4->keys, sizeof(n4->keys));
memcpy(n16->values, n4->values, sizeof(n4->values));
n16->keys[4] = *kb;
if (!_insert(n16->values + 4, kb + 1, ke, rv)) {
free(n16);
return false;
}
free(n4);
v->type = NODE16;
v->value.ptr = n16;
} else {
n4 = v->value.ptr;
if (!_insert(n4->values + n4->nr_entries, kb + 1, ke, rv))
return false;
n4->keys[n4->nr_entries] = *kb;
n4->nr_entries++;
}
return true;
}
static bool _insert_node16(struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
{
struct node16 *n16 = v->value.ptr;
if (n16->nr_entries == 16) {
unsigned i;
struct node48 *n48 = zalloc(sizeof(*n48));
if (!n48)
return false;
n48->nr_entries = 17;
memset(n48->keys, 48, sizeof(n48->keys));
for (i = 0; i < 16; i++) {
n48->keys[n16->keys[i]] = i;
n48->values[i] = n16->values[i];
}
n48->keys[*kb] = 16;
if (!_insert(n48->values + 16, kb + 1, ke, rv)) {
free(n48);
return false;
}
free(n16);
v->type = NODE48;
v->value.ptr = n48;
} else {
if (!_insert(n16->values + n16->nr_entries, kb + 1, ke, rv))
return false;
n16->keys[n16->nr_entries] = *kb;
n16->nr_entries++;
}
return true;
}
static bool _insert_node48(struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
{
struct node48 *n48 = v->value.ptr;
if (n48->nr_entries == 48) {
unsigned i;
struct node256 *n256 = zalloc(sizeof(*n256));
if (!n256)
return false;
for (i = 0; i < 256; i++) {
if (n48->keys[i] >= 48)
continue;
n256->values[i] = n48->values[n48->keys[i]];
}
if (!_insert(n256->values + *kb, kb + 1, ke, rv)) {
free(n256);
return false;
}
free(n48);
v->type = NODE256;
v->value.ptr = n256;
} else {
if (!_insert(n48->values + n48->nr_entries, kb + 1, ke, rv))
return false;
n48->keys[*kb] = n48->nr_entries;
n48->nr_entries++;
}
return true;
}
static bool _insert_node256(struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
{
struct node256 *n256 = v->value.ptr;
if (!_insert(n256->values + *kb, kb + 1, ke, rv)) {
n256->values[*kb].type = UNSET;
return false;
}
return true;
}
// FIXME: the tree should not be touched if insert fails (eg, OOM)
static bool _insert(struct value *v, uint8_t *kb, uint8_t *ke, union radix_value rv)
{
if (kb == ke) {
if (v->type == UNSET) {
v->type = VALUE;
v->value = rv;
} else if (v->type == VALUE) {
v->value = rv;
} else {
struct value_chain *vc = zalloc(sizeof(*vc));
if (!vc)
return false;
vc->value = rv;
vc->child = *v;
v->type = VALUE_CHAIN;
v->value.ptr = vc;
}
return true;
}
switch (v->type) {
case UNSET:
return _insert_unset(v, kb, ke, rv);
case VALUE:
return _insert_value(v, kb, ke, rv);
case VALUE_CHAIN:
return _insert_value_chain(v, kb, ke, rv);
case PREFIX_CHAIN:
return _insert_prefix_chain(v, kb, ke, rv);
case NODE4:
return _insert_node4(v, kb, ke, rv);
case NODE16:
return _insert_node16(v, kb, ke, rv);
case NODE48:
return _insert_node48(v, kb, ke, rv);
case NODE256:
return _insert_node256(v, kb, ke, rv);
}
// can't get here
return false;
}
struct lookup_result {
struct value *v;
uint8_t *kb;
};
static struct lookup_result _lookup_prefix(struct value *v, uint8_t *kb, uint8_t *ke)
{
unsigned i;
struct value_chain *vc;
struct prefix_chain *pc;
struct node4 *n4;
struct node16 *n16;
struct node48 *n48;
struct node256 *n256;
if (kb == ke)
return (struct lookup_result) {.v = v, .kb = kb};
switch (v->type) {
case UNSET:
case VALUE:
break;
case VALUE_CHAIN:
vc = v->value.ptr;
return _lookup_prefix(&vc->child, kb, ke);
case PREFIX_CHAIN:
pc = v->value.ptr;
if (ke - kb < pc->len)
return (struct lookup_result) {.v = v, .kb = kb};
for (i = 0; i < pc->len; i++)
if (kb[i] != pc->prefix[i])
return (struct lookup_result) {.v = v, .kb = kb};
return _lookup_prefix(&pc->child, kb + pc->len, ke);
case NODE4:
n4 = v->value.ptr;
for (i = 0; i < n4->nr_entries; i++)
if (n4->keys[i] == *kb)
return _lookup_prefix(n4->values + i, kb + 1, ke);
break;
case NODE16:
// FIXME: use binary search or simd?
n16 = v->value.ptr;
for (i = 0; i < n16->nr_entries; i++)
if (n16->keys[i] == *kb)
return _lookup_prefix(n16->values + i, kb + 1, ke);
break;
case NODE48:
n48 = v->value.ptr;
i = n48->keys[*kb];
if (i < 48)
return _lookup_prefix(n48->values + i, kb + 1, ke);
break;
case NODE256:
n256 = v->value.ptr;
return _lookup_prefix(n256->values + *kb, kb + 1, ke);
}
return (struct lookup_result) {.v = v, .kb = kb};
}
bool radix_tree_insert(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, union radix_value rv)
{
struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke);
if (_insert(lr.v, lr.kb, ke, rv)) {
rt->nr_entries++;
return true;
}
return false;
}
void radix_tree_delete(struct radix_tree *rt, uint8_t *key_begin, uint8_t *key_end)
{
assert(0);
}
bool radix_tree_lookup(struct radix_tree *rt,
uint8_t *kb, uint8_t *ke, union radix_value *result)
{
struct value_chain *vc;
struct lookup_result lr = _lookup_prefix(&rt->root, kb, ke);
if (lr.kb == ke) {
switch (lr.v->type) {
case VALUE:
*result = lr.v->value;
return true;
case VALUE_CHAIN:
vc = lr.v->value.ptr;
*result = vc->value;
return true;
default:
return false;
}
}
return false;
}
//----------------------------------------------------------------


@@ -1,43 +0,0 @@
// Copyright (C) 2018 Red Hat, Inc. All rights reserved.
//
// This file is part of LVM2.
//
// This copyrighted material is made available to anyone wishing to use,
// modify, copy, or redistribute it subject to the terms and conditions
// of the GNU Lesser General Public License v.2.1.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#ifndef BASE_DATA_STRUCT_RADIX_TREE_H
#define BASE_DATA_STRUCT_RADIX_TREE_H
#include <stdbool.h>
#include <stdint.h>
//----------------------------------------------------------------
struct radix_tree;
union radix_value {
void *ptr;
uint64_t n;
};
struct radix_tree *radix_tree_create(void);
typedef void (*radix_value_dtr)(void *context, union radix_value v);
// dtr may be NULL
void radix_tree_destroy(struct radix_tree *rt, radix_value_dtr dtr, void *context);
unsigned radix_tree_size(struct radix_tree *rt);
bool radix_tree_insert(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, union radix_value v);
void radix_tree_delete(struct radix_tree *rt, uint8_t *kb, uint8_t *ke);
bool radix_tree_lookup(struct radix_tree *rt,
uint8_t *kb, uint8_t *ke, union radix_value *result);
//----------------------------------------------------------------
#endif
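For orientation, here is a minimal usage sketch of the radix_tree API declared above; it is not part of the diff. Keys are arbitrary byte ranges [kb, ke), values are a union holding either a pointer or a 64-bit integer, and the destructor passed to radix_tree_destroy() may be NULL when the stored values own no resources. The include path and the key/value used below are illustrative assumptions.

#include <stdio.h>
#include "radix-tree.h"   /* assumed include path; depends on the build's -I flags */

int main(void)
{
	struct radix_tree *rt = radix_tree_create();
	uint8_t key[] = "lv0";                /* hypothetical key bytes */
	union radix_value in = { .n = 42 };   /* store a plain integer value */
	union radix_value out;

	if (!rt)
		return 1;

	/* Keys are the byte range [kb, ke); the trailing NUL is excluded here. */
	if (!radix_tree_insert(rt, key, key + sizeof(key) - 1, in))
		fprintf(stderr, "insert failed\n");

	if (radix_tree_lookup(rt, key, key + sizeof(key) - 1, &out))
		printf("found value %llu\n", (unsigned long long) out.n);

	/* dtr may be NULL when stored values need no cleanup. */
	radix_tree_destroy(rt, NULL, NULL);
	return 0;
}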


@@ -1,23 +0,0 @@
// Copyright (C) 2018 Red Hat, Inc. All rights reserved.
//
// This file is part of LVM2.
//
// This copyrighted material is made available to anyone wishing to use,
// modify, copy, or redistribute it subject to the terms and conditions
// of the GNU Lesser General Public License v.2.1.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#ifndef BASE_MEMORY_CONTAINER_OF_H
#define BASE_MEMORY_CONTAINER_OF_H
//----------------------------------------------------------------
#define container_of(v, t, head) \
((t *)((const char *)(v) - (const char *)&((t *) 0)->head))
//----------------------------------------------------------------
#endif
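Since the container_of() macro above is terse, a minimal usage sketch may help; the struct and member names here are made up for illustration. Given a pointer to a member embedded in a larger struct, the macro subtracts the member's offset to recover a pointer to the enclosing struct.

#include <stdio.h>
#include "container_of.h"   /* assumed include path; depends on the build's -I flags */

struct item {            /* hypothetical enclosing struct */
	int id;
	int link;        /* stands in for an embedded list/member field */
};

int main(void)
{
	struct item it = { .id = 7, .link = 0 };
	int *member = &it.link;

	/* Recover the enclosing struct item from a pointer to its member. */
	struct item *parent = container_of(member, struct item, link);
	printf("id = %d\n", parent->id);   /* prints id = 7 */
	return 0;
}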


@@ -1,31 +0,0 @@
// Copyright (C) 2018 Red Hat, Inc. All rights reserved.
//
// This file is part of LVM2.
//
// This copyrighted material is made available to anyone wishing to use,
// modify, copy, or redistribute it subject to the terms and conditions
// of the GNU Lesser General Public License v.2.1.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#ifndef BASE_MEMORY_ZALLOC_H
#define BASE_MEMORY_ZALLOC_H
#include <stdlib.h>
#include <string.h>
//----------------------------------------------------------------
static inline void *zalloc(size_t len)
{
void *ptr = malloc(len);
if (ptr)
memset(ptr, 0, len);
return ptr;
}
//----------------------------------------------------------------
#endif
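A brief note on the design choice, with an illustrative sketch (the struct below is made up to mirror the shape of node4 earlier in the diff): the radix-tree code allocates its nodes with zalloc() rather than plain malloc() because it relies on zero initialisation, e.g. nr_entries starting at 0 and struct value entries starting in the UNSET state, which is the zero enum value.

#include <stdint.h>
#include "zalloc.h"   /* assumed include path; depends on the build's -I flags */

struct demo_node {        /* illustrative only; shaped like node4 above */
	uint32_t nr_entries;
	uint8_t keys[4];
};

int main(void)
{
	struct demo_node *n = zalloc(sizeof(*n));

	if (!n)
		return 1;
	/* nr_entries is already 0 and keys[] are all zero; no explicit init needed. */
	free(n);          /* <stdlib.h> is pulled in by zalloc.h */
	return 0;
}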


@@ -702,17 +702,29 @@ global {
activation = 1
# Configuration option global/fallback_to_lvm1.
# This setting is no longer used.
# Try running LVM1 tools if LVM cannot communicate with DM.
# This option only applies to 2.4 kernels and is provided to help
# switch between device-mapper kernels and LVM1 kernels. The LVM1
# tools need to be installed with .lvm1 suffices, e.g. vgscan.lvm1.
# They will stop working once the lvm2 on-disk metadata format is used.
# This configuration option has an automatic default value.
# fallback_to_lvm1 = 0
# fallback_to_lvm1 = @DEFAULT_FALLBACK_TO_LVM1@
# Configuration option global/format.
# This setting is no longer used.
# The default metadata format that commands should use.
# The -M 1|2 option overrides this setting.
#
# Accepted values:
# lvm1
# lvm2
#
# This configuration option has an automatic default value.
# format = "lvm2"
# Configuration option global/format_libraries.
# This setting is no longer used.
# Shared libraries that process different metadata formats.
# If support for LVM1 metadata was compiled as a shared library use
# format_libraries = "liblvm2format1.so"
# This configuration option does not have a default value defined.
# Configuration option global/segment_libraries.
@@ -809,6 +821,13 @@ global {
# encountered the internal error. Please only enable for debugging.
abort_on_internal_errors = 0
# Configuration option global/detect_internal_vg_cache_corruption.
# Internal verification of VG structures.
# Check if CRC matches when a parsed VG is used multiple times. This
# is useful to catch unexpected changes to cached VG structures.
# Please only enable for debugging.
detect_internal_vg_cache_corruption = 0
# Configuration option global/metadata_read_only.
# No operations that change on-disk metadata are permitted.
# Additionally, read-only commands that encounter metadata in need of

configure (vendored): 309 changed lines

@@ -653,6 +653,7 @@ UDEV_RULES
UDEV_PC
THIN
TESTSUITE_DATA
TESTING
STATIC_LINK
STATICDIR
SNAPSHOTS
@@ -663,6 +664,7 @@ SBINDIR
REPLICATORS
READLINE_LIBS
RT_LIBS
RAID
PYTHON3DIR
PYTHON2DIR
PYTHON3_LIBDIRS
@@ -675,6 +677,7 @@ PYTHON_BINDINGS
PYTHON3
PTHREAD_LIBS
M_LIBS
POOL
PKGCONFIG
ODIRECT
OCFDIR
@@ -689,6 +692,8 @@ LVM_MINOR
LVM_MAJOR
LVM_LIBAPI
LVM_VERSION
LVM1_FALLBACK
LVM1
LIB_SUFFIX
LDDEPS
JOBS
@@ -716,6 +721,7 @@ DEFAULT_RAID10_SEGTYPE
DEFAULT_PROFILE_SUBDIR
DEFAULT_PID_DIR
DEFAULT_MIRROR_SEGTYPE
DEFAULT_FALLBACK_TO_LVM1
DEFAULT_LOCK_DIR
DEFAULT_DM_RUN_DIR
DEFAULT_DATA_ALIGNMENT
@@ -775,6 +781,8 @@ LOCKD_SANLOCK_LIBS
LOCKD_SANLOCK_CFLAGS
VALGRIND_LIBS
VALGRIND_CFLAGS
CUNIT_LIBS
CUNIT_CFLAGS
GENPNG
GENHTML
LCOV
@@ -876,7 +884,6 @@ infodir
docdir
oldincludedir
includedir
runstatedir
localstatedir
sharedstatedir
sysconfdir
@@ -908,9 +915,13 @@ with_device_gid
with_device_mode
with_device_nodes_on
with_default_name_mangling
enable_lvm1_fallback
with_lvm1
with_pool
with_cluster
with_snapshots
with_mirrors
with_raid
with_default_mirror_segtype
with_default_raid10_segtype
with_default_sparse_segtype
@@ -940,6 +951,7 @@ with_cmirrord_pidfile
enable_debug
with_optimisation
enable_profiling
enable_testing
enable_valgrind_pool
enable_devmapper
enable_lvmetad
@@ -1031,6 +1043,8 @@ DLM_CFLAGS
DLM_LIBS
SACKPT_CFLAGS
SACKPT_LIBS
CUNIT_CFLAGS
CUNIT_LIBS
VALGRIND_CFLAGS
VALGRIND_LIBS
LOCKD_SANLOCK_CFLAGS
@@ -1084,7 +1098,6 @@ datadir='${datarootdir}'
sysconfdir='${prefix}/etc'
sharedstatedir='${prefix}/com'
localstatedir='${prefix}/var'
runstatedir='${localstatedir}/run'
includedir='${prefix}/include'
oldincludedir='/usr/include'
docdir='${datarootdir}/doc/${PACKAGE}'
@@ -1337,15 +1350,6 @@ do
| -silent | --silent | --silen | --sile | --sil)
silent=yes ;;
-runstatedir | --runstatedir | --runstatedi | --runstated \
| --runstate | --runstat | --runsta | --runst | --runs \
| --run | --ru | --r)
ac_prev=runstatedir ;;
-runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
| --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
| --run=* | --ru=* | --r=*)
runstatedir=$ac_optarg ;;
-sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
ac_prev=sbindir ;;
-sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
@@ -1483,7 +1487,7 @@ fi
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
datadir sysconfdir sharedstatedir localstatedir includedir \
oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
libdir localedir mandir runstatedir
libdir localedir mandir
do
eval ac_val=\$$ac_var
# Remove trailing slashes.
@@ -1636,7 +1640,6 @@ Fine tuning of the installation directories:
--sysconfdir=DIR read-only single-machine data [PREFIX/etc]
--sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
--localstatedir=DIR modifiable single-machine data [PREFIX/var]
--runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run]
--libdir=DIR object code libraries [EPREFIX/lib]
--includedir=DIR C header files [PREFIX/include]
--oldincludedir=DIR C header files for non-gcc [/usr/include]
@@ -1673,6 +1676,8 @@ Optional Features:
speeds up one-time build.
--enable-static_link use this to link the tools to their libraries
statically (default is dynamic linking
--enable-lvm1_fallback use this to fall back and use LVM1 binaries if
device-mapper is missing from the kernel
--disable-thin_check_needs_check
required if thin_check version is < 0.3.0
--disable-cache_check_needs_check
@@ -1684,6 +1689,7 @@ Optional Features:
--enable-cmirrord enable the cluster mirror log daemon
--enable-debug enable debugging
--enable-profiling gather gcov profiling data
--enable-testing enable testing targets in the makefile
--enable-valgrind-pool enable valgrind awareness of pools
--disable-devmapper disable LVM2 device-mapper interaction
--enable-lvmetad enable the LVM Metadata Daemon
@@ -1739,10 +1745,15 @@ Optional Packages:
create nodes on resume or create [ON=resume]
--with-default-name-mangling=MANGLING
default name mangling: auto/none/hex [auto]
--with-lvm1=TYPE LVM1 metadata support: internal/shared/none
[internal]
--with-pool=TYPE GFS pool read-only support: internal/shared/none
[internal]
--with-cluster=TYPE cluster LVM locking support: internal/shared/none
[internal]
--with-snapshots=TYPE snapshot support: internal/shared/none [internal]
--with-mirrors=TYPE mirror support: internal/shared/none [internal]
--with-raid=TYPE raid support: internal/shared/none [internal]
--with-default-mirror-segtype=TYPE
default mirror segtype: raid1/mirror [raid1]
--with-default-raid10-segtype=TYPE
@@ -1867,6 +1878,9 @@ Some influential environment variables:
SACKPT_CFLAGS
C compiler flags for SACKPT, overriding pkg-config
SACKPT_LIBS linker flags for SACKPT, overriding pkg-config
CUNIT_CFLAGS
C compiler flags for CUNIT, overriding pkg-config
CUNIT_LIBS linker flags for CUNIT, overriding pkg-config
VALGRIND_CFLAGS
C compiler flags for VALGRIND, overriding pkg-config
VALGRIND_LIBS
@@ -4355,7 +4369,6 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
CFLAGS=$save_CFLAGS
CXXFLAGS=$save_CXXFLAGS
PATH_SBIN="$PATH:/usr/sbin:/sbin"
ac_ext=c
@@ -6141,7 +6154,7 @@ fi
for ac_header in assert.h ctype.h dirent.h errno.h fcntl.h float.h \
getopt.h inttypes.h langinfo.h libaio.h libgen.h limits.h locale.h paths.h \
getopt.h inttypes.h langinfo.h libgen.h limits.h locale.h paths.h \
signal.h stdarg.h stddef.h stdio.h stdlib.h string.h sys/file.h \
sys/ioctl.h syslog.h sys/mman.h sys/param.h sys/resource.h sys/stat.h \
sys/time.h sys/types.h sys/utsname.h sys/wait.h time.h \
@@ -8371,6 +8384,78 @@ cat >>confdefs.h <<_ACEOF
_ACEOF
################################################################################
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable lvm1 fallback" >&5
$as_echo_n "checking whether to enable lvm1 fallback... " >&6; }
# Check whether --enable-lvm1_fallback was given.
if test "${enable_lvm1_fallback+set}" = set; then :
enableval=$enable_lvm1_fallback; LVM1_FALLBACK=$enableval
else
LVM1_FALLBACK=no
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $LVM1_FALLBACK" >&5
$as_echo "$LVM1_FALLBACK" >&6; }
if test "$LVM1_FALLBACK" = yes; then
DEFAULT_FALLBACK_TO_LVM1=1
$as_echo "#define LVM1_FALLBACK 1" >>confdefs.h
else
DEFAULT_FALLBACK_TO_LVM1=0
fi
cat >>confdefs.h <<_ACEOF
#define DEFAULT_FALLBACK_TO_LVM1 $DEFAULT_FALLBACK_TO_LVM1
_ACEOF
################################################################################
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to include support for lvm1 metadata" >&5
$as_echo_n "checking whether to include support for lvm1 metadata... " >&6; }
# Check whether --with-lvm1 was given.
if test "${with_lvm1+set}" = set; then :
withval=$with_lvm1; LVM1=$withval
else
LVM1=internal
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $LVM1" >&5
$as_echo "$LVM1" >&6; }
case "$LVM1" in
none|shared) ;;
internal)
$as_echo "#define LVM1_INTERNAL 1" >>confdefs.h
;;
*) as_fn_error $? "--with-lvm1 parameter invalid" "$LINENO" 5 ;;
esac
################################################################################
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to include support for GFS pool metadata" >&5
$as_echo_n "checking whether to include support for GFS pool metadata... " >&6; }
# Check whether --with-pool was given.
if test "${with_pool+set}" = set; then :
withval=$with_pool; POOL=$withval
else
POOL=internal
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $POOL" >&5
$as_echo "$POOL" >&6; }
case "$POOL" in
none|shared) ;;
internal)
$as_echo "#define POOL_INTERNAL 1" >>confdefs.h
;;
*) as_fn_error $? "--with-pool parameter invalid" "$LINENO" 5
esac
################################################################################
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to include support for cluster locking" >&5
$as_echo_n "checking whether to include support for cluster locking... " >&6; }
@@ -8436,6 +8521,19 @@ $as_echo "#define MIRRORED_INTERNAL 1" >>confdefs.h
esac
################################################################################
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to include raid" >&5
$as_echo_n "checking whether to include raid... " >&6; }
# Check whether --with-raid was given.
if test "${with_raid+set}" = set; then :
withval=$with_raid; RAID=$withval
else
RAID=internal
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $RAID" >&5
$as_echo "$RAID" >&6; }
# Check whether --with-default-mirror-segtype was given.
if test "${with_default_mirror_segtype+set}" = set; then :
@@ -8452,10 +8550,15 @@ else
DEFAULT_RAID10_SEGTYPE="raid10"
fi
case "$RAID" in
none) test "$DEFAULT_MIRROR_SEGTYPE" = "raid1" && DEFAULT_MIRROR_SEGTYPE="mirror"
test "$DEFAULT_RAID10_SEGTYPE" = "raid10" && DEFAULT_RAID10_SEGTYPE="mirror" ;;
shared) ;;
internal)
$as_echo "#define RAID_INTERNAL 1" >>confdefs.h
;;
*) as_fn_error $? "--with-raid parameter invalid" "$LINENO" 5 ;;
esac
cat >>confdefs.h <<_ACEOF
@@ -8569,7 +8672,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -8612,7 +8715,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -8693,7 +8796,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -8736,7 +8839,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -8800,7 +8903,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -8843,7 +8946,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -8907,7 +9010,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -8950,7 +9053,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -9118,7 +9221,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -9161,7 +9264,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -9254,7 +9357,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -9297,7 +9400,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -9361,7 +9464,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -9404,7 +9507,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -9468,7 +9571,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -9511,7 +9614,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -11488,6 +11591,114 @@ $as_echo "$as_me: WARNING: GD.pm perl module is not installed" >&2;}
fi
fi
################################################################################
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable unit testing" >&5
$as_echo_n "checking whether to enable unit testing... " >&6; }
# Check whether --enable-testing was given.
if test "${enable_testing+set}" = set; then :
enableval=$enable_testing; TESTING=$enableval
else
TESTING=no
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $TESTING" >&5
$as_echo "$TESTING" >&6; }
if test "$TESTING" = yes; then
pkg_config_init
pkg_failed=no
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for CUNIT" >&5
$as_echo_n "checking for CUNIT... " >&6; }
if test -n "$CUNIT_CFLAGS"; then
pkg_cv_CUNIT_CFLAGS="$CUNIT_CFLAGS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"cunit >= 2.0\""; } >&5
($PKG_CONFIG --exists --print-errors "cunit >= 2.0") 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
pkg_cv_CUNIT_CFLAGS=`$PKG_CONFIG --cflags "cunit >= 2.0" 2>/dev/null`
test "x$?" != "x0" && pkg_failed=yes
else
pkg_failed=yes
fi
else
pkg_failed=untried
fi
if test -n "$CUNIT_LIBS"; then
pkg_cv_CUNIT_LIBS="$CUNIT_LIBS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"cunit >= 2.0\""; } >&5
($PKG_CONFIG --exists --print-errors "cunit >= 2.0") 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
pkg_cv_CUNIT_LIBS=`$PKG_CONFIG --libs "cunit >= 2.0" 2>/dev/null`
test "x$?" != "x0" && pkg_failed=yes
else
pkg_failed=yes
fi
else
pkg_failed=untried
fi
if test $pkg_failed = yes; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
_pkg_short_errors_supported=yes
else
_pkg_short_errors_supported=no
fi
if test $_pkg_short_errors_supported = yes; then
CUNIT_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "cunit >= 2.0" 2>&1`
else
CUNIT_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "cunit >= 2.0" 2>&1`
fi
# Put the nasty error message in config.log where it belongs
echo "$CUNIT_PKG_ERRORS" >&5
as_fn_error $? "Package requirements (cunit >= 2.0) were not met:
$CUNIT_PKG_ERRORS
Consider adjusting the PKG_CONFIG_PATH environment variable if you
installed software in a non-standard prefix.
Alternatively, you may set the environment variables CUNIT_CFLAGS
and CUNIT_LIBS to avoid the need to call pkg-config.
See the pkg-config man page for more details." "$LINENO" 5
elif test $pkg_failed = untried; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error $? "The pkg-config script could not be found or is too old. Make sure it
is in your PATH or set the PKG_CONFIG environment variable to the full
path to pkg-config.
Alternatively, you may set the environment variables CUNIT_CFLAGS
and CUNIT_LIBS to avoid the need to call pkg-config.
See the pkg-config man page for more details.
To get pkg-config, see <http://pkg-config.freedesktop.org/>.
See \`config.log' for more details" "$LINENO" 5; }
else
CUNIT_CFLAGS=$pkg_cv_CUNIT_CFLAGS
CUNIT_LIBS=$pkg_cv_CUNIT_LIBS
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
fi
fi
################################################################################
TESTSUITE_DATA='${datarootdir}/lvm2-testsuite'
# double eval needed ${datarootdir} -> ${prefix}/share -> real path
@@ -12688,7 +12899,7 @@ if ${am_cv_pathless_PYTHON+:} false; then :
$as_echo_n "(cached) " >&6
else
for am_cv_pathless_PYTHON in python python2 python3 python3.3 python3.2 python3.1 python3.0 python2.7 python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0 none; do
for am_cv_pathless_PYTHON in python python2 python3 python3.5 python3.4 python3.3 python3.2 python3.1 python3.0 python2.7 python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0 none; do
test "$am_cv_pathless_PYTHON" = none && break
prog="import sys
# split strings by '.' and convert to numeric. Append some zeros
@@ -13256,7 +13467,7 @@ if ${am_cv_pathless_PYTHON+:} false; then :
$as_echo_n "(cached) " >&6
else
for am_cv_pathless_PYTHON in python python2 python3 python3.3 python3.2 python3.1 python3.0 python2.7 python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0 none; do
for am_cv_pathless_PYTHON in python python2 python3 python3.5 python3.4 python3.3 python3.2 python3.1 python3.0 python2.7 python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0 none; do
test "$am_cv_pathless_PYTHON" = none && break
prog="import sys
# split strings by '.' and convert to numeric. Append some zeros
@@ -13862,6 +14073,8 @@ fi
################################################################################
if [ \( "$LVM1" = shared -o "$POOL" = shared -o "$CLUSTER" = shared \
-o "$SNAPSHOTS" = shared -o "$MIRRORS" = shared \
-o "$RAID" = shared -o "$CACHE" = shared \
\) -a "$STATIC_LINK" = yes ]; then
as_fn_error $? "Features cannot be 'shared' when building statically" "$LINENO" 5
fi
@@ -15097,7 +15310,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -15140,7 +15353,7 @@ else
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH_SBIN
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
@@ -15551,6 +15764,12 @@ _ACEOF
@@ -15559,7 +15778,7 @@ _ACEOF
################################################################################
ac_config_files="$ac_config_files Makefile make.tmpl daemons/Makefile daemons/clvmd/Makefile daemons/cmirrord/Makefile daemons/dmeventd/Makefile daemons/dmeventd/libdevmapper-event.pc daemons/dmeventd/plugins/Makefile daemons/dmeventd/plugins/lvm2/Makefile daemons/dmeventd/plugins/raid/Makefile daemons/dmeventd/plugins/mirror/Makefile daemons/dmeventd/plugins/snapshot/Makefile daemons/dmeventd/plugins/thin/Makefile daemons/dmfilemapd/Makefile daemons/lvmdbusd/Makefile daemons/lvmdbusd/lvmdbusd daemons/lvmdbusd/lvmdb.py daemons/lvmdbusd/lvm_shell_proxy.py daemons/lvmdbusd/path.py daemons/lvmetad/Makefile daemons/lvmpolld/Makefile daemons/lvmlockd/Makefile device_mapper/Makefile conf/Makefile conf/example.conf conf/lvmlocal.conf conf/command_profile_template.profile conf/metadata_profile_template.profile include/.symlinks include/Makefile lib/Makefile lib/locking/Makefile include/lvm-version.h libdaemon/Makefile libdaemon/client/Makefile libdaemon/server/Makefile libdm/Makefile libdm/libdevmapper.pc liblvm/Makefile liblvm/liblvm2app.pc man/Makefile po/Makefile python/Makefile python/setup.py scripts/blkdeactivate.sh scripts/blk_availability_init_red_hat scripts/blk_availability_systemd_red_hat.service scripts/clvmd_init_red_hat scripts/cmirrord_init_red_hat scripts/com.redhat.lvmdbus1.service scripts/dm_event_systemd_red_hat.service scripts/dm_event_systemd_red_hat.socket scripts/lvm2_cluster_activation_red_hat.sh scripts/lvm2_cluster_activation_systemd_red_hat.service scripts/lvm2_clvmd_systemd_red_hat.service scripts/lvm2_cmirrord_systemd_red_hat.service scripts/lvm2_lvmdbusd_systemd_red_hat.service scripts/lvm2_lvmetad_init_red_hat scripts/lvm2_lvmetad_systemd_red_hat.service scripts/lvm2_lvmetad_systemd_red_hat.socket scripts/lvm2_lvmpolld_init_red_hat scripts/lvm2_lvmpolld_systemd_red_hat.service scripts/lvm2_lvmpolld_systemd_red_hat.socket scripts/lvm2_lvmlockd_systemd_red_hat.service scripts/lvm2_lvmlocking_systemd_red_hat.service scripts/lvm2_monitoring_init_red_hat scripts/lvm2_monitoring_systemd_red_hat.service scripts/lvm2_pvscan_systemd_red_hat@.service scripts/lvm2_tmpfiles_red_hat.conf scripts/lvmdump.sh scripts/Makefile test/Makefile test/api/Makefile test/api/python_lvm_unit.py test/unit/Makefile tools/Makefile udev/Makefile"
ac_config_files="$ac_config_files Makefile make.tmpl daemons/Makefile daemons/clvmd/Makefile daemons/cmirrord/Makefile daemons/dmeventd/Makefile daemons/dmeventd/libdevmapper-event.pc daemons/dmeventd/plugins/Makefile daemons/dmeventd/plugins/lvm2/Makefile daemons/dmeventd/plugins/raid/Makefile daemons/dmeventd/plugins/mirror/Makefile daemons/dmeventd/plugins/snapshot/Makefile daemons/dmeventd/plugins/thin/Makefile daemons/dmfilemapd/Makefile daemons/lvmdbusd/Makefile daemons/lvmdbusd/lvmdbusd daemons/lvmdbusd/lvmdb.py daemons/lvmdbusd/lvm_shell_proxy.py daemons/lvmdbusd/path.py daemons/lvmetad/Makefile daemons/lvmpolld/Makefile daemons/lvmlockd/Makefile conf/Makefile conf/example.conf conf/lvmlocal.conf conf/command_profile_template.profile conf/metadata_profile_template.profile include/.symlinks include/Makefile lib/Makefile lib/format1/Makefile lib/format_pool/Makefile lib/locking/Makefile lib/mirror/Makefile include/lvm-version.h lib/raid/Makefile lib/snapshot/Makefile lib/thin/Makefile lib/cache_segtype/Makefile libdaemon/Makefile libdaemon/client/Makefile libdaemon/server/Makefile libdm/Makefile libdm/libdevmapper.pc liblvm/Makefile liblvm/liblvm2app.pc man/Makefile po/Makefile python/Makefile python/setup.py scripts/blkdeactivate.sh scripts/blk_availability_init_red_hat scripts/blk_availability_systemd_red_hat.service scripts/clvmd_init_red_hat scripts/cmirrord_init_red_hat scripts/com.redhat.lvmdbus1.service scripts/dm_event_systemd_red_hat.service scripts/dm_event_systemd_red_hat.socket scripts/lvm2_cluster_activation_red_hat.sh scripts/lvm2_cluster_activation_systemd_red_hat.service scripts/lvm2_clvmd_systemd_red_hat.service scripts/lvm2_cmirrord_systemd_red_hat.service scripts/lvm2_lvmdbusd_systemd_red_hat.service scripts/lvm2_lvmetad_init_red_hat scripts/lvm2_lvmetad_systemd_red_hat.service scripts/lvm2_lvmetad_systemd_red_hat.socket scripts/lvm2_lvmpolld_init_red_hat scripts/lvm2_lvmpolld_systemd_red_hat.service scripts/lvm2_lvmpolld_systemd_red_hat.socket scripts/lvm2_lvmlockd_systemd_red_hat.service scripts/lvm2_lvmlocking_systemd_red_hat.service scripts/lvm2_monitoring_init_red_hat scripts/lvm2_monitoring_systemd_red_hat.service scripts/lvm2_pvscan_systemd_red_hat@.service scripts/lvm2_tmpfiles_red_hat.conf scripts/lvmdump.sh scripts/Makefile test/Makefile test/api/Makefile test/unit/Makefile tools/Makefile udev/Makefile unit-tests/datastruct/Makefile unit-tests/regex/Makefile unit-tests/mm/Makefile"
cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
@@ -16276,7 +16495,6 @@ do
"daemons/lvmetad/Makefile") CONFIG_FILES="$CONFIG_FILES daemons/lvmetad/Makefile" ;;
"daemons/lvmpolld/Makefile") CONFIG_FILES="$CONFIG_FILES daemons/lvmpolld/Makefile" ;;
"daemons/lvmlockd/Makefile") CONFIG_FILES="$CONFIG_FILES daemons/lvmlockd/Makefile" ;;
"device_mapper/Makefile") CONFIG_FILES="$CONFIG_FILES device_mapper/Makefile" ;;
"conf/Makefile") CONFIG_FILES="$CONFIG_FILES conf/Makefile" ;;
"conf/example.conf") CONFIG_FILES="$CONFIG_FILES conf/example.conf" ;;
"conf/lvmlocal.conf") CONFIG_FILES="$CONFIG_FILES conf/lvmlocal.conf" ;;
@@ -16285,8 +16503,15 @@ do
"include/.symlinks") CONFIG_FILES="$CONFIG_FILES include/.symlinks" ;;
"include/Makefile") CONFIG_FILES="$CONFIG_FILES include/Makefile" ;;
"lib/Makefile") CONFIG_FILES="$CONFIG_FILES lib/Makefile" ;;
"lib/format1/Makefile") CONFIG_FILES="$CONFIG_FILES lib/format1/Makefile" ;;
"lib/format_pool/Makefile") CONFIG_FILES="$CONFIG_FILES lib/format_pool/Makefile" ;;
"lib/locking/Makefile") CONFIG_FILES="$CONFIG_FILES lib/locking/Makefile" ;;
"lib/mirror/Makefile") CONFIG_FILES="$CONFIG_FILES lib/mirror/Makefile" ;;
"include/lvm-version.h") CONFIG_FILES="$CONFIG_FILES include/lvm-version.h" ;;
"lib/raid/Makefile") CONFIG_FILES="$CONFIG_FILES lib/raid/Makefile" ;;
"lib/snapshot/Makefile") CONFIG_FILES="$CONFIG_FILES lib/snapshot/Makefile" ;;
"lib/thin/Makefile") CONFIG_FILES="$CONFIG_FILES lib/thin/Makefile" ;;
"lib/cache_segtype/Makefile") CONFIG_FILES="$CONFIG_FILES lib/cache_segtype/Makefile" ;;
"libdaemon/Makefile") CONFIG_FILES="$CONFIG_FILES libdaemon/Makefile" ;;
"libdaemon/client/Makefile") CONFIG_FILES="$CONFIG_FILES libdaemon/client/Makefile" ;;
"libdaemon/server/Makefile") CONFIG_FILES="$CONFIG_FILES libdaemon/server/Makefile" ;;
@@ -16327,10 +16552,12 @@ do
"scripts/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/Makefile" ;;
"test/Makefile") CONFIG_FILES="$CONFIG_FILES test/Makefile" ;;
"test/api/Makefile") CONFIG_FILES="$CONFIG_FILES test/api/Makefile" ;;
"test/api/python_lvm_unit.py") CONFIG_FILES="$CONFIG_FILES test/api/python_lvm_unit.py" ;;
"test/unit/Makefile") CONFIG_FILES="$CONFIG_FILES test/unit/Makefile" ;;
"tools/Makefile") CONFIG_FILES="$CONFIG_FILES tools/Makefile" ;;
"udev/Makefile") CONFIG_FILES="$CONFIG_FILES udev/Makefile" ;;
"unit-tests/datastruct/Makefile") CONFIG_FILES="$CONFIG_FILES unit-tests/datastruct/Makefile" ;;
"unit-tests/regex/Makefile") CONFIG_FILES="$CONFIG_FILES unit-tests/regex/Makefile" ;;
"unit-tests/mm/Makefile") CONFIG_FILES="$CONFIG_FILES unit-tests/mm/Makefile" ;;
*) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
esac


@@ -77,7 +77,6 @@ AC_PROG_CC
AC_PROG_CXX
CFLAGS=$save_CFLAGS
CXXFLAGS=$save_CXXFLAGS
PATH_SBIN="$PATH:/usr/sbin:/sbin"
dnl probably no longer needed in 2008, but...
AC_PROG_GCC_TRADITIONAL
@@ -103,7 +102,7 @@ AC_HEADER_SYS_WAIT
AC_HEADER_TIME
AC_CHECK_HEADERS([assert.h ctype.h dirent.h errno.h fcntl.h float.h \
getopt.h inttypes.h langinfo.h libaio.h libgen.h limits.h locale.h paths.h \
getopt.h inttypes.h langinfo.h libgen.h limits.h locale.h paths.h \
signal.h stdarg.h stddef.h stdio.h stdlib.h string.h sys/file.h \
sys/ioctl.h syslog.h sys/mman.h sys/param.h sys/resource.h sys/stat.h \
sys/time.h sys/types.h sys/utsname.h sys/wait.h time.h \
@@ -282,6 +281,58 @@ esac
AC_MSG_RESULT($MANGLING)
AC_DEFINE_UNQUOTED([DEFAULT_DM_NAME_MANGLING], $mangling, [Define default name mangling behaviour])
################################################################################
dnl -- LVM1 tool fallback option
AC_MSG_CHECKING(whether to enable lvm1 fallback)
AC_ARG_ENABLE(lvm1_fallback,
AC_HELP_STRING([--enable-lvm1_fallback],
[use this to fall back and use LVM1 binaries if
device-mapper is missing from the kernel]),
LVM1_FALLBACK=$enableval, LVM1_FALLBACK=no)
AC_MSG_RESULT($LVM1_FALLBACK)
if test "$LVM1_FALLBACK" = yes; then
DEFAULT_FALLBACK_TO_LVM1=1
AC_DEFINE([LVM1_FALLBACK], 1, [Define to 1 if 'lvm' should fall back to using LVM1 binaries if device-mapper is missing from the kernel])
else
DEFAULT_FALLBACK_TO_LVM1=0
fi
AC_DEFINE_UNQUOTED(DEFAULT_FALLBACK_TO_LVM1, [$DEFAULT_FALLBACK_TO_LVM1],
[Fall back to LVM1 by default if device-mapper is missing from the kernel.])
################################################################################
dnl -- format1 inclusion type
AC_MSG_CHECKING(whether to include support for lvm1 metadata)
AC_ARG_WITH(lvm1,
AC_HELP_STRING([--with-lvm1=TYPE],
[LVM1 metadata support: internal/shared/none [internal]]),
LVM1=$withval, LVM1=internal)
AC_MSG_RESULT($LVM1)
case "$LVM1" in
none|shared) ;;
internal) AC_DEFINE([LVM1_INTERNAL], 1,
[Define to 1 to include built-in support for LVM1 metadata.]) ;;
*) AC_MSG_ERROR([--with-lvm1 parameter invalid]) ;;
esac
################################################################################
dnl -- format_pool inclusion type
AC_MSG_CHECKING(whether to include support for GFS pool metadata)
AC_ARG_WITH(pool,
AC_HELP_STRING([--with-pool=TYPE],
[GFS pool read-only support: internal/shared/none [internal]]),
POOL=$withval, POOL=internal)
AC_MSG_RESULT($POOL)
case "$POOL" in
none|shared) ;;
internal) AC_DEFINE([POOL_INTERNAL], 1,
[Define to 1 to include built-in support for GFS pool metadata.]) ;;
*) AC_MSG_ERROR([--with-pool parameter invalid])
esac
################################################################################
dnl -- cluster_locking inclusion type
AC_MSG_CHECKING(whether to include support for cluster locking)
@@ -332,6 +383,13 @@ esac
################################################################################
dnl -- raid inclusion type
AC_MSG_CHECKING(whether to include raid)
AC_ARG_WITH(raid,
AC_HELP_STRING([--with-raid=TYPE],
[raid support: internal/shared/none [internal]]),
RAID=$withval, RAID=internal)
AC_MSG_RESULT($RAID)
AC_ARG_WITH(default-mirror-segtype,
AC_HELP_STRING([--with-default-mirror-segtype=TYPE],
[default mirror segtype: raid1/mirror [raid1]]),
@@ -340,9 +398,14 @@ AC_ARG_WITH(default-raid10-segtype,
AC_HELP_STRING([--with-default-raid10-segtype=TYPE],
[default mirror segtype: raid10/mirror [raid10]]),
DEFAULT_RAID10_SEGTYPE=$withval, DEFAULT_RAID10_SEGTYPE="raid10")
AC_DEFINE([RAID_INTERNAL], 1,
[Define to 1 to include built-in support for raid.])
case "$RAID" in
none) test "$DEFAULT_MIRROR_SEGTYPE" = "raid1" && DEFAULT_MIRROR_SEGTYPE="mirror"
test "$DEFAULT_RAID10_SEGTYPE" = "raid10" && DEFAULT_RAID10_SEGTYPE="mirror" ;;
shared) ;;
internal) AC_DEFINE([RAID_INTERNAL], 1,
[Define to 1 to include built-in support for raid.]) ;;
*) AC_MSG_ERROR([--with-raid parameter invalid]) ;;
esac
AC_DEFINE_UNQUOTED([DEFAULT_MIRROR_SEGTYPE], ["$DEFAULT_MIRROR_SEGTYPE"],
[Default segtype used for mirror volumes.])
@@ -405,7 +468,7 @@ case "$THIN" in
internal|shared)
# Empty means a config way to ignore thin checking
if test "$THIN_CHECK_CMD" = "autodetect"; then
AC_PATH_TOOL(THIN_CHECK_CMD, thin_check, [], [$PATH_SBIN])
AC_PATH_TOOL(THIN_CHECK_CMD, thin_check)
if test -z "$THIN_CHECK_CMD"; then
AC_MSG_WARN([thin_check not found in path $PATH])
THIN_CHECK_CMD=/usr/sbin/thin_check
@@ -429,7 +492,7 @@ case "$THIN" in
fi
# Empty means a config way to ignore thin dumping
if test "$THIN_DUMP_CMD" = "autodetect"; then
AC_PATH_TOOL(THIN_DUMP_CMD, thin_dump, [], [$PATH_SBIN])
AC_PATH_TOOL(THIN_DUMP_CMD, thin_dump)
test -z "$THIN_DUMP_CMD" && {
AC_MSG_WARN(thin_dump not found in path $PATH)
THIN_DUMP_CMD=/usr/sbin/thin_dump
@@ -438,7 +501,7 @@ case "$THIN" in
fi
# Empty means a config way to ignore thin repairing
if test "$THIN_REPAIR_CMD" = "autodetect"; then
AC_PATH_TOOL(THIN_REPAIR_CMD, thin_repair, [], [$PATH_SBIN])
AC_PATH_TOOL(THIN_REPAIR_CMD, thin_repair)
test -z "$THIN_REPAIR_CMD" && {
AC_MSG_WARN(thin_repair not found in path $PATH)
THIN_REPAIR_CMD=/usr/sbin/thin_repair
@@ -447,7 +510,7 @@ case "$THIN" in
fi
# Empty means a config way to ignore thin restoring
if test "$THIN_RESTORE_CMD" = "autodetect"; then
AC_PATH_TOOL(THIN_RESTORE_CMD, thin_restore, [], [$PATH_SBIN])
AC_PATH_TOOL(THIN_RESTORE_CMD, thin_restore)
test -z "$THIN_RESTORE_CMD" && {
AC_MSG_WARN(thin_restore not found in path $PATH)
THIN_RESTORE_CMD=/usr/sbin/thin_restore
@@ -519,7 +582,7 @@ case "$CACHE" in
internal|shared)
# Empty means a config way to ignore cache checking
if test "$CACHE_CHECK_CMD" = "autodetect"; then
AC_PATH_TOOL(CACHE_CHECK_CMD, cache_check, [], [$PATH_SBIN])
AC_PATH_TOOL(CACHE_CHECK_CMD, cache_check)
if test -z "$CACHE_CHECK_CMD"; then
AC_MSG_WARN([cache_check not found in path $PATH])
CACHE_CHECK_CMD=/usr/sbin/cache_check
@@ -554,7 +617,7 @@ case "$CACHE" in
fi
# Empty means a config way to ignore cache dumping
if test "$CACHE_DUMP_CMD" = "autodetect"; then
AC_PATH_TOOL(CACHE_DUMP_CMD, cache_dump, [], [$PATH_SBIN])
AC_PATH_TOOL(CACHE_DUMP_CMD, cache_dump)
test -z "$CACHE_DUMP_CMD" && {
AC_MSG_WARN(cache_dump not found in path $PATH)
CACHE_DUMP_CMD=/usr/sbin/cache_dump
@@ -563,7 +626,7 @@ case "$CACHE" in
fi
# Empty means a config way to ignore cache repairing
if test "$CACHE_REPAIR_CMD" = "autodetect"; then
AC_PATH_TOOL(CACHE_REPAIR_CMD, cache_repair, [], [$PATH_SBIN])
AC_PATH_TOOL(CACHE_REPAIR_CMD, cache_repair)
test -z "$CACHE_REPAIR_CMD" && {
AC_MSG_WARN(cache_repair not found in path $PATH)
CACHE_REPAIR_CMD=/usr/sbin/cache_repair
@@ -572,7 +635,7 @@ case "$CACHE" in
fi
# Empty means a config way to ignore cache restoring
if test "$CACHE_RESTORE_CMD" = "autodetect"; then
AC_PATH_TOOL(CACHE_RESTORE_CMD, cache_restore, [], [$PATH_SBIN])
AC_PATH_TOOL(CACHE_RESTORE_CMD, cache_restore)
test -z "$CACHE_RESTORE_CMD" && {
AC_MSG_WARN(cache_restore not found in path $PATH)
CACHE_RESTORE_CMD=/usr/sbin/cache_restore
@@ -1003,6 +1066,20 @@ if test "$PROFILING" = yes; then
fi
fi
################################################################################
dnl -- Enable testing
AC_MSG_CHECKING(whether to enable unit testing)
AC_ARG_ENABLE(testing,
AC_HELP_STRING([--enable-testing],
[enable testing targets in the makefile]),
TESTING=$enableval, TESTING=no)
AC_MSG_RESULT($TESTING)
if test "$TESTING" = yes; then
pkg_config_init
PKG_CHECK_MODULES(CUNIT, cunit >= 2.0)
fi
################################################################################
dnl -- Set LVM2 testsuite data
TESTSUITE_DATA='${datarootdir}/lvm2-testsuite'
@@ -1539,6 +1616,8 @@ AC_CHECK_LIB(dl, dlopen,
################################################################################
dnl -- Check for shared/static conflicts
if [[ \( "$LVM1" = shared -o "$POOL" = shared -o "$CLUSTER" = shared \
-o "$SNAPSHOTS" = shared -o "$MIRRORS" = shared \
-o "$RAID" = shared -o "$CACHE" = shared \
\) -a "$STATIC_LINK" = yes ]]; then
AC_MSG_ERROR([Features cannot be 'shared' when building statically])
fi
@@ -1791,7 +1870,7 @@ if test "$BUILD_DMFILEMAPD" = yes; then
fi
################################################################################
AC_PATH_TOOL(MODPROBE_CMD, modprobe, [], [$PATH_SBIN])
AC_PATH_TOOL(MODPROBE_CMD, modprobe)
if test -n "$MODPROBE_CMD"; then
AC_DEFINE_UNQUOTED([MODPROBE_CMD], ["$MODPROBE_CMD"], [The path to 'modprobe', if available.])
@@ -1967,6 +2046,7 @@ AC_SUBST(DEFAULT_CACHE_SUBDIR)
AC_SUBST(DEFAULT_DATA_ALIGNMENT)
AC_SUBST(DEFAULT_DM_RUN_DIR)
AC_SUBST(DEFAULT_LOCK_DIR)
AC_SUBST(DEFAULT_FALLBACK_TO_LVM1)
AC_SUBST(DEFAULT_MIRROR_SEGTYPE)
AC_SUBST(DEFAULT_PID_DIR)
AC_SUBST(DEFAULT_PROFILE_SUBDIR)
@@ -1997,6 +2077,8 @@ AC_SUBST(JOBS)
AC_SUBST(LDDEPS)
AC_SUBST(LIBS)
AC_SUBST(LIB_SUFFIX)
AC_SUBST(LVM1)
AC_SUBST(LVM1_FALLBACK)
AC_SUBST(LVM_VERSION)
AC_SUBST(LVM_LIBAPI)
AC_SUBST(LVM_MAJOR)
@@ -2013,6 +2095,7 @@ AC_SUBST(OCF)
AC_SUBST(OCFDIR)
AC_SUBST(ODIRECT)
AC_SUBST(PKGCONFIG)
AC_SUBST(POOL)
AC_SUBST(M_LIBS)
AC_SUBST(PTHREAD_LIBS)
AC_SUBST(PYTHON2)
@@ -2028,6 +2111,7 @@ AC_SUBST(PYTHON2DIR)
AC_SUBST(PYTHON3DIR)
AC_SUBST(QUORUM_CFLAGS)
AC_SUBST(QUORUM_LIBS)
AC_SUBST(RAID)
AC_SUBST(RT_LIBS)
AC_SUBST(READLINE_LIBS)
AC_SUBST(REPLICATORS)
@@ -2043,6 +2127,7 @@ AC_SUBST(SYSTEMD_LIBS)
AC_SUBST(SNAPSHOTS)
AC_SUBST(STATICDIR)
AC_SUBST(STATIC_LINK)
AC_SUBST(TESTING)
AC_SUBST(TESTSUITE_DATA)
AC_SUBST(THIN)
AC_SUBST(THIN_CHECK_CMD)
@@ -2108,7 +2193,6 @@ daemons/lvmdbusd/path.py
daemons/lvmetad/Makefile
daemons/lvmpolld/Makefile
daemons/lvmlockd/Makefile
device_mapper/Makefile
conf/Makefile
conf/example.conf
conf/lvmlocal.conf
@@ -2117,8 +2201,15 @@ conf/metadata_profile_template.profile
include/.symlinks
include/Makefile
lib/Makefile
lib/format1/Makefile
lib/format_pool/Makefile
lib/locking/Makefile
lib/mirror/Makefile
include/lvm-version.h
lib/raid/Makefile
lib/snapshot/Makefile
lib/thin/Makefile
lib/cache_segtype/Makefile
libdaemon/Makefile
libdaemon/client/Makefile
libdaemon/server/Makefile
@@ -2159,10 +2250,12 @@ scripts/lvmdump.sh
scripts/Makefile
test/Makefile
test/api/Makefile
test/api/python_lvm_unit.py
test/unit/Makefile
tools/Makefile
udev/Makefile
unit-tests/datastruct/Makefile
unit-tests/regex/Makefile
unit-tests/mm/Makefile
])
AC_OUTPUT


@@ -74,7 +74,7 @@ TARGETS = \
include $(top_builddir)/make.tmpl
LIBS += $(LVMINTERNAL_LIBS) -ldevmapper $(PTHREAD_LIBS) -laio
LIBS += $(LVMINTERNAL_LIBS) -ldevmapper $(PTHREAD_LIBS)
CFLAGS += -fno-strict-aliasing $(EXTRA_EXEC_CFLAGS)
INSTALL_TARGETS = \


@@ -108,6 +108,7 @@ int do_command(struct local_client *client, struct clvm_header *msg, int msglen,
lock_flags = args[1];
lockname = &args[2];
/* Check to see if the VG is in use by LVM1 */
status = do_check_lvm1(lockname);
do_lock_vg(lock_cmd, lock_flags, lockname);
break;


@@ -1999,9 +1999,6 @@ static int send_message(void *buf, int msglen, const char *csid, int fd,
return clops->cluster_send_message(buf, msglen, csid, errtext);
}
if (fd < 0)
return 0;
/* Make sure it all goes */
for (ptr = 0; ptr < msglen;) {
if ((len = write(fd, (char*)buf + ptr, msglen - ptr)) <= 0) {


@@ -639,6 +639,16 @@ int post_lock_lv(unsigned char command, unsigned char lock_flags,
return 0;
}
/* Check if a VG is in use by LVM1 so we don't stomp on it */
int do_check_lvm1(const char *vgname)
{
int status;
status = check_lvm1_vg_inactive(cmd, vgname);
return status == 1 ? 0 : EBUSY;
}
int do_refresh_cache(void)
{
DEBUGLOG("Refreshing context\n");
@@ -651,9 +661,10 @@ int do_refresh_cache(void)
return -1;
}
init_full_scan_done(0);
init_ignore_suspended_devices(1);
lvmcache_force_next_label_scan();
lvmcache_label_scan(cmd);
label_scan_destroy(cmd); /* destroys bcache (to close devs), keeps lvmcache */
dm_pool_empty(cmd->mem);
pthread_mutex_unlock(&lvm_lock);
@@ -796,7 +807,8 @@ static void lvm2_log_fn(int level, const char *file, int line, int dm_errno,
if (level != _LOG_ERR && level != _LOG_FATAL)
return;
(void) dm_strncpy(last_error, message, sizeof(last_error));
strncpy(last_error, message, sizeof(last_error));
last_error[sizeof(last_error)-1] = '\0';
}
/* This checks some basic cluster-LVM configuration stuff */


@@ -25,6 +25,7 @@ extern int do_lock_lv(unsigned char lock_cmd, unsigned char lock_flags,
extern const char *do_lock_query(char *resource);
extern int post_lock_lv(unsigned char lock_cmd, unsigned char lock_flags,
char *resource);
extern int do_check_lvm1(const char *vgname);
extern int do_refresh_cache(void);
extern int init_clvm(struct dm_hash_table *excl_uuid);
extern void destroy_lvm(void);


@@ -166,9 +166,6 @@ int cluster_send(struct clog_request *rq)
{
int r;
int found = 0;
#if CMIRROR_HAS_CHECKPOINT
int count = 0;
#endif
struct iovec iov;
struct clog_cpg *entry;
@@ -206,6 +203,8 @@ int cluster_send(struct clog_request *rq)
#if CMIRROR_HAS_CHECKPOINT
do {
int count = 0;
r = cpg_mcast_joined(entry->handle, CPG_TYPE_AGREED, &iov, 1);
if (r != SA_AIS_ERR_TRY_AGAIN)
break;
@@ -1631,7 +1630,7 @@ int create_cluster_cpg(char *uuid, uint64_t luid)
size = ((strlen(uuid) + 1) > CPG_MAX_NAME_LENGTH) ?
CPG_MAX_NAME_LENGTH : (strlen(uuid) + 1);
(void) dm_strncpy(new->name.value, uuid, size);
strncpy(new->name.value, uuid, size);
new->name.length = (uint32_t)size;
new->luid = luid;


@@ -451,19 +451,15 @@ static int _clog_ctr(char *uuid, uint64_t luid,
lc->skip_bit_warning = region_count;
lc->disk_fd = -1;
lc->log_dev_failed = 0;
if (!dm_strncpy(lc->uuid, uuid, DM_UUID_LEN)) {
LOG_ERROR("Cannot use too long UUID %s.", uuid);
r = -EINVAL;
goto fail;
}
strncpy(lc->uuid, uuid, DM_UUID_LEN);
lc->luid = luid;
if (get_log(lc->uuid, lc->luid) ||
get_pending_log(lc->uuid, lc->luid)) {
LOG_ERROR("[%s/%" PRIu64 "u] Log already exists, unable to create.",
SHORT_UUID(lc->uuid), lc->luid);
r = -EINVAL;
goto fail;
dm_free(lc);
return -EINVAL;
}
dm_list_init(&lc->mark_list);


@@ -14,6 +14,7 @@
#define _LVM_CLOG_LOGGING_H
#define _GNU_SOURCE
#define _FILE_OFFSET_BITS 64
#include "configure.h"
#include <stdio.h>


@@ -754,7 +754,6 @@ static void *_timeout_thread(void *unused __attribute__((unused)))
struct thread_status *thread;
struct timespec timeout;
time_t curr_time;
int ret;
DEBUGLOG("Timeout thread starting.");
pthread_cleanup_push(_exit_timeout, NULL);
@@ -776,10 +775,7 @@ static void *_timeout_thread(void *unused __attribute__((unused)))
} else {
DEBUGLOG("Sending SIGALRM to Thr %x for timeout.",
(int) thread->thread);
ret = pthread_kill(thread->thread, SIGALRM);
if (ret && (ret != ESRCH))
log_error("Unable to wakeup Thr %x for timeout: %s.",
(int) thread->thread, strerror(ret));
pthread_kill(thread->thread, SIGALRM);
}
_unlock_mutex();
}


@@ -754,10 +754,11 @@ int dm_event_get_registered_device(struct dm_event_handler *dmevh, int next)
uuid = dm_task_get_uuid(dmt);
/* FIXME Distinguish errors connecting to daemon */
if ((ret = _do_event(next ? DM_EVENT_CMD_GET_NEXT_REGISTERED_DEVICE :
DM_EVENT_CMD_GET_REGISTERED_DEVICE, dmevh->dmeventd_path,
&msg, dmevh->dso, uuid, dmevh->mask, 0))) {
if (_do_event(next ? DM_EVENT_CMD_GET_NEXT_REGISTERED_DEVICE :
DM_EVENT_CMD_GET_REGISTERED_DEVICE, dmevh->dmeventd_path,
&msg, dmevh->dso, uuid, dmevh->mask, 0)) {
log_debug("%s: device not registered.", dm_task_get_name(dmt));
ret = -ENOENT;
goto fail;
}


@@ -16,7 +16,23 @@ srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
SUBDIRS += lvm2 snapshot raid thin mirror
SUBDIRS += lvm2
ifneq ("@MIRRORS@", "none")
SUBDIRS += mirror
endif
ifneq ("@SNAPSHOTS@", "none")
SUBDIRS += snapshot
endif
ifneq ("@RAID@", "none")
SUBDIRS += raid
endif
ifneq ("@THIN@", "none")
SUBDIRS += thin
endif
ifeq ($(MAKECMDGOALS),distclean)
SUBDIRS = lvm2 mirror snapshot raid thin


@@ -64,23 +64,23 @@ DM_EVENT_LOG_FN("thin")
static int _run_command(struct dso_state *state)
{
char val[16];
char val[3][36];
char *env[] = { val[0], val[1], val[2], NULL };
int i;
/* Mark for possible lvm2 command we are running from dmeventd
* lvm2 will not try to talk back to dmeventd while processing it */
(void) setenv("LVM_RUN_BY_DMEVENTD", "1", 1);
(void) dm_snprintf(val[0], sizeof(val[0]), "LVM_RUN_BY_DMEVENTD=1");
if (state->data_percent) {
/* Prepare some known data to env vars for easy use */
if (dm_snprintf(val, sizeof(val), "%d",
state->data_percent / DM_PERCENT_1) != -1)
(void) setenv("DMEVENTD_THIN_POOL_DATA", val, 1);
if (dm_snprintf(val, sizeof(val), "%d",
state->metadata_percent / DM_PERCENT_1) != -1)
(void) setenv("DMEVENTD_THIN_POOL_METADATA", val, 1);
(void) dm_snprintf(val[1], sizeof(val[1]), "DMEVENTD_THIN_POOL_DATA=%d",
state->data_percent / DM_PERCENT_1);
(void) dm_snprintf(val[2], sizeof(val[2]), "DMEVENTD_THIN_POOL_METADATA=%d",
state->metadata_percent / DM_PERCENT_1);
} else {
/* For an error event it's for a user to check status and decide */
env[1] = NULL;
log_debug("Error event processing.");
}
@@ -95,7 +95,7 @@ static int _run_command(struct dso_state *state)
/* child */
(void) close(0);
for (i = 3; i < 255; ++i) (void) close(i);
execvp(state->argv[0], state->argv);
execve(state->argv[0], state->argv, env);
_exit(errno);
} else if (state->pid == -1) {
log_error("Can't fork command %s.", state->cmd_str);


@@ -802,7 +802,7 @@ bad:
return 1;
}
static const char * const _mode_names[] = {
static const char * _mode_names[] = {
"inode",
"path"
};
@@ -827,10 +827,8 @@ int main(int argc, char **argv)
"mode=%s, path=%s", fm.fd, fm.group_id,
_mode_names[fm.mode], fm.path);
if (!_foreground && !_daemonise(&fm)) {
dm_free(fm.path);
if (!_foreground && !_daemonise(&fm))
return 1;
}
return _dmfilemapd(&fm);
}


@@ -44,8 +44,6 @@ LVMDBUS_BUILDDIR_FILES = \
LVMDBUSD = lvmdbusd
CLEAN_DIRS += __pycache__
include $(top_builddir)/make.tmpl
.PHONY: install_lvmdbusd


@@ -232,6 +232,7 @@ class LvState(State):
@utils.dbus_property(LV_COMMON_INTERFACE, 'Attr', 's')
@utils.dbus_property(LV_COMMON_INTERFACE, 'DataPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'SnapPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'DataPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'MetaDataPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'CopyPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'SyncPercent', 'u')
@@ -497,7 +498,7 @@ class Lv(LvCommon):
# it is a thin lv
if not dbo.IsThinVolume:
if optional_size == 0:
space = dbo.SizeBytes // 80
space = dbo.SizeBytes / 80
remainder = space % 512
optional_size = space + 512 - remainder


@@ -22,6 +22,7 @@
#define LVMETAD_TOKEN_UPDATE_IN_PROGRESS "update in progress"
#define LVMETAD_DISABLE_REASON_DIRECT "DIRECT"
#define LVMETAD_DISABLE_REASON_LVM1 "LVM1"
#define LVMETAD_DISABLE_REASON_DUPLICATES "DUPLICATES"
#define LVMETAD_DISABLE_REASON_VGRESTORE "VGRESTORE"
#define LVMETAD_DISABLE_REASON_REPAIR "REPAIR"


@@ -200,12 +200,12 @@ struct vg_info {
#define GLFL_INVALID 0x00000001
#define GLFL_DISABLE 0x00000002
#define GLFL_DISABLE_REASON_DIRECT 0x00000004
/* 0x00000008 */
#define GLFL_DISABLE_REASON_LVM1 0x00000008
#define GLFL_DISABLE_REASON_DUPLICATES 0x00000010
#define GLFL_DISABLE_REASON_VGRESTORE 0x00000020
#define GLFL_DISABLE_REASON_REPAIR 0x00000040
#define GLFL_DISABLE_REASON_ALL (GLFL_DISABLE_REASON_DIRECT | GLFL_DISABLE_REASON_REPAIR | GLFL_DISABLE_REASON_DUPLICATES | GLFL_DISABLE_REASON_VGRESTORE)
#define GLFL_DISABLE_REASON_ALL (GLFL_DISABLE_REASON_DIRECT | GLFL_DISABLE_REASON_REPAIR | GLFL_DISABLE_REASON_LVM1 | GLFL_DISABLE_REASON_DUPLICATES | GLFL_DISABLE_REASON_VGRESTORE)
#define VGFL_INVALID 0x00000001
@@ -2369,6 +2369,8 @@ static response set_global_info(lvmetad_state *s, request r)
reason_flags |= GLFL_DISABLE_REASON_DIRECT;
if (strstr(reason, LVMETAD_DISABLE_REASON_REPAIR))
reason_flags |= GLFL_DISABLE_REASON_REPAIR;
if (strstr(reason, LVMETAD_DISABLE_REASON_LVM1))
reason_flags |= GLFL_DISABLE_REASON_LVM1;
if (strstr(reason, LVMETAD_DISABLE_REASON_DUPLICATES))
reason_flags |= GLFL_DISABLE_REASON_DUPLICATES;
if (strstr(reason, LVMETAD_DISABLE_REASON_VGRESTORE))
@@ -2419,17 +2421,21 @@ static response set_global_info(lvmetad_state *s, request r)
static response get_global_info(lvmetad_state *s, request r)
{
/* This buffer should be large enough to hold all the possible reasons. */
char reason[REASON_BUF_SIZE] = { 0 };
char reason[REASON_BUF_SIZE];
char flag_str[64];
int pid;
/* This buffer should be large enough to hold all the possible reasons. */
memset(reason, 0, sizeof(reason));
pid = (int)daemon_request_int(r, "pid", 0);
if (s->flags & GLFL_DISABLE) {
snprintf(reason, REASON_BUF_SIZE, "%s%s%s%s",
snprintf(reason, REASON_BUF_SIZE - 1, "%s%s%s%s%s",
(s->flags & GLFL_DISABLE_REASON_DIRECT) ? LVMETAD_DISABLE_REASON_DIRECT "," : "",
(s->flags & GLFL_DISABLE_REASON_REPAIR) ? LVMETAD_DISABLE_REASON_REPAIR "," : "",
(s->flags & GLFL_DISABLE_REASON_LVM1) ? LVMETAD_DISABLE_REASON_LVM1 "," : "",
(s->flags & GLFL_DISABLE_REASON_DUPLICATES) ? LVMETAD_DISABLE_REASON_DUPLICATES "," : "",
(s->flags & GLFL_DISABLE_REASON_VGRESTORE) ? LVMETAD_DISABLE_REASON_VGRESTORE "," : "");
}
@@ -2525,8 +2531,10 @@ inval:
info = dm_hash_lookup(s->vgid_to_info, uuid);
if (!info) {
if (!(info = dm_zalloc(sizeof(struct vg_info))))
info = malloc(sizeof(struct vg_info));
if (!info)
goto bad;
memset(info, 0, sizeof(struct vg_info));
if (!dm_hash_insert(s->vgid_to_info, uuid, (void*)info))
goto bad;
}
@@ -2697,8 +2705,9 @@ static response handler(daemon_state s, client_handle h, request r)
if (!prev_in_progress && this_in_progress) {
/* New update is starting (filter token is replaced by update token) */
(void) dm_strncpy(prev_token, state->token, sizeof(prev_token));
(void) dm_strncpy(state->token, token, sizeof(state->token));
memcpy(prev_token, state->token, 128);
strncpy(state->token, token, 128);
state->token[127] = 0;
state->update_begin = _monotonic_seconds();
state->update_timeout = update_timeout;
state->update_pid = pid;
@@ -2719,8 +2728,9 @@ static response handler(daemon_state s, client_handle h, request r)
(int)(_monotonic_seconds() - state->update_begin),
state->update_cmd);
(void) dm_strncpy(prev_token, state->token, sizeof(prev_token));
(void) dm_strncpy(state->token, token, sizeof(state->token));
memcpy(prev_token, state->token, 128);
strncpy(state->token, token, 128);
state->token[127] = 0;
state->update_begin = _monotonic_seconds();
state->update_timeout = update_timeout;
state->update_pid = pid;
@@ -2752,8 +2762,9 @@ static response handler(daemon_state s, client_handle h, request r)
(int)(_monotonic_seconds() - state->update_begin),
state->update_pid, token);
(void) dm_strncpy(prev_token, state->token, sizeof(prev_token));
(void) dm_strncpy(state->token, token, sizeof(state->token));
memcpy(prev_token, state->token, 128);
strncpy(state->token, token, 128);
state->token[127] = 0;
state->update_begin = 0;
state->update_timeout = 0;
state->update_pid = 0;


@@ -27,8 +27,6 @@ ifeq ("@BUILD_LOCKDDLM@", "yes")
LOCK_LIBS += -ldlm_lt
endif
SOURCES2 = lvmlockctl.c
TARGETS = lvmlockd lvmlockctl
.PHONY: install_lvmlockd


@@ -1009,8 +1009,6 @@ static void add_work_action(struct action *act)
pthread_mutex_unlock(&worker_mutex);
}
#define ERR_LVMETAD_NOT_RUNNING -200
static daemon_reply send_lvmetad(const char *id, ...)
{
daemon_reply reply;
@@ -1031,9 +1029,9 @@ retry:
if (lvmetad_handle.error || lvmetad_handle.socket_fd < 0) {
err = lvmetad_handle.error ?: lvmetad_handle.socket_fd;
pthread_mutex_unlock(&lvmetad_mutex);
log_debug("lvmetad_open reconnect error %d", err);
log_error("lvmetad_open reconnect error %d", err);
memset(&reply, 0, sizeof(reply));
reply.error = ERR_LVMETAD_NOT_RUNNING;
reply.error = err;
va_end(ap);
return reply;
} else {
@@ -1267,15 +1265,6 @@ static int res_lock(struct lockspace *ls, struct resource *r, struct action *act
* caches, and tell lvmetad to set global invalid to 0.
*/
/*
* lvmetad not running:
* Even if we have not previously found lvmetad running,
* we attempt to connect and invalidate in case it has
* been started while lvmlockd is running. We don't
* want to allow lvmetad to be used with invalid data if
* it happens to be enabled and started after lvmlockd.
*/
if (inval_meta && (r->type == LD_RT_VG)) {
daemon_reply reply;
char *uuid;
@@ -1295,10 +1284,8 @@ static int res_lock(struct lockspace *ls, struct resource *r, struct action *act
"version = " FMTd64, (int64_t)new_version,
NULL);
if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK")) {
if (reply.error != ERR_LVMETAD_NOT_RUNNING)
log_error("set_vg_info in lvmetad failed %d", reply.error);
}
if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK"))
log_error("set_vg_info in lvmetad failed %d", reply.error);
daemon_reply_destroy(reply);
}
@@ -1313,10 +1300,8 @@ static int res_lock(struct lockspace *ls, struct resource *r, struct action *act
"global_invalid = " FMTd64, INT64_C(1),
NULL);
if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK")) {
if (reply.error != ERR_LVMETAD_NOT_RUNNING)
log_error("set_global_info in lvmetad failed %d", reply.error);
}
if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK"))
log_error("set_global_info in lvmetad failed %d", reply.error);
daemon_reply_destroy(reply);
}
@@ -5863,7 +5848,7 @@ static int main_loop(daemon_state *ds_arg)
pthread_mutex_init(&lvmetad_mutex, NULL);
lvmetad_handle = lvmetad_open(NULL);
if (lvmetad_handle.error || lvmetad_handle.socket_fd < 0)
log_debug("lvmetad_open error %d", lvmetad_handle.error);
log_error("lvmetad_open error %d", lvmetad_handle.error);
else
lvmetad_connected = 1;
@@ -5871,13 +5856,8 @@ static int main_loop(daemon_state *ds_arg)
* Attempt to rejoin lockspaces and adopt locks from a previous
* instance of lvmlockd that left behind lockspaces/locks.
*/
if (adopt_opt) {
/* FIXME: implement this without lvmetad */
if (!lvmetad_connected)
log_error("Cannot adopt locks without lvmetad running.");
else
adopt_locks();
}
if (adopt_opt)
adopt_locks();
while (1) {
rv = poll(pollfd, pollfd_maxi + 1, -1);


@@ -699,7 +699,7 @@ int lm_hosts_dlm(struct lockspace *ls, int notify)
return 0;
memset(ls_nodes_path, 0, sizeof(ls_nodes_path));
snprintf(ls_nodes_path, PATH_MAX, "%s/%s/nodes",
snprintf(ls_nodes_path, PATH_MAX-1, "%s/%s/nodes",
DLM_LOCKSPACES_PATH, ls->name);
if (!(ls_dir = opendir(ls_nodes_path)))


@@ -294,36 +294,6 @@ out:
return host_id;
}
/* Prepare valid /dev/mapper/vgname-lvname with all the mangling */
static int build_dm_path(char *path, size_t path_len,
const char *vg_name, const char *lv_name)
{
struct dm_pool *mem;
char *dm_name;
int rv = 0;
if (!(mem = dm_pool_create("namepool", 1024))) {
log_error("Failed to create mempool.");
return -ENOMEM;
}
if (!(dm_name = dm_build_dm_name(mem, vg_name, lv_name, NULL))) {
log_error("Failed to build dm name for %s/%s.", vg_name, lv_name);
rv = -EINVAL;
goto fail;
}
if ((dm_snprintf(path, path_len, "%s/%s", dm_dir(), dm_name) < 0)) {
log_error("Failed to create path %s/%s.", dm_dir(), dm_name);
rv = -EINVAL;
}
fail:
dm_pool_destroy(mem);
return rv;
}
/*
* vgcreate
*
@@ -366,8 +336,7 @@ int lm_init_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_ar
if (strlen(lock_lv_name) + strlen(lock_args_version) + 2 > MAX_ARGS)
return -EARGS;
if ((rv = build_dm_path(disk.path, SANLK_PATH_LEN, vg_name, lock_lv_name)))
return rv;
snprintf(disk.path, SANLK_PATH_LEN-1, "/dev/mapper/%s-%s", vg_name, lock_lv_name);
log_debug("S %s init_vg_san path %s", ls_name, disk.path);
@@ -544,8 +513,7 @@ int lm_init_lv_sanlock(char *ls_name, char *vg_name, char *lv_name,
strncpy(rd.rs.lockspace_name, ls_name, SANLK_NAME_LEN);
rd.rs.num_disks = 1;
if ((rv = build_dm_path(rd.rs.disks[0].path, SANLK_PATH_LEN, vg_name, lock_lv_name)))
return rv;
snprintf(rd.rs.disks[0].path, SANLK_PATH_LEN-1, "/dev/mapper/%s-%s", vg_name, lock_lv_name);
align_size = sanlock_align(&rd.rs.disks[0]);
if (align_size <= 0) {
@@ -644,8 +612,7 @@ int lm_rename_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_
return rv;
}
if ((rv = build_dm_path(disk.path, SANLK_PATH_LEN, vg_name, lock_lv_name)))
return rv;
snprintf(disk.path, SANLK_PATH_LEN-1, "/dev/mapper/%s-%s", vg_name, lock_lv_name);
log_debug("S %s rename_vg_san path %s", ls_name, disk.path);
@@ -1102,10 +1069,10 @@ int lm_prepare_lockspace_sanlock(struct lockspace *ls)
* and appending "lockctl" to get /path/to/lvmlockctl.
*/
memset(killpath, 0, sizeof(killpath));
snprintf(killpath, SANLK_PATH_LEN, "%slockctl", LVM_PATH);
snprintf(killpath, SANLK_PATH_LEN - 1, "%slockctl", LVM_PATH);
memset(killargs, 0, sizeof(killargs));
snprintf(killargs, SANLK_PATH_LEN, "--kill %s", ls->vg_name);
snprintf(killargs, SANLK_PATH_LEN - 1, "--kill %s", ls->vg_name);
rv = check_args_version(ls->vg_args, VG_LOCK_ARGS_MAJOR);
if (rv < 0) {
@@ -1121,8 +1088,8 @@ int lm_prepare_lockspace_sanlock(struct lockspace *ls)
goto fail;
}
if ((ret = build_dm_path(disk_path, SANLK_PATH_LEN, ls->vg_name, lock_lv_name)))
goto fail;
snprintf(disk_path, SANLK_PATH_LEN-1, "/dev/mapper/%s-%s",
ls->vg_name, lock_lv_name);
/*
* When a vg is started, the internal sanlock lv should be


@@ -1,248 +0,0 @@
#include "target.h"
// For DM_ARRAY_SIZE!
#include "libdevmapper.h"
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
//----------------------------------------------------------------
static char *_tok_cpy(const char *b, const char *e)
{
char *new = malloc((e - b) + 1);
char *ptr = new;
if (new) {
while (b != e)
*ptr++ = *b++;
*ptr = '\0';
}
return new;
}
static bool _tok_eq(const char *b, const char *e, const char *str)
{
while (b != e) {
if (!*str || *b != *str)
return false;
b++;
str++;
}
return !*str;
}
static bool _parse_operating_mode(const char *b, const char *e, void *context)
{
static struct {
const char *str;
enum vdo_operating_mode mode;
} _table[] = {
{"recovering", VDO_MODE_RECOVERING},
{"read-only", VDO_MODE_READ_ONLY},
{"normal", VDO_MODE_NORMAL}
};
enum vdo_operating_mode *r = context;
unsigned i;
for (i = 0; i < DM_ARRAY_SIZE(_table); i++) {
if (_tok_eq(b, e, _table[i].str)) {
*r = _table[i].mode;
return true;
}
}
return false;
}
static bool _parse_compression_state(const char *b, const char *e, void *context)
{
static struct {
const char *str;
enum vdo_compression_state state;
} _table[] = {
{"online", VDO_COMPRESSION_ONLINE},
{"offline", VDO_COMPRESSION_OFFLINE}
};
enum vdo_compression_state *r = context;
unsigned i;
for (i = 0; i < DM_ARRAY_SIZE(_table); i++) {
if (_tok_eq(b, e, _table[i].str)) {
*r = _table[i].state;
return true;
}
}
return false;
}
static bool _parse_recovering(const char *b, const char *e, void *context)
{
bool *r = context;
if (_tok_eq(b, e, "recovering"))
*r = true;
else if (_tok_eq(b, e, "-"))
*r = false;
else
return false;
return true;
}
static bool _parse_index_state(const char *b, const char *e, void *context)
{
static struct {
const char *str;
enum vdo_index_state state;
} _table[] = {
{"error", VDO_INDEX_ERROR},
{"closed", VDO_INDEX_CLOSED},
{"opening", VDO_INDEX_OPENING},
{"closing", VDO_INDEX_CLOSING},
{"offline", VDO_INDEX_OFFLINE},
{"online", VDO_INDEX_ONLINE},
{"unknown", VDO_INDEX_UNKNOWN}
};
enum vdo_index_state *r = context;
unsigned i;
for (i = 0; i < DM_ARRAY_SIZE(_table); i++) {
if (_tok_eq(b, e, _table[i].str)) {
*r = _table[i].state;
return true;
}
}
return false;
}
static bool _parse_uint64(const char *b, const char *e, void *context)
{
uint64_t *r = context, n;
n = 0;
while (b != e) {
if (!isdigit(*b))
return false;
n = (n * 10) + (*b - '0');
b++;
}
*r = n;
return true;
}
static const char *_eat_space(const char *b, const char *e)
{
while (b != e && isspace(*b))
b++;
return b;
}
static const char *_next_tok(const char *b, const char *e)
{
const char *te = b;
while (te != e && !isspace(*te))
te++;
return te == b ? NULL : te;
}
static void _set_error(struct vdo_status_parse_result *result, const char *fmt, ...)
__attribute__ ((format(printf, 2, 3)));
static void _set_error(struct vdo_status_parse_result *result, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vsnprintf(result->error, sizeof(result->error), fmt, ap);
va_end(ap);
}
static bool _parse_field(const char **b, const char *e,
bool (*p_fn)(const char *, const char *, void *),
void *field, const char *field_name,
struct vdo_status_parse_result *result)
{
const char *te;
te = _next_tok(*b, e);
if (!te) {
_set_error(result, "couldn't get token for '%s'", field_name);
return false;
}
if (!p_fn(*b, te, field)) {
_set_error(result, "couldn't parse '%s'", field_name);
return false;
}
*b = _eat_space(te, e);
return true;
}
bool vdo_status_parse(const char *input, struct vdo_status_parse_result *result)
{
const char *b = input;
const char *e = input + strlen(input);
const char *te;
struct vdo_status *s = malloc(sizeof(*s));
if (!s) {
_set_error(result, "out of memory");
return false;
}
b = _eat_space(b, e);
te = _next_tok(b, e);
if (!te) {
_set_error(result, "couldn't get token for device");
free(s);
return false;
}
s->device = _tok_cpy(b, te);
if (!s->device) {
_set_error(result, "out of memory");
free(s);
return false;
}
b = _eat_space(te, e);
#define XX(p, f, fn) if (!_parse_field(&b, e, p, f, fn, result)) goto bad;
XX(_parse_operating_mode, &s->operating_mode, "operating mode");
XX(_parse_recovering, &s->recovering, "recovering");
XX(_parse_index_state, &s->index_state, "index state");
XX(_parse_compression_state, &s->compression_state, "compression state");
XX(_parse_uint64, &s->used_blocks, "used blocks");
XX(_parse_uint64, &s->total_blocks, "total blocks");
#undef XX
if (b != e) {
_set_error(result, "too many tokens");
goto bad;
}
result->status = s;
return true;
bad:
free(s->device);
free(s);
return false;
}
//----------------------------------------------------------------


@@ -1,68 +0,0 @@
/*
* Copyright (C) 2018 Red Hat, Inc. All rights reserved.
*
* This file is part of the device-mapper userspace tools.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef DEVICE_MAPPER_VDO_TARGET_H
#define DEVICE_MAPPER_VDO_TARGET_H
#include <stdbool.h>
#include <stdint.h>
//----------------------------------------------------------------
enum vdo_operating_mode {
VDO_MODE_RECOVERING,
VDO_MODE_READ_ONLY,
VDO_MODE_NORMAL
};
enum vdo_compression_state {
VDO_COMPRESSION_ONLINE,
VDO_COMPRESSION_OFFLINE
};
enum vdo_index_state {
VDO_INDEX_ERROR,
VDO_INDEX_CLOSED,
VDO_INDEX_OPENING,
VDO_INDEX_CLOSING,
VDO_INDEX_OFFLINE,
VDO_INDEX_ONLINE,
VDO_INDEX_UNKNOWN
};
struct vdo_status {
char *device;
enum vdo_operating_mode operating_mode;
bool recovering;
enum vdo_index_state index_state;
enum vdo_compression_state compression_state;
uint64_t used_blocks;
uint64_t total_blocks;
};
void vdo_status_destroy(struct vdo_status *s);
#define VDO_MAX_ERROR 256
struct vdo_status_parse_result {
char error[VDO_MAX_ERROR];
struct vdo_status *status;
};
// Parses the status line from the kernel target.
bool vdo_status_parse(const char *input, struct vdo_status_parse_result *result);
//----------------------------------------------------------------
#endif


@@ -1,338 +0,0 @@
LVM disk reading
Reading disks happens in two phases. The first is a discovery phase,
which determines what's on the disks. The second is a working phase,
which does a particular job for the command.
Phase 1: Discovery
------------------
Read all the disks on the system to find out:
- What are the LVM devices?
- What VGs exist on those devices?
This phase is called "label scan" (although it reads and scans everything,
not just the label.) It stores the information it discovers (what LVM
devices exist, and what VGs exist on them) in lvmcache. The devs/VGs info
in lvmcache is the starting point for phase two.
Phase 1 in outline:
For each device:
a. Read the first <N> KB of the device. (N is configurable.)
b. Look for the lvm label_header in the first four sectors,
if none exists, it's not an lvm device, so quit looking at it.
(By default, label_header is in the second sector.)
c. Look at the pv_header, which follows the label_header.
This tells us the location of VG metadata on the device.
There can be 0, 1 or 2 copies of VG metadata. The first
is always at the start of the device, the second (if used)
is at the end.
d. Look at the first mda_header (location came from pv_header
in the previous step). This is by default in sector 8,
4096 bytes from the start of the device. This tells us the
location of the actual VG metadata text.
e. Look at the first copy of the text VG metadata (location came
from mda_header in the previous step). This is by default
in sector 9, 4608 bytes from the start of the device.
The VG metadata is only partially analyzed to create a basic
summary of the VG.
f. Store an "info" entry in lvmcache for this device,
indicating that it is an lvm device, and store a "vginfo"
entry in lvmcache indicating the name of the VG seen
in the metadata in step e.
g. If the pv_header in step c shows a second mda_header
location at the end of the device, then read that as
in step d, and repeat steps e-f for it.
At the end of phase 1, lvmcache will have a list of devices
that belong to LVM, and a list of VG names that exist on
those devices. Each device (info struct) is associated
with the VG (vginfo struct) it is used in.
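As a concrete illustration of steps (a) and (b), here is a minimal standalone
sketch (not lvm2 code; the plain read() stands in for the bcache-based io and
error handling is minimal) that checks whether a device carries an LVM label
by looking for the 8-byte "LABELONE" id at the start of each of the first
four sectors:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define SECTOR_SIZE 512
#define LABEL_SCAN_SECTORS 4

int main(int argc, char **argv)
{
        char buf[SECTOR_SIZE * LABEL_SCAN_SECTORS];
        int fd, i;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <device>\n", argv[0]);
                return 1;
        }
        if ((fd = open(argv[1], O_RDONLY)) < 0) {
                perror("open");
                return 1;
        }
        /* lvm reads a larger, configurable chunk through bcache; the
         * first four sectors are enough to find the label_header. */
        if (read(fd, buf, sizeof(buf)) != (ssize_t) sizeof(buf)) {
                perror("read");
                close(fd);
                return 1;
        }
        close(fd);

        /* The label_header begins with the 8-byte id "LABELONE" and by
         * default sits in the second sector (sector 1). */
        for (i = 0; i < LABEL_SCAN_SECTORS; i++)
                if (!memcmp(buf + i * SECTOR_SIZE, "LABELONE", 8)) {
                        printf("LVM label found in sector %d\n", i);
                        return 0;
                }

        printf("not an LVM device\n");
        return 0;
}

The real code goes on to parse the pv_header and mda_header out of the same
buffered data (steps (c) and (d)) rather than issuing further small reads.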
Phase 1 in code:
The most relevant functions are listed for each step in the outline.
lvmcache_label_scan()
label_scan()
. dev_cache_scan()
choose which devices on the system to look at
. for each dev in dev_cache: bcache prefetch/read
. _process_block() to process data from bcache
_find_lvm_header() checks if this is an lvm dev by looking at label_header
_text_read() via ops->read() looks at mda/pv/vg data to populate lvmcache
. _read_mda_header_and_metadata()
raw_read_mda_header()
. _read_mda_header_and_metadata()
read_metadata_location()
text_read_metadata_summary()
config_file_read_fd()
_read_vgsummary() via ops->read_vgsummary()
. _text_read(): lvmcache_add()
[adds this device to list of lvm devices]
_read_mda_header_and_metadata(): lvmcache_update_vgname_and_id()
[adds the VG name to list of VGs]
Phase 2: Work
-------------
This phase carries out the operation requested by the command that was
run.
Whereas the first phase is based on iterating through each device on the
system, this phase is based on iterating through each VG name. The list
of VG names comes from phase 1, which stored the list in lvmcache to be
used by phase 2.
Some commands may need to iterate through all VG names, while others may
need to iterate through just one or two.
This phase includes locking each VG as work is done on it, so that two
commands do not interfere with each other.
Phase 2 in outline:
For each VG name:
a. Lock the VG.
b. Repeat the phase 1 scan steps for each device in this VG.
The phase 1 information in lvmcache may have changed because no VG lock
was held during phase 1. So, repeat the phase 1 steps, but only for the
devices in this VG. N.B. for commands that are just reporting data,
we skip this step if the data from phase 1 was complete and consistent.
c. Get the list of on-disk metadata locations for this VG.
Phase 1 created this list in lvmcache to be used here. At this
point we copy it out of lvmcache. In the simple/common case,
this is a list of devices in the VG. But, some devices may
have 0 or 2 metadata locations instead of the default 1, so it
is not always equal to the list of devices. We want to read
every copy of the metadata for this VG.
d. For each metadata location on each device in the VG
(the list from the previous step):
1) Look at the mda_header. The location of the mda_header was saved
in the lvmcache info struct by phase 1 (where it came from the
pv_header.) The mda_header tells us where the text VG metadata is
located.
2) Look at the text VG metadata. The location came from mda_header
in the previous step. The VG metadata is fully analyzed and used
to create an in-memory 'struct volume_group'.
e. Compare the copies of VG metadata that were found in each location.
If some copies are older, choose the newest one to use, and update
any older copies (a short sketch of this selection follows the outline).
f. Update details about the devices/VG in lvmcache.
g. Pass the 'vg' struct to the command-specific code to work with.
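The comparison in step (e) keys off the metadata sequence number (seqno)
stored in every on-disk copy of the VG metadata. A minimal sketch of that
selection (the struct, function and values here are invented for
illustration; they are not lvm2 types):

#include <stdio.h>

struct mda_copy {
        const char *dev;
        unsigned seqno;         /* sequence number from the metadata text */
};

/* Return the copy with the highest seqno; ties keep the first one seen. */
static const struct mda_copy *newest_copy(const struct mda_copy *c, int n)
{
        const struct mda_copy *best = NULL;
        int i;

        for (i = 0; i < n; i++)
                if (!best || c[i].seqno > best->seqno)
                        best = &c[i];
        return best;
}

int main(void)
{
        struct mda_copy copies[] = {
                { "/dev/sda", 41 },     /* stale, e.g. dev missed an update */
                { "/dev/sdb", 42 },
                { "/dev/sdc", 42 },
        };
        const struct mda_copy *best = newest_copy(copies, 3);

        printf("use metadata from %s (seqno %u); older copies get rewritten\n",
               best->dev, best->seqno);
        return 0;
}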
Phase 2 in code:
The most relevant functions are listed for each step in the outline.
For each VG name:
process_each_vg()
. vg_read()
lock_vol()
. vg_read()
lvmcache_label_rescan_vg() (if needed)
[insert phase 1 steps for scanning devs, but only devs in this vg]
. vg_read()
create_instance()
_text_create_text_instance()
_create_vg_text_instance()
lvmcache_fid_add_mdas_vg()
[Copies mda locations from info->mdas where it was saved
by phase 1, into fid->metadata_areas_in_use. This is
the key connection between phase 1 and phase 2.]
. dm_list_iterate_items(mda, &fid->metadata_areas_in_use)
. _vg_read_raw() via ops->vg_read()
raw_read_mda_header()
. _vg_read_raw()
text_read_metadata()
config_file_read_fd()
_read_vg() via ops->read_vg()
. return the 'vg' struct from vg_read() and use it to do
command-specific work
Filter i/o
----------
Some filters must be applied before reading a device, and other filters
must be applied after reading a device. In all cases, the filters must be
applied before lvm processes the device, i.e. before it looks for an lvm
label.
1. Some filters need to be applied prior to reading any devices
because the purpose of the filter is to avoid submitting any
io on the excluded devices. The regex filter is the primary
example. Other filters benefit from being applied prior to
reading devices because they can tell which devices to
exclude without doing io to the device. An example of this
is the mpath filter.
2. Some filters need to be applied after reading a device because
they are based on data/signatures seen on the device.
The partitioned filter is an example of this; lvm needs to
read a device to see if it has a partition table before it can
know whether to exclude the device from further processing.
We apply filters from 1 before reading devices, and we apply filters from
2 after populating bcache, but before processing the device (i.e. before
checking for an lvm label, which is the first step in processing.)
The current implementation of this makes filters return -EAGAIN if they
want to read the device, but bcache data is not yet available. This will
happen when filtering runs prior to populating bcache. In this case the
device is flagged. After bcache is populated, the filters are reapplied
to the flagged devices. The filters which need to look at device content
are now able to get it from bcache. Devices that do not pass filters at
this point are excluded just like devices which were excluded earlier.
(Some filters from 2 can be skipped by consulting udev for the information
instead of reading the device. This is not entirely reliable, so it is
disabled by default with the config setting external_device_info_source.
It may be worthwhile to change the filters to use the udev info as a hint,
or only use udev info for filtering in reporting commands where
inaccuracies are not a big problem.)
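A small self-contained sketch of the flag-and-retry flow described above
(the device struct, the two filters and the -EAGAIN convention are written
from scratch for illustration; the real lvm2 filters are more involved):

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct dev {
        const char *name;
        int has_partition_table;  /* would really come from the device data */
        int flagged;              /* filter wanted device data; retry later */
        int excluded;
};

/* A "type 1" filter: decides from the name alone, no device io needed. */
static int name_filter(const struct dev *d)
{
        return strncmp(d->name, "/dev/ram", 8) != 0;  /* stands in for regex */
}

/* A "type 2" filter: needs device content; until bcache holds the data
 * it answers -EAGAIN so the caller can flag the device and come back. */
static int partition_filter(const struct dev *d, int bcache_ready)
{
        if (!bcache_ready)
                return -EAGAIN;
        return d->has_partition_table ? 0 : 1;
}

int main(void)
{
        struct dev devs[] = {
                { "/dev/sda", 1, 0, 0 },
                { "/dev/sdb", 0, 0, 0 },
                { "/dev/ram0", 0, 0, 0 },
        };
        int i, r, n = 3;

        /* Pass 1: before any reads.  Type 1 filters decide now; type 2
         * filters flag the device for a second look. */
        for (i = 0; i < n; i++) {
                if (!name_filter(&devs[i])) {
                        devs[i].excluded = 1;
                        continue;
                }
                r = partition_filter(&devs[i], 0);
                if (r == -EAGAIN)
                        devs[i].flagged = 1;
                else if (!r)
                        devs[i].excluded = 1;
        }

        /* ... populate bcache for the devices that survived pass 1 ... */

        /* Pass 2: re-apply content-based filters to the flagged devices. */
        for (i = 0; i < n; i++) {
                if (devs[i].excluded || !devs[i].flagged)
                        continue;
                if (!partition_filter(&devs[i], 1))
                        devs[i].excluded = 1;
        }

        for (i = 0; i < n; i++)
                printf("%-12s %s\n", devs[i].name,
                       devs[i].excluded ? "excluded" : "passes filters");
        return 0;
}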
I/O Performance
---------------
. 400 loop devices used as PVs
. 40 VGs each with 10 PVs
. each VG has one active LV
. each of the 10 PVs in vg0 has an artificial 100 ms read delay
. read/write/io_submit are system call counts using strace
. old is lvm 2.2.175
. new is lvm 2.2.178 (a build from shortly before the release)
Command: pvs
------------
old: 0m17.422s
new: 0m0.331s
old: read 7773 write 497
new: read 2807 write 495 io_submit 448
Command: vgs
------------
old: 0m20.383s
new: 0m0.325s
old: read 10684 write 129
new: read 2807 write 129 io_submit 448
Command: vgck vg0
-----------------
old: 0m16.212s
new: 0m1.290s
old: read 6372 write 4
new: read 2807 write 4 io_submit 458
Command: lvcreate -n test -l1 -an vg0
-------------------------------------
old: 0m29.271s
new: 0m1.351s
old: read 6503 write 39
new: read 2808 write 9 io_submit 488
Command: lvremove vg0/test
--------------------------
old: 0m29.262s
new: 0m1.348s
old: read 6502 write 36
new: read 2807 write 6 io_submit 488
io_submit sources
-----------------
vgs:
reads:
- 400 for each PV
- 40 for each LV
- 8 for other devs on the system
vgck vg0:
reads:
- 400 for each PV
- 40 for each LV
- 10 for each PV in vg0 (rescan)
- 8 for other devs on the system
lvcreate -n test -l1 -an vg0
reads:
- 400 for each PV
- 40 for each LV
- 10 for each PV in vg0 (rescan)
- 8 for other devs on the system
writes:
- 10 for metadata on each PV in vg0
- 10 for precommit on each PV in vg0
- 10 for commit on each PV in vg0
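(As a cross-check, these breakdowns add up to the io_submit counts reported
above: vgs is 400 + 40 + 8 = 448, vgck vg0 is 400 + 40 + 10 + 8 = 458, and
lvcreate is 458 reads plus 10 + 10 + 10 = 30 metadata writes = 488.)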
With lvmetad
------------
Command: pvs
------------
old: 0m5.405s
new: 0m1.404s
Command: vgs
------------
old: 0m0.222s
new: 0m0.223s
Command: lvcreate -n test -l1 -an vg0
-------------------------------------
old: 0m10.128s
new: 0m1.137s


@@ -1,158 +0,0 @@
Over time, I'd like to refactor the LVM code into these high level modules.
+-------------------------------------------+
| |
| User Interface |
| |
| |
+-------------------+-----------------------+
|
+--------------------v-----------------------+
| |
| LVM Core |
| |
| |
+----+----------------+-----------------+----+
| | |
+-----v-----+ +-----v------+ +------v----+
| | | | | |
| Device | | Metadata | | System |
| Mapper | | | | |
| | | | | |
| | | | | |
| | | | | |
+-----------+ +------------+ +-----------+
+---------------------------------------------------------+
+------------------------------------+
| |
| Base |
| |
| |
| |
| |
+------------------------------------+
Going from the bottom up we have:
Base
----
This holds all our general purpose code such as data structures, regex engine,
memory allocators. In fact pretty much everything in libdevmapper apart from
the dm code and config.
This can be used by any code in the system, which is why I've drawn a line
between it and the code above rather than using arrows.
If anyone can come up with a better name please do. I'm trying to stay away
from 'utils'.
Device mapper
-------------
As well as the low level dm-ioctl driving code we need to have all our dm 'best
practise' stuff in here. For instance this is the code that decides to use the
mirror target to move some data around; that knows to suspend a thin volume
before taking a snapshot of it. This module is going to have a lot more code
in it than the current libdevmapper.
It should not know anything about the LVM abstractions or metadata (no PVs, LVs
or VGs). It just knows about the dm world.
Code in here is only allowed to use base.
Metadata model
--------------
Here we have all the format handling, labelling, config parsing etc. We try
and put *everything* to do with LVM in here that doesn't actually require dm.
System
------
Code that interfaces with the system (udev etc).
LVM Core
--------
[terrible name]
This ties together the last 3 units. It should just be glue. We need to be
strict about pushing code down from here to keep this as small as possible.
User interface
--------------
Self explanatory.
Headers
-------
Headers will be included using sub directories to make it clearer where they
are in the tree.
eg,
#include "base/mm/pool.h"
#include "base/data-struct/list.h"
#include "dm/thin-provisioning.h"
#include "core/pvmove.h"
Getting there
=============
+-------------------------------------------+
| |
| |
| Tools |
| |
| |
| |
+---------+------------------------------+--+
| |
| +---------------v---------------------------+
| | |
| | |
| | Lib |
| | |
| | |
| | |
| | |
| +----------------+--------------------------+
| |
| |
+-----v-------------------------------v-----+
| |
| |
| libdevmapper |
| |
| |
| |
| |
+-------------------------------------------+
This is where I see us now.
'base' should be easy to factor out, it's just the non-dm part of libdevmapper
(ie. the bulk of it). But we have the problem that libdevmapper is a public
interface to get round.
'lib' is where the bulk of our code currently is. Dependency-wise the code is
a bit like a ball of string. So splitting it up is going to take time. We can
probably pull code pretty quickly into the 'metadata model' dir. But factoring
out the dm best practises stuff is going to require splitting things at least
at the file level, and probably at the function level too. Certainly not
something that can be done in one
go. System should just be a question of cherry picking functions.
I'm not too familiar with the tools dir. Hopefully it just corresponds with
the User Interface module and doesn't contain any business logic.


@@ -1,53 +0,0 @@
Version 2.02.178
================
There are going to be some large changes to the lvm2 codebase
over the next year or so. Starting with this release. These
changes should be internal rather than having a big effect on
the command line. Inevitably these changes will increase the
chance of bugs, so please be on the alert.
Remove support for obsolete metadata formats
--------------------------------------------
Support for the GFS pool format, and for the format used by the
original 1990s version of LVM1, has been removed.
Use asynchronous IO
-------------------
Almost all IO uses libaio now.
Rewrite label scanning
----------------------
Dave Teigland has reworked the label scanning and metadata reading
logic to minimise the amount of IOs issued. Combined with the aio changes
this can greatly improve scanning speed for some systems.
./configure options
-------------------
We're going to try and remove as many options from ./configure as we
can. Each option multiplies the number of possible configurations
that we should test (this testing is currently not occurring).
The first batch to be removed are:
--enable-testing
--with-snapshots
--with-mirrors
--with-raid
--with-thin
--with-cache
Stable targets that are in the upstream kernel will just be supported.
In future optional target flags will be given in two situations:
1) The target is experimental, or not upstream at all (eg, vdo).
2) The target is deprecated and support will be removed at some future date.
This decision could well be contentious, so distro maintainers should feel
free to comment.


@@ -1,257 +0,0 @@
Building unit tests
===================
make unit-test/unit-test
Running unit tests
==================
The tests leave no artifacts at the moment, so you can just run
unit-test/unit-test from wherever you want.
./unit-test <list|run> [pattern]
Listing tests
-------------
Every test has a symbolic path associated with it. Just like file paths they
are split into components separated by '/'s. The 'list' command will show you
a tree of these tests, along with some description text.
ejt@devel-vm1:~/lvm2/unit-test/$ ./unit-test list
base
data-struct
bitset
and ................................................. and all bits
equal ............................................... equality
get_next ............................................ get next set bit
list
splice .............................................. joining lists together
string
asprint ............................................. tests asprint
strncpy ............................................. tests string copying
device
bcache
block-size-multiple-page ............................ block size must be a multiple of page size
block-size-positive ................................. block size must be positive
blocks-get-evicted .................................. block get evicted with many reads
cache-blocks-positive ............................... nr cache blocks must be positive
create-destroy ...................................... simple create/destroy
flush-waits ......................................... flush waits for all dirty
get-reads ........................................... bcache_get() triggers read
prefetch-never-waits ................................ too many prefetches does not trigger a wait
prefetch-reads ...................................... prefetch issues a read
read-multiple-files ................................. read from multiple files
reads-cached ........................................ repeated reads are cached
writeback-occurs .................................... dirty data gets written back
zero-flag-dirties ................................... zeroed data counts as dirty
formatting
percent
0 ................................................... Pretty printing of percentages near 0%
100 ................................................. Pretty printing of percentages near 100%
regex
fingerprints .......................................... not sure
matching .............................................. test the matcher with a variety of regexes
dm
target
mirror
status .............................................. parsing mirror status
metadata
config
cascade ............................................... cascade
clone ................................................. duplicating a config tree
parse ................................................. parsing various
An optional 'pattern' argument may be specified to select subsets of tests.
This pattern is a POSIX regex and does a substring match, so you will need to
use anchors if you particularly want the match at the beginning or end of the
string.
ejt@devel-vm1:~/lvm2/unit-test/$ ./unit-test list data-struct
base
data-struct
bitset
and ................................................. and all bits
equal ............................................... equality
get_next ............................................ get next set bit
list
splice .............................................. joining lists together
string
asprint ............................................. tests asprint
strncpy ............................................. tests string copying
ejt@devel-vm1:~/lvm2/unit-test/$ ./unit-test list s$
base
device
bcache
flush-waits ......................................... flush waits for all dirty
get-reads ........................................... bcache_get() triggers read
prefetch-never-waits ................................ too many prefetches does not trigger a wait
prefetch-reads ...................................... prefetch issues a read
read-multiple-files ................................. read from multiple files
writeback-occurs .................................... dirty data gets written back
zero-flag-dirties ................................... zeroed data counts as dirty
regex
fingerprints .......................................... not sure
dm
target
mirror
status .............................................. parsing mirror status
Running tests
=============
'make run-unit-test' from the top level will run all unit tests. But I tend to
run it by hand so I can select just the tests I'm working on.
Use the 'run' command to run the tests. Currently all logging goes to stderr,
so the test runner prints a line at the start of the test and a line
indicating success or failure at the end.
ejt@devel-vm1:~/lvm2/unit-test/$ ./unit-test run bcache/block-size
[RUN ] /base/device/bcache/block-size-multiple-page
bcache block size must be a multiple of page size
bcache block size must be a multiple of page size
bcache block size must be a multiple of page size
bcache block size must be a multiple of page size
[ OK] /base/device/bcache/block-size-multiple-page
[RUN ] /base/device/bcache/block-size-positive
bcache must have a non zero block size
[ OK] /base/device/bcache/block-size-positive
2/2 tests passed
ejt@devel-vm1:~/lvm2/unit-test/$ ./unit-test run data-struct
[RUN ] /base/data-struct/bitset/and
[ OK] /base/data-struct/bitset/and
[RUN ] /base/data-struct/bitset/equal
[ OK] /base/data-struct/bitset/equal
[RUN ] /base/data-struct/bitset/get_next
[ OK] /base/data-struct/bitset/get_next
[RUN ] /base/data-struct/list/splice
[ OK] /base/data-struct/list/splice
[RUN ] /base/data-struct/string/asprint
[ OK] /base/data-struct/string/asprint
[RUN ] /base/data-struct/string/strncpy
[ OK] /base/data-struct/string/strncpy
6/6 tests passed
Writing tests
=============
[See unit-test/framework.h and unit-test/units.h for the details]
Tests are grouped together into 'suites'; all tests in a suite share a
'fixture'. A fixture is a void * to any object you want; use it to set up any
common environment that you need for the tests to run (eg, creating a dm_pool).
Test suites have nothing to do with the test paths: you can have tests from
different suites with similar paths, and the runner sorts things out for you.
Put your tests in a file in unit-test/, with '_t' at the end of the name
(convention only, nothing relies on this).
#include "units.h"
Then write any fixtures you need:
eg,
static void *_mem_init(void) {
struct dm_pool *mem = dm_pool_create("bitset test", 1024);
if (!mem) {
fprintf(stderr, "out of memory\n");
exit(1);
}
return mem;
}
static void _mem_exit(void *mem)
{
dm_pool_destroy(mem);
}
Then write your tests, which should take the void * that was returned by your
fixture. Use the T_ASSERT* macros to indicate failure.
eg,
static void test_equal(void *fixture)
{
struct dm_pool *mem = fixture;
dm_bitset_t bs1 = dm_bitset_create(mem, NR_BITS);
dm_bitset_t bs2 = dm_bitset_create(mem, NR_BITS);
int i, j;
for (i = 0, j = 1; i < NR_BITS; i += j, j++) {
dm_bit_set(bs1, i);
dm_bit_set(bs2, i);
}
T_ASSERT(dm_bitset_equal(bs1, bs2));
T_ASSERT(dm_bitset_equal(bs2, bs1));
for (i = 0; i < NR_BITS; i++) {
bit_flip(bs1, i);
T_ASSERT(!dm_bitset_equal(bs1, bs2));
T_ASSERT(!dm_bitset_equal(bs2, bs1));
T_ASSERT(dm_bitset_equal(bs1, bs1)); /* comparing with self */
bit_flip(bs1, i);
}
}
At the end of your test file you should write a function that builds one or
more test suites and adds them to the list of all suites that is passed in. I
tend to write a little macro (T) to save typing the same test path repeatedly.
eg,
#define T(path, desc, fn) register_test(ts, "/base/data-struct/bitset/" path, desc, fn)
void bitset_tests(struct dm_list *all_tests)
{
struct test_suite *ts = test_suite_create(_mem_init, _mem_exit);
if (!ts) {
fprintf(stderr, "out of memory\n");
exit(1);
}
T("get_next", "get next set bit", test_get_next);
T("equal", "equality", test_equal);
T("and", "and all bits", test_and);
dm_list_add(all_tests, &ts->list);
}
Then you need to declare your registration function and call it in units.h.
// Declare the function that adds tests suites here ...
...
void bitset_tests(struct dm_list *suites);
...
// ... and call it in here.
static inline void register_all_tests(struct dm_list *suites)
{
...
bitset_tests(suites);
...
}
Finally add your test file to the Makefile.in and rerun configure.
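As a rough sketch, this last step might look like the following (treat the
variable name and file name as illustrative; check the current Makefile.in
for the list it actually uses):
  # unit-test/Makefile.in: add the new test file to the source list
  SOURCES += bitset_t.c
Then, from the top of the tree:
  ./configure && make run-unit-test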

View File

@@ -1,104 +0,0 @@
# VDO - Compression and deduplication.
Currently device stacking looks like this:
Physical x [multipath] x [partition] x [mdadm] x [LUKS] x [LVS] x [LUKS] x [FS|Database|...]
Adding VDO:
Physical x [multipath] x [partition] x [mdadm] x [LUKS] x [LVS] x [LUKS] x VDO x [LVS] x [FS|Database|...]
## Where VDO fits (and where it does not):
### Backing devices for VDO volumes:
1. Physical x [multipath] x [partition] x [mdadm],
2. LUKS over (1) - full disk encryption.
3. LVs (raids|mirror|stripe|linear) x [cache] over (1).
4. LUKS over (3) - especially when using raids.
Usual limitations apply:
- Never layer LUKS over another LUKS - it makes no sense.
- LUKS is better placed above RAID than below it.
Devices which are not well suited as backing devices:
- thin volumes - at the moment it is not possible to take a snapshot of an active VDO volume on top of a thin volume.
### Using VDO as a PV:
1. under tdata (see the sketch after this list)
- The best fit - it will deduplicate the additional redundancy among all
snapshots and will reduce the footprint.
- Risks: Resize! dmeventd will not be able to handle resizing of the tpool ATM.
2. under corig
- This is useful to keep the most frequently used data in cache
uncompressed or without deduplication if that happens to be a bottleneck.
- Cache may fit better under the VDO device, depending on compressibility and
the amount of duplicates, as
- compression will reduce the amount of data, thus effectively increasing
the size of the cache,
- and deduplication may emphasize hotspots.
- Performance testing of your particular workload is strongly recommended.
3. under (multiple) linear LVs - e.g. used for VMs.
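An illustrative sketch of case (1) above - a VDO volume used as the PV backing
a thin pool - using the separate 'vdo' management tool (device names, sizes
and exact flags here are examples only, not a recommendation):

  vdo create --name=vdo0 --device=/dev/sdb --vdoLogicalSize=10T
  pvcreate /dev/mapper/vdo0
  vgcreate vg0 /dev/mapper/vdo0
  lvcreate --type thin-pool -L 9T -n tpool0 vg0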
### And where VDO does not fit:
- *never* use VDO under LUKS volumes
- these are effectively random data and neither compress nor deduplicate well,
- *never* use VDO under cmeta and tmeta LVs
- these are effectively random data and neither compress nor deduplicate well,
- under raids
- raid{4,5,6} scrambles data, so the data does not deduplicate well,
- raid{1,4,5,6,10} also causes the amount of data to grow, so more work
(duplicated in the case of raid{1,10}) has to be done in order to find fewer duplicates.
### And where it could be useful:
- under a snapshot CoW device - when there are multiple of those it could deduplicate across them
## Development
### Things to decide
- under integrity devices
- VDO should work well for data blocks,
- but hashes are mostly unique and not compressible - were it possible it
would make sense to have separate imeta and idata volumes for integrity
devices.
### Future Integration of VDO into LVM:
One issue is using both LUKS and RAID under VDO. We have two options:
- use mdadm x LUKS x VDO+LV
- use LV RAID x LUKS x VDO+LV
In both cases dmeventd will not be able to resize the volume at the moment.
Another issue is the duality of VDO - it can be used as a top-level LV (with a
filesystem on top), but it can also be used as a "pool" for multiple volumes.
This will be solved in a similar way to how thin pools allow multiple volumes.
Also, VDO has two sizes - its physical size and its virtual size - and when
overprovisioning, just like a tpool, we face the same problems - VDO can get full
without exposing it to the FS. dmeventd monitoring will be needed.
Another possible RFE is to split data and metadata - keep data on HDD and metadata on SSD.
## Issues / Testing
- fstrim/discard pass down - does it work with VDO? (see the sketch after this list)
- VDO can run in synchronous vs. asynchronous mode:
- synchronous for devices where a write is safe once it has been confirmed
(some devices lie about this),
- asynchronous for devices requiring a flush.
- Multiple devices under VDO - we need to find and expose common properties, or
not allow grouping them together. (This is the same for all volumes with
multiple physical devices below.)
- pvmove changing characteristics of underlying device.
- autoactivation during boot?
- Q: can we use VDO for RootFS? Dracut!
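A quick manual check for the fstrim/discard question above might look like
this (filesystem, VG and LV names are examples only):

  mkfs.xfs /dev/vg0/lv0
  mount /dev/vg0/lv0 /mnt
  fstrim -v /mnt   # fstrim reports discard as unsupported if it does not pass down the stack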

View File

@@ -14,7 +14,6 @@
@top_srcdir@/lib/config/defaults.h
@top_srcdir@/lib/datastruct/btree.h
@top_srcdir@/lib/datastruct/str_list.h
@top_srcdir@/lib/device/bcache.h
@top_srcdir@/lib/device/dev-cache.h
@top_srcdir@/lib/device/dev-ext-udev-constants.h
@top_srcdir@/lib/device/dev-type.h

View File

@@ -1,4 +1,4 @@
/* include/configure.h.in. Generated from configure.ac by autoheader. */
/* include/configure.h.in. Generated from configure.in by autoheader. */
/* Define to 1 to use libblkid detection of signatures when wiping. */
#undef BLKID_WIPING_SUPPORT
@@ -72,6 +72,10 @@
/* Default system configuration directory. */
#undef DEFAULT_ETC_DIR
/* Fall back to LVM1 by default if device-mapper is missing from the kernel.
*/
#undef DEFAULT_FALLBACK_TO_LVM1
/* Name of default locking directory. */
#undef DEFAULT_LOCK_DIR
@@ -245,9 +249,6 @@
/* Define to 1 if you have the <langinfo.h> header file. */
#undef HAVE_LANGINFO_H
/* Define to 1 if you have the <libaio.h> header file. */
#undef HAVE_LIBAIO_H
/* Define to 1 if you have the <libcman.h> header file. */
#undef HAVE_LIBCMAN_H
@@ -346,6 +347,9 @@
/* Define to 1 if the system has the type `ptrdiff_t'. */
#undef HAVE_PTRDIFF_T
/* Define to 1 if the compiler has the `__builtin_clz` builtin. */
#undef HAVE___BUILTIN_CLZ
/* Define to 1 if you have the <readline/history.h> header file. */
#undef HAVE_READLINE_HISTORY_H
@@ -474,16 +478,9 @@
/* Define to 1 if you have the `strtoull' function. */
#undef HAVE_STRTOULL
/* Define to 1 if `st_blocks' is a member of `struct stat'. */
#undef HAVE_STRUCT_STAT_ST_BLOCKS
/* Define to 1 if `st_rdev' is a member of `struct stat'. */
#undef HAVE_STRUCT_STAT_ST_RDEV
/* Define to 1 if your `struct stat' has `st_blocks'. Deprecated, use
`HAVE_STRUCT_STAT_ST_BLOCKS' instead. */
#undef HAVE_ST_BLOCKS
/* Define to 1 if you have the <syslog.h> header file. */
#undef HAVE_SYSLOG_H
@@ -555,9 +552,6 @@
/* Define to 1 if you have the <sys/utsname.h> header file. */
#undef HAVE_SYS_UTSNAME_H
/* Define to 1 if you have the <sys/vfs.h> header file. */
#undef HAVE_SYS_VFS_H
/* Define to 1 if you have the <sys/wait.h> header file. */
#undef HAVE_SYS_WAIT_H
@@ -597,9 +591,6 @@
/* Define to 1 if the system has the type `_Bool'. */
#undef HAVE__BOOL
/* Define to 1 if the system has the `__builtin_clz' built-in function */
#undef HAVE___BUILTIN_CLZ
/* Internalization package */
#undef INTL_PACKAGE
@@ -616,6 +607,13 @@
slash. */
#undef LSTAT_FOLLOWS_SLASHED_SYMLINK
/* Define to 1 if 'lvm' should fall back to using LVM1 binaries if
device-mapper is missing from the kernel */
#undef LVM1_FALLBACK
/* Define to 1 to include built-in support for LVM1 metadata. */
#undef LVM1_INTERNAL
/* Path to lvmetad pidfile. */
#undef LVMETAD_PIDFILE
@@ -678,6 +676,9 @@
/* Define to the version of this package. */
#undef PACKAGE_VERSION
/* Define to 1 to include built-in support for GFS pool metadata. */
#undef POOL_INTERNAL
/* Define to 1 to include built-in support for raid. */
#undef RAID_INTERNAL

View File

@@ -16,6 +16,34 @@ srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
ifeq ("@LVM1@", "shared")
SUBDIRS = format1
endif
ifeq ("@POOL@", "shared")
SUBDIRS += format_pool
endif
ifeq ("@SNAPSHOTS@", "shared")
SUBDIRS += snapshot
endif
ifeq ("@MIRRORS@", "shared")
SUBDIRS += mirror
endif
ifeq ("@RAID@", "shared")
SUBDIRS += raid
endif
ifeq ("@THIN@", "shared")
SUBDIRS += thin
endif
ifeq ("@CACHE@", "shared")
SUBDIRS += cache_segtype
endif
ifeq ("@CLUSTER@", "shared")
SUBDIRS += locking
endif
@@ -23,13 +51,10 @@ endif
SOURCES =\
activate/activate.c \
cache/lvmcache.c \
cache_segtype/cache.c \
commands/toolcontext.c \
config/config.c \
datastruct/btree.c \
datastruct/str_list.c \
device/bcache.c \
device/bcache-utils.c \
device/dev-cache.c \
device/dev-ext.c \
device/dev-io.c \
@@ -38,7 +63,6 @@ SOURCES =\
device/dev-type.c \
device/dev-luks.c \
device/dev-dasd.c \
device/dev-lvm1-pool.c \
display/display.c \
error/errseg.c \
unknown/unknown.c \
@@ -53,7 +77,6 @@ SOURCES =\
filters/filter-type.c \
filters/filter-usable.c \
filters/filter-internal.c \
filters/filter-signature.c \
format_text/archive.c \
format_text/archiver.c \
format_text/export.c \
@@ -84,7 +107,6 @@ SOURCES =\
metadata/snapshot_manip.c \
metadata/thin_manip.c \
metadata/vg.c \
mirror/mirrored.c \
misc/crc.c \
misc/lvm-exec.c \
misc/lvm-file.c \
@@ -98,19 +120,55 @@ SOURCES =\
mm/memlock.c \
notify/lvmnotify.c \
properties/prop_common.c \
raid/raid.c \
report/properties.c \
report/report.c \
snapshot/snapshot.c \
striped/striped.c \
thin/thin.c \
uuid/uuid.c \
zero/zero.c
ifeq ("@LVM1@", "internal")
SOURCES +=\
format1/disk-rep.c \
format1/format1.c \
format1/import-export.c \
format1/import-extents.c \
format1/layout.c \
format1/lvm1-label.c \
format1/vg_number.c
endif
ifeq ("@POOL@", "internal")
SOURCES +=\
format_pool/disk_rep.c \
format_pool/format_pool.c \
format_pool/import_export.c \
format_pool/pool_label.c
endif
ifeq ("@CLUSTER@", "internal")
SOURCES += locking/cluster_locking.c
endif
ifeq ("@SNAPSHOTS@", "internal")
SOURCES += snapshot/snapshot.c
endif
ifeq ("@MIRRORS@", "internal")
SOURCES += mirror/mirrored.c
endif
ifeq ("@RAID@", "internal")
SOURCES += raid/raid.c
endif
ifeq ("@THIN@", "internal")
SOURCES += thin/thin.c
endif
ifeq ("@CACHE@", "internal")
SOURCES += cache_segtype/cache.c
endif
ifeq ("@DEVMAPPER@", "yes")
SOURCES +=\
activate/dev_manager.c \
@@ -143,7 +201,14 @@ LIB_STATIC = $(LIB_NAME).a
ifeq ($(MAKECMDGOALS),distclean)
SUBDIRS =\
format1 \
format_pool \
snapshot \
mirror \
notify \
raid \
thin \
cache_segtype \
locking
endif

View File

@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2018 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -28,8 +28,6 @@
#include "config.h"
#include "segtype.h"
#include "sharedlib.h"
#include "lvmcache.h"
#include "metadata.h"
#include <limits.h>
#include <fcntl.h>
@@ -37,6 +35,19 @@
#define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)
int lvm1_present(struct cmd_context *cmd)
{
static char path[PATH_MAX];
if (dm_snprintf(path, sizeof(path), "%s/lvm/global", cmd->proc_dir)
< 0) {
log_error("LVM1 proc global snprintf failed");
return 0;
}
return (path_exists(path)) ? 1 : 0;
}
int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
struct dm_list *modules)
{
@@ -595,7 +606,7 @@ int module_present(struct cmd_context *cmd, const char *target_name)
#endif
struct stat st;
char path[PATH_MAX];
int i = dm_snprintf(path, sizeof(path), "%smodule/dm_%s",
int i = dm_snprintf(path, (sizeof(path) - 1), "%smodule/dm_%s",
dm_sysfs_dir(), target_name);
if (i > 0) {
@@ -995,10 +1006,8 @@ int lv_raid_data_offset(const struct logical_volume *lv, uint64_t *data_offset)
if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
return_0;
if (!(r = dev_manager_raid_status(dm, lv, &status))) {
dev_manager_destroy(dm);
return_0;
}
if (!(r = dev_manager_raid_status(dm, lv, &status)))
stack;
*data_offset = status->data_offset;
@@ -1534,11 +1543,8 @@ static int _lv_is_active(const struct logical_volume *lv,
if (skip_cluster_query)
goto out;
if ((r = cluster_lock_held(lv->lvid.s, "", &e)) >= 0) {
if (l && e)
r = 0; /* exclusive locally */
if ((r = cluster_lock_held(lv->lvid.s, "", &e)) >= 0)
goto out;
}
/*
* If lock query is not supported (due to interfacing with old
@@ -1656,10 +1662,7 @@ static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd
if (!(dmevh = dm_event_handler_create()))
return_NULL;
if (!cmd->default_settings.dmeventd_executable)
cmd->default_settings.dmeventd_executable = find_config_tree_str(cmd, dmeventd_executable_CFG, NULL);
if (dm_event_handler_set_dmeventd_path(dmevh, cmd->default_settings.dmeventd_executable))
if (dm_event_handler_set_dmeventd_path(dmevh, find_config_tree_str(cmd, dmeventd_executable_CFG, NULL)))
goto_bad;
if (dso && dm_event_handler_set_dso(dmevh, dso))
@@ -1675,18 +1678,21 @@ static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd
bad:
dm_event_handler_destroy(dmevh);
return NULL;
}
char *get_monitor_dso_path(struct cmd_context *cmd, int id)
char *get_monitor_dso_path(struct cmd_context *cmd, const char *libpath)
{
const char *libpath = find_config_tree_str(cmd, id, NULL);
char path[PATH_MAX];
char *path;
get_shared_library_path(cmd, libpath, path, sizeof(path));
if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
log_error("Failed to allocate dmeventd library path.");
return NULL;
}
return dm_strdup(path);
get_shared_library_path(cmd, libpath, path, PATH_MAX);
return path;
}
static char *_build_target_uuid(struct cmd_context *cmd, const struct logical_volume *lv)
@@ -1703,18 +1709,13 @@ static char *_build_target_uuid(struct cmd_context *cmd, const struct logical_vo
return build_dm_uuid(cmd->mem, lv, layer);
}
static int _device_registered_with_dmeventd(struct cmd_context *cmd,
const struct logical_volume *lv,
const char **dso,
int *pending, int *monitored)
static int _device_registered_with_dmeventd(struct cmd_context *cmd, const struct logical_volume *lv, int *pending, const char **dso)
{
char *uuid;
enum dm_event_mask evmask;
enum dm_event_mask evmask = 0;
struct dm_event_handler *dmevh;
int r;
*pending = 0;
*monitored = 0;
if (!(uuid = _build_target_uuid(cmd, lv)))
return_0;
@@ -1722,20 +1723,9 @@ static int _device_registered_with_dmeventd(struct cmd_context *cmd,
if (!(dmevh = _create_dm_event_handler(cmd, uuid, NULL, 0, DM_EVENT_ALL_ERRORS)))
return_0;
if ((r = dm_event_get_registered_device(dmevh, 0))) {
if (r == -ENOENT) {
r = 1;
goto out;
}
r = 0;
goto_out;
}
/* FIXME: why do we care which 'dso' is monitoring? */
if (dso && (*dso = dm_event_handler_get_dso(dmevh)) &&
!(*dso = dm_pool_strdup(cmd->mem, *dso))) {
r = 0;
goto_out;
if (dm_event_get_registered_device(dmevh, 0)) {
dm_event_handler_destroy(dmevh);
return 0;
}
evmask = dm_event_handler_get_event_mask(dmevh);
@@ -1744,25 +1734,21 @@ static int _device_registered_with_dmeventd(struct cmd_context *cmd,
evmask &= ~DM_EVENT_REGISTRATION_PENDING;
}
*monitored = evmask;
r = 1;
out:
if (dso && (*dso = dm_event_handler_get_dso(dmevh)) && !(*dso = dm_pool_strdup(cmd->mem, *dso)))
log_error("Failed to duplicate dso name.");
dm_event_handler_destroy(dmevh);
return r;
return evmask;
}
int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
const struct logical_volume *lv,
int *pending, int *monitored)
const struct logical_volume *lv, int *pending)
{
char *uuid;
enum dm_event_mask evmask;
enum dm_event_mask evmask = 0;
struct dm_event_handler *dmevh;
int r;
*pending = 0;
*monitored = 0;
if (!dso)
return_0;
@@ -1773,13 +1759,9 @@ int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, 0, DM_EVENT_ALL_ERRORS)))
return_0;
if ((r = dm_event_get_registered_device(dmevh, 0))) {
if (r == -ENOENT) {
r = 1;
goto out;
}
r = 0;
goto_out;
if (dm_event_get_registered_device(dmevh, 0)) {
dm_event_handler_destroy(dmevh);
return 0;
}
evmask = dm_event_handler_get_event_mask(dmevh);
@@ -1788,12 +1770,9 @@ int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
evmask &= ~DM_EVENT_REGISTRATION_PENDING;
}
*monitored = evmask;
r = 1;
out:
dm_event_handler_destroy(dmevh);
return r;
return evmask;
}
int target_register_events(struct cmd_context *cmd, const char *dso, const struct logical_volume *lv,
@@ -1821,7 +1800,7 @@ int target_register_events(struct cmd_context *cmd, const char *dso, const struc
if (!r)
return_0;
log_verbose("%s %s for events", set ? "Monitored" : "Unmonitored", uuid);
log_very_verbose("%s %s for events", set ? "Monitored" : "Unmonitored", uuid);
return 1;
}
@@ -1836,7 +1815,7 @@ int monitor_dev_for_events(struct cmd_context *cmd, const struct logical_volume
const struct lv_activate_opts *laopts, int monitor)
{
#ifdef DMEVENTD
int i, pending = 0, monitored = 0;
int i, pending = 0, monitored;
int r = 1;
struct dm_list *snh, *snht;
struct lv_segment *seg;
@@ -1844,7 +1823,6 @@ int monitor_dev_for_events(struct cmd_context *cmd, const struct logical_volume
int (*monitor_fn) (struct lv_segment *s, int e);
uint32_t s;
static const struct lv_activate_opts zlaopts = { 0 };
struct lv_activate_opts mirr_laopts = { .origin_only = 1 };
struct lvinfo info;
const char *dso = NULL;
int new_unmonitor;
@@ -1946,7 +1924,9 @@ int monitor_dev_for_events(struct cmd_context *cmd, const struct logical_volume
continue;
if (!monitor_dev_for_events(cmd, seg_lv(seg, s), NULL,
monitor)) {
stack;
log_error("Failed to %smonitor %s",
monitor ? "" : "un",
display_lvname(seg_lv(seg, s)));
r = 0;
}
}
@@ -1981,21 +1961,11 @@ int monitor_dev_for_events(struct cmd_context *cmd, const struct logical_volume
!seg->segtype->ops->target_monitored) /* doesn't support registration */
continue;
if (!monitor) {
if (!monitor)
/* When unmonitoring, obtain existing dso being used. */
if (!_device_registered_with_dmeventd(cmd, seg_is_snapshot(seg) ? seg->cow : seg->lv,
&dso, &pending, &monitored)) {
log_warn("WARNING: Failed to %smonitor %s.",
monitor ? "" : "un",
display_lvname(seg_is_snapshot(seg) ? seg->cow : seg->lv));
return 0;
}
} else if (!seg->segtype->ops->target_monitored(seg, &pending, &monitored)) {
log_warn("WARNING: Failed to %smonitor %s.",
monitor ? "" : "un",
display_lvname(seg->lv));
return 0;
}
monitored = _device_registered_with_dmeventd(cmd, seg_is_snapshot(seg) ? seg->cow : seg->lv, &pending, &dso);
else
monitored = seg->segtype->ops->target_monitored(seg, &pending);
/* FIXME: We should really try again if pending */
monitored = (pending) ? 0 : monitored;
@@ -2007,9 +1977,7 @@ int monitor_dev_for_events(struct cmd_context *cmd, const struct logical_volume
if (monitored)
log_verbose("%s already monitored.", display_lvname(lv));
else if (seg->segtype->ops->target_monitor_events) {
log_very_verbose("Monitoring %s with %s.%s", display_lvname(lv),
seg->segtype->dso,
test_mode() ? " [Test mode: skipping this]" : "");
log_verbose("Monitoring %s%s", display_lvname(lv), test_mode() ? " [Test mode: skipping this]" : "");
monitor_fn = seg->segtype->ops->target_monitor_events;
}
} else {
@@ -2031,42 +1999,26 @@ int monitor_dev_for_events(struct cmd_context *cmd, const struct logical_volume
if (new_unmonitor) {
if (!target_register_events(cmd, dso, seg_is_snapshot(seg) ? seg->cow : lv, 0, 0, 10)) {
log_warn("WARNING: %s: segment unmonitoring failed.",
display_lvname(lv));
log_error("%s: segment unmonitoring failed.",
display_lvname(lv));
return 0;
}
} else if (monitor_fn) {
/* FIXME specify events */
if (!monitor_fn(seg, 0)) {
log_warn("WARNING: %s: %s segment monitoring function failed.",
display_lvname(lv), lvseg_name(seg));
log_error("%s: %s segment monitoring function failed.",
display_lvname(lv), lvseg_name(seg));
return 0;
}
} else
continue;
if (!vg_write_lock_held() && lv_is_mirror(lv)) {
mirr_laopts.exclusive = lv_is_active_exclusive_locally(lv) ? 1 : 0;
/*
* Commands vgchange and lvchange do use read-only lock when changing
* monitoring (--monitor y|n). All other use cases hold 'write-lock'
* so they skip this dm mirror table refreshing step.
*/
if (!_lv_activate_lv(lv, &mirr_laopts)) {
stack;
r = 0;
}
}
/* Check [un]monitor results */
/* Try a couple times if pending, but not forever... */
for (i = 0;; i++) {
pending = 0;
if (!seg->segtype->ops->target_monitored(seg, &pending, &monitored)) {
stack;
r = 0;
break;
}
monitored = seg->segtype->ops->target_monitored(seg, &pending);
if (!pending || i >= 40)
break;
log_very_verbose("%s %smonitoring still pending: waiting...",
@@ -2079,8 +2031,8 @@ int monitor_dev_for_events(struct cmd_context *cmd, const struct logical_volume
}
if (!r && !error_message_produced())
log_warn("WARNING: %sonitoring %s failed.", monitor ? "M" : "Not m",
display_lvname(lv));
log_error("%sonitoring %s failed.", monitor ? "M" : "Not m",
display_lvname(lv));
return r;
#else
return 1;
@@ -2111,12 +2063,6 @@ static int _preload_detached_lv(struct logical_volume *lv, void *data)
return_0;
}
if (!lv_is_visible(lv) && (lv_pre = find_lv(detached->lv_pre->vg, lv->name)) &&
lv_is_visible(lv_pre)) {
if (!_lv_preload(lv_pre, detached->laopts, detached->flush_required))
return_0;
}
/* FIXME: condition here should be far more limiting to really
* detect detached LVs */
if ((lv_pre = find_lv(detached->lv_pre->vg, lv->name))) {
@@ -2146,67 +2092,11 @@ static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
struct dm_pool *mem = NULL;
struct dm_list suspend_lvs;
struct lv_list *lvl;
const union lvid *lvid = (const union lvid *) lvid_s;
const char *vgid = (const char *)lvid->id[0].uuid;
struct volume_group *vg;
struct volume_group *vg_pre;
int found;
if (!activation())
return 1;
if (!cmd->is_clvmd)
goto skip_read;
if (lv && lv_pre)
goto skip_read;
if (!(vg = lvmcache_get_saved_vg(vgid, 0))) {
log_debug("lv_suspend did not find saved_vg %.8s so reading", vgid);
if (!(vg = vg_read_by_vgid(cmd, vgid, 0))) {
log_error("lv_suspend could not read vgid %.8s", vgid);
goto out;
}
log_debug("lv_suspend using read vg %s %d %p", vg->name, vg->seqno, vg);
} else {
log_debug("lv_suspend using saved_vg %s %d %p", vg->name, vg->seqno, vg);
}
if (!(vg_pre = lvmcache_get_saved_vg(vgid, 1))) {
log_debug("lv_suspend did not find pre saved_vg %.8s so reading", vgid);
if (!(vg_pre = vg_read_by_vgid(cmd, vgid, 1))) {
log_error("lv_suspend could not read pre vgid %.8s", vgid);
goto out;
}
log_debug("lv_suspend using pre read vg %s %d %p", vg_pre->name, vg_pre->seqno, vg_pre);
} else {
log_debug("lv_suspend using pre saved_vg %s %d %p", vg_pre->name, vg_pre->seqno, vg_pre);
}
/*
* Note that vg and vg_pre returned by vg_read_by_vgid will
* not be the same as saved_vg_old/saved_vg_new that would
* be returned by lvmcache_get_saved_vg() because the saved_vg's
* are copies of the vg struct that is created by _vg_read.
* (Should we grab and use the saved_vg to use here instead of
* the vg returned by vg_read_by_vgid?)
*/
if ((vg->status & EXPORTED_VG) || (vg_pre->status & EXPORTED_VG)) {
log_error("Volume group \"%s\" is exported", vg->name);
goto out;
}
lv = lv_to_free = find_lv_in_vg_by_lvid(vg, lvid);
lv_pre = lv_pre_to_free = find_lv_in_vg_by_lvid(vg_pre, lvid);
if (!lv || !lv_pre) {
log_error("lv_suspend could not find lv %p lv_pre %p vg %p vg_pre %p vgid %s",
lv, lv_pre, vg, vg_pre, vgid);
goto out;
}
skip_read:
/* lv comes from committed metadata */
if (!lv && !(lv_to_free = lv = lv_from_lvid(cmd, lvid_s, 0)))
goto_out;
@@ -2231,19 +2121,6 @@ skip_read:
if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
goto_out;
/*
* Save old and new (current and precommitted) versions of the
* VG metadata for lv_resume() to use, since lv_resume can't
* read metadata given that devices are suspended. lv_resume()
* will resume LVs using the old/current metadata if the vg_commit
* did happen (or failed), and it will resume LVs using the
* new/precommitted metadata if the vg_commit succeeded.
*/
if (cmd->is_clvmd) {
lvmcache_save_vg(lv->vg, 0);
lvmcache_save_vg(lv_pre->vg, 1);
}
if (!info.exists || info.suspended) {
if (!error_if_not_suspended) {
r = 1;
@@ -2450,55 +2327,16 @@ static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
struct lv_activate_opts *laopts, int error_if_not_active,
const struct logical_volume *lv)
{
const struct logical_volume *lv_to_free = NULL;
struct dm_list *snh;
struct volume_group *vg = NULL;
struct logical_volume *lv_found = NULL;
const union lvid *lvid;
const char *vgid;
struct lvinfo info;
int r = 0;
if (!activation())
return 1;
/*
* When called in clvmd, lvid_s is set and lv is not. We need to
* get the VG metadata without reading disks because devs are
* suspended. lv_suspend() saved old and new VG metadata for us
* to use here. If vg_commit() happened, lvmcache_get_saved_vg_latest
* will return the new metadata for us to use in resuming LVs.
* If vg_commit() did not happen, lvmcache_get_saved_vg_latest
* returns the old metadata which we use to resume LVs.
*/
if (!lv && lvid_s) {
lvid = (const union lvid *) lvid_s;
vgid = (const char *)lvid->id[0].uuid;
if ((vg = lvmcache_get_saved_vg_latest(vgid))) {
log_debug_activation("Resuming LVID %s found saved vg seqno %d %s", lvid_s, vg->seqno, vg->name);
if ((lv_found = find_lv_in_vg_by_lvid(vg, lvid))) {
log_debug_activation("Resuming LVID %s found saved LV %s", lvid_s, display_lvname(lv_found));
lv = lv_found;
} else
log_debug_activation("Resuming LVID %s did not find saved LV", lvid_s);
} else
log_debug_activation("Resuming LVID %s did not find saved VG", lvid_s);
/*
* resume must have been called without a preceding suspend,
* so we need to read the vg.
*/
if (!lv) {
log_debug_activation("Resuming LVID %s reading VG", lvid_s);
if (!(lv_found = lv_from_lvid(cmd, lvid_s, 0))) {
log_debug_activation("Resuming LVID %s failed to read VG", lvid_s);
goto out;
}
lv = lv_found;
}
}
if (!lv && !(lv_to_free = lv = lv_from_lvid(cmd, lvid_s, 0)))
goto_out;
if (!lv_is_origin(lv) && !lv_is_thin_volume(lv) && !lv_is_thin_pool(lv))
laopts->origin_only = 0;
@@ -2559,6 +2397,9 @@ needs_resume:
r = 1;
out:
if (lv_to_free)
release_vg(lv_to_free->vg);
return r;
}
@@ -2733,15 +2574,6 @@ static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
if (!lv && !(lv_to_free = lv = lv_from_lvid(cmd, lvid_s, 0)))
goto out;
if (!laopts->exclusive &&
(lv_is_origin(lv) ||
seg_only_exclusive(first_seg(lv)))) {
log_error(INTERNAL_ERROR "Trying non-exlusive activation of %s with "
"a volume type %s requiring exclusive activation.",
display_lvname(lv), lvseg_name(first_seg(lv)));
return 0;
}
if (filter && !_passes_activation_filter(cmd, lv)) {
log_verbose("Not activating %s since it does not pass "
"activation filter.", display_lvname(lv));
@@ -2787,12 +2619,7 @@ static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
goto out;
}
/* Component LV activation is enforced to be 'read-only' */
/* TODO: should not apply for LVs in maintenance mode */
if (!lv_is_visible(lv) && lv_is_component(lv)) {
laopts->read_only = 1;
laopts->component_lv = lv;
} else if (filter)
if (filter)
laopts->read_only = _passes_readonly_filter(cmd, lv);
log_debug_activation("Activating %s%s%s%s%s.", display_lvname(lv),
@@ -2808,7 +2635,7 @@ static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
* Nothing to do?
*/
if (info.exists && !info.suspended && info.live_table &&
(info.read_only == read_only_lv(lv, laopts, NULL))) {
(info.read_only == read_only_lv(lv, laopts))) {
r = 1;
log_debug_activation("LV %s is already active.", display_lvname(lv));
goto out;
@@ -2979,112 +2806,3 @@ void activation_exit(void)
dev_manager_exit();
}
#endif
static int _component_cb(struct logical_volume *lv, void *data)
{
struct logical_volume **component_lv = (struct logical_volume **) data;
if (lv_is_locked(lv) || lv_is_pvmove(lv) ||/* ignoring */
/* thin-pool is special and it's using layered device */
(lv_is_thin_pool(lv) && pool_is_active(lv)))
return -1;
if (lv_is_active(lv)) {
if (!lv_is_component(lv) || lv_is_visible(lv))
return -1; /* skip whole subtree */
log_debug_activation("Found active component LV %s.", display_lvname(lv));
*component_lv = lv;
return 0; /* break any further processing */
}
return 1;
}
/*
* Finds out for any LV if any of its component LVs are active.
* Function first checks if an existing LV is visible and active eventually
* it's lock holding LV is already active. In such case sub LV cannot be
* actived alone and no further checking is needed.
*
* Returns active component LV if there is such.
*/
const struct logical_volume *lv_component_is_active(const struct logical_volume *lv)
{
const struct logical_volume *component_lv = NULL;
const struct logical_volume *holder_lv = lv_lock_holder(lv);
if ((holder_lv != lv) && lv_is_active(holder_lv))
return NULL; /* Lock holding LV is active, do not check components */
if (_component_cb((struct logical_volume *) lv, &holder_lv) == 1)
(void) for_each_sub_lv((struct logical_volume *) lv, _component_cb,
(void*) &component_lv);
return component_lv;
}
/*
* Finds out if any LV above is active, as stacked device tree can be composed of
* chained set of LVs.
*
* Returns active holder LV if there is such.
*/
const struct logical_volume *lv_holder_is_active(const struct logical_volume *lv)
{
const struct logical_volume *holder;
const struct seg_list *sl;
if (lv_is_locked(lv) || lv_is_pvmove(lv))
return NULL; /* Skip pvmove/locked LV tracking */
dm_list_iterate_items(sl, &lv->segs_using_this_lv) {
/* Recursive call for upper-stack holder */
if ((holder = lv_holder_is_active(sl->seg->lv)))
return holder;
if (lv_is_active(sl->seg->lv)) {
log_debug_activation("Found active holder LV %s.", display_lvname(sl->seg->lv));
return sl->seg->lv;
}
}
return NULL;
}
static int _deactivate_sub_lv_cb(struct logical_volume *lv, void *data)
{
struct logical_volume **slv = data;
if (lv_is_thin_pool(lv) || lv_is_external_origin(lv))
return -1;
if (!deactivate_lv(lv->vg->cmd, lv)) {
*slv = lv;
return 0;
}
return 1;
}
/*
* Deactivates LV toghether with explicit deactivation call made also for all its component LVs.
*/
int deactivate_lv_with_sub_lv(const struct logical_volume *lv)
{
struct logical_volume *flv;
if (!deactivate_lv(lv->vg->cmd, lv)) {
log_error("Cannot deactivate logical volume %s.",
display_lvname(lv));
return 0;
}
if (!for_each_sub_lv((struct logical_volume *)lv, _deactivate_sub_lv_cb, &flv)) {
log_error("Cannot deactivate subvolume %s of logical volume %s.",
display_lvname(flv), display_lvname(lv));
return 0;
}
return 1;
}

View File

@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2018 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -83,7 +83,6 @@ struct lv_activate_opts {
* flags are persistent in udev db for any spurious event
* that follows. */
unsigned resuming; /* Set when resuming after a suspend. */
const struct logical_volume *component_lv;
};
void set_activation(int activation, int silent);
@@ -91,6 +90,7 @@ int activation(void);
int driver_version(char *version, size_t size);
int library_version(char *version, size_t size);
int lvm1_present(struct cmd_context *cmd);
int module_present(struct cmd_context *cmd, const char *target_name);
int target_present_version(struct cmd_context *cmd, const char *target_name,
@@ -198,11 +198,6 @@ int lv_is_active_exclusive(const struct logical_volume *lv);
int lv_is_active_exclusive_locally(const struct logical_volume *lv);
int lv_is_active_exclusive_remotely(const struct logical_volume *lv);
/* Check is any component LV is active */
const struct logical_volume *lv_component_is_active(const struct logical_volume *lv);
const struct logical_volume *lv_holder_is_active(const struct logical_volume *lv);
int deactivate_lv_with_sub_lv(const struct logical_volume *lv);
int lv_has_target_type(struct dm_pool *mem, const struct logical_volume *lv,
const char *layer, const char *target_type);
@@ -211,9 +206,9 @@ int monitor_dev_for_events(struct cmd_context *cmd, const struct logical_volume
#ifdef DMEVENTD
# include "libdevmapper-event.h"
char *get_monitor_dso_path(struct cmd_context *cmd, int id);
char *get_monitor_dso_path(struct cmd_context *cmd, const char *libpath);
int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
const struct logical_volume *lv, int *pending, int *monitored);
const struct logical_volume *lv, int *pending);
int target_register_events(struct cmd_context *cmd, const char *dso, const struct logical_volume *lv,
int evmask __attribute__((unused)), int set, int timeout);
#endif

View File

@@ -1,6 +1,6 @@
/*
* Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2018 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -32,8 +32,6 @@
#define MAX_TARGET_PARAMSIZE 50000
#define LVM_UDEV_NOSCAN_FLAG DM_SUBSYSTEM_UDEV_FLAG0
#define CRYPT_TEMP "CRYPT-TEMP"
#define STRATIS "stratis-"
typedef enum {
PRELOAD,
@@ -74,17 +72,10 @@ struct dev_manager {
struct lv_layer {
const struct logical_volume *lv;
const char *old_name;
int visible_component;
};
int read_only_lv(const struct logical_volume *lv, const struct lv_activate_opts *laopts, const char *layer)
int read_only_lv(const struct logical_volume *lv, const struct lv_activate_opts *laopts)
{
if (layer && lv_is_cow(lv))
return 0; /* Keep snapshot's COW volume writable */
if (lv_is_raid_image(lv) || lv_is_raid_metadata(lv))
return 0; /* Keep RAID SubLvs writable */
return (laopts->read_only || !(lv->status & LVM_WRITE));
}
@@ -450,21 +441,15 @@ static int _ignore_suspended_snapshot_component(struct device *dev)
do {
next = dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
if (!target_type)
continue;
if (!strcmp(target_type, TARGET_NAME_SNAPSHOT)) {
if (!target_type || !strcmp(target_type, TARGET_NAME_SNAPSHOT)) {
if (!params || sscanf(params, "%d:%d %d:%d", &major1, &minor1, &major2, &minor2) != 4) {
log_warn("WARNING: Incorrect snapshot table found for %d:%d.",
(int)MAJOR(dev->dev), (int)MINOR(dev->dev));
log_error("Incorrect snapshot table found.");
goto out;
}
r = r || _device_is_suspended(major1, minor1) || _device_is_suspended(major2, minor2);
} else if (!strcmp(target_type, TARGET_NAME_SNAPSHOT_ORIGIN)) {
if (!params || sscanf(params, "%d:%d", &major1, &minor1) != 2) {
log_warn("WARNING: Incorrect snapshot-origin table found for %d:%d.",
(int)MAJOR(dev->dev), (int)MINOR(dev->dev));
log_error("Incorrect snapshot-origin table found.");
goto out;
}
r = r || _device_is_suspended(major1, minor1);
@@ -499,7 +484,7 @@ static int _ignore_unusable_thins(struct device *dev)
dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
if (!params || sscanf(params, "%d:%d", &major, &minor) != 2) {
log_warn("WARNING: Cannot get thin-pool major:minor for thin device %d:%d.",
log_error("Failed to get thin-pool major:minor for thin device %d:%d.",
(int)MAJOR(dev->dev), (int)MINOR(dev->dev));
goto out;
}
@@ -529,47 +514,6 @@ out:
return r;
}
static int _ignore_invalid_snapshot(const char *params)
{
struct dm_status_snapshot *s;
struct dm_pool *mem;
int r = 0;
if (!(mem = dm_pool_create("invalid snapshots", 128)))
return_0;
if (!dm_get_status_snapshot(mem, params, &s))
stack;
else
r = s->invalid;
dm_pool_destroy(mem);
return r;
}
static int _ignore_frozen_raid(struct device *dev, const char *params)
{
struct dm_status_raid *s;
struct dm_pool *mem;
int r = 0;
if (!(mem = dm_pool_create("frozen raid", 128)))
return_0;
if (!dm_get_status_raid(mem, params, &s))
stack;
else if (s->sync_action && !strcmp(s->sync_action, "frozen")) {
log_warn("WARNING: %s frozen raid device (%d:%d) needs inspection.",
dev_name(dev), (int)MAJOR(dev->dev), (int)MINOR(dev->dev));
r = 1;
}
dm_pool_destroy(mem);
return r;
}
/*
* device_is_usable
* @dev
@@ -639,26 +583,12 @@ int device_is_usable(struct device *dev, struct dev_usable_check_params check)
}
}
if (check.check_reserved && uuid &&
(!strncmp(uuid, CRYPT_TEMP, sizeof(CRYPT_TEMP) - 1) ||
!strncmp(uuid, STRATIS, sizeof(STRATIS) - 1))) {
/* Skip private crypto devices */
log_debug_activation("%s: Reserved uuid %s on %s device %s not usable.",
dev_name(dev), uuid,
uuid[0] == 'C' ? "crypto" : "stratis",
name);
goto out;
}
/* FIXME Also check for mpath no paths */
do {
next = dm_get_next_target(dmt, next, &start, &length,
&target_type, &params);
if (!target_type)
continue;
if (check.check_blocked && !strcmp(target_type, TARGET_NAME_MIRROR)) {
if (check.check_blocked && target_type && !strcmp(target_type, TARGET_NAME_MIRROR)) {
if (ignore_lvm_mirrors()) {
log_debug_activation("%s: Scanning mirror devices is disabled.", dev_name(dev));
goto out;
@@ -692,33 +622,21 @@ int device_is_usable(struct device *dev, struct dev_usable_check_params check)
* correctly, not just snapshots but any cobimnation possible
* in a stack - use proper dm tree to check this instead.
*/
if (check.check_suspended &&
if (check.check_suspended && target_type &&
(!strcmp(target_type, TARGET_NAME_SNAPSHOT) || !strcmp(target_type, TARGET_NAME_SNAPSHOT_ORIGIN)) &&
_ignore_suspended_snapshot_component(dev)) {
log_debug_activation("%s: %s device %s not usable.", dev_name(dev), target_type, name);
goto out;
}
if (!strcmp(target_type, TARGET_NAME_SNAPSHOT) &&
_ignore_invalid_snapshot(params)) {
log_debug_activation("%s: Invalid %s device %s not usable.", dev_name(dev), target_type, name);
goto out;
}
if (!strncmp(target_type, TARGET_NAME_RAID, 4) && _ignore_frozen_raid(dev, params)) {
log_debug_activation("%s: Frozen %s device %s not usable.",
dev_name(dev), target_type, name);
goto out;
}
/* TODO: extend check struct ? */
if (!strcmp(target_type, TARGET_NAME_THIN) &&
if (target_type && !strcmp(target_type, TARGET_NAME_THIN) &&
!_ignore_unusable_thins(dev)) {
log_debug_activation("%s: %s device %s not usable.", dev_name(dev), target_type, name);
goto out;
}
if (strcmp(target_type, TARGET_NAME_ERROR))
if (target_type && strcmp(target_type, TARGET_NAME_ERROR))
only_error_target = 0;
} while (next);
@@ -1672,8 +1590,7 @@ int dev_manager_mknodes(const struct logical_volume *lv)
return_0;
if (dminfo.exists) {
/* read-only component LV is also made visible */
if (_lv_has_mknode(lv) || (dminfo.read_only && lv_is_component(lv)))
if (_lv_has_mknode(lv))
r = _dev_manager_lv_mknodes(lv);
} else
r = _dev_manager_lv_rmnodes(lv);
@@ -1735,8 +1652,7 @@ static int _check_udev_fallback(struct cmd_context *cmd)
#endif /* UDEV_SYNC_SUPPORT */
static uint16_t _get_udev_flags(struct dev_manager *dm, const struct logical_volume *lv,
const char *layer, int noscan, int temporary,
int visible_component)
const char *layer, int noscan, int temporary)
{
uint16_t udev_flags = 0;
@@ -1752,7 +1668,7 @@ static uint16_t _get_udev_flags(struct dev_manager *dm, const struct logical_vol
* If not, create just the /dev/mapper content.
*/
/* FIXME: add target's method for this */
if (lv_is_new_thin_pool(lv) || visible_component)
if (lv_is_new_thin_pool(lv))
/* New thin-pool is regular LV with -tpool UUID suffix. */
udev_flags |= DM_UDEV_DISABLE_DISK_RULES_FLAG |
DM_UDEV_DISABLE_OTHER_RULES_FLAG;
@@ -1950,8 +1866,7 @@ static int _add_dev_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
}
if (info.exists && !dm_tree_add_dev_with_udev_flags(dtree, info.major, info.minor,
_get_udev_flags(dm, lv, layer,
0, 0, 0))) {
_get_udev_flags(dm, lv, layer, 0, 0))) {
log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree.",
info.major, info.minor);
return 0;
@@ -1983,56 +1898,9 @@ struct pool_cb_data {
int skip_zero; /* to skip zeroed device header (check first 64B) */
int exec; /* which binary to call */
int opts;
struct {
unsigned maj;
unsigned min;
unsigned patch;
} version;
const char *global;
};
/*
* Simple version of check function calling 'tool -V'
*
* Returns 1 if the tool's version is equal or better to given.
* Otherwise it returns 0.
*/
static int _check_tool_version(struct cmd_context *cmd, const char *tool,
unsigned maj, unsigned min, unsigned patch)
{
const char *argv[] = { tool, "-V", NULL };
struct pipe_data pdata;
FILE *f;
char buf[128] = { 0 };
char *nl;
unsigned v_maj, v_min, v_patch;
int ret = 0;
if (!(f = pipe_open(cmd, argv, 0, &pdata))) {
log_warn("WARNING: Cannot read output from %s.", argv[0]);
} else {
if (fgets(buf, sizeof(buf) - 1, f) &&
(sscanf(buf, "%u.%u.%u", &v_maj, &v_min, &v_patch) == 3)) {
if ((v_maj > maj) ||
((v_maj == maj) &&
((v_min > min) ||
(v_min == min && v_patch >= patch))))
ret = 1;
if ((nl = strchr(buf, '\n')))
nl[0] = 0; /* cut newline away */
log_verbose("Found version of %s %s is %s then requested %u.%u.%u.",
argv[0], buf, ret ? "better" : "older", maj, min, patch);
} else
log_warn("WARNING: Cannot parse output '%s' from %s.", buf, argv[0]);
(void) pipe_close(&pdata);
}
return ret;
}
static int _pool_callback(struct dm_tree_node *node,
dm_node_callback_t type, void *cb_data)
{
@@ -2108,19 +1976,6 @@ static int _pool_callback(struct dm_tree_node *node,
if (!(ret = exec_cmd(pool_lv->vg->cmd, (const char * const *)argv,
&status, 0))) {
if (status == ENOENT) {
log_warn("WARNING: Check is skipped, please install recommended missing binary %s!",
argv[0]);
return 1;
}
if ((data->version.maj || data->version.min || data->version.patch) &&
!_check_tool_version(pool_lv->vg->cmd, argv[0],
data->version.maj, data->version.min, data->version.patch)) {
log_warn("WARNING: Check is skipped, please upgrade installed version of %s!",
argv[0]);
return 1;
}
switch (type) {
case DM_NODE_CALLBACK_PRELOADED:
log_err_once("Check of pool %s failed (status:%d). "
@@ -2178,10 +2033,6 @@ static int _pool_register_callback(struct dev_manager *dm,
data->exec = global_cache_check_executable_CFG;
data->opts = global_cache_check_options_CFG;
data->global = "cache";
if (first_seg(first_seg(lv)->pool_lv)->cache_metadata_format > 1) {
data->version.maj = 0;
data->version.min = 7;
}
} else {
log_error(INTERNAL_ERROR "Registering unsupported pool callback.");
return 0;
@@ -2955,7 +2806,6 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
}
lvlayer->lv = lv;
lvlayer->visible_component = (laopts->component_lv == lv) ? 1 : 0;
/*
* Add LV to dtree.
@@ -2969,11 +2819,10 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
if (!(dnode = dm_tree_add_new_dev_with_udev_flags(dtree, name, dlid,
layer ? UINT32_C(0) : (uint32_t) lv->major,
layer ? UINT32_C(0) : (uint32_t) lv->minor,
read_only_lv(lv, laopts, layer),
read_only_lv(lv, laopts),
((lv->vg->status & PRECOMMITTED) | laopts->revert) ? 1 : 0,
lvlayer,
_get_udev_flags(dm, lv, layer, laopts->noscan, laopts->temporary,
lvlayer->visible_component))))
_get_udev_flags(dm, lv, layer, laopts->noscan, laopts->temporary))))
return_0;
/* Store existing name so we can do rename later */
@@ -3117,7 +2966,7 @@ static int _create_lv_symlinks(struct dev_manager *dm, struct dm_tree_node *root
r = 0;
continue;
}
if (_lv_has_mknode(lvlayer->lv) || lvlayer->visible_component) {
if (_lv_has_mknode(lvlayer->lv)) {
if (!_dev_manager_lv_mknodes(lvlayer->lv))
r = 0;
continue;

View File

@@ -27,7 +27,7 @@ struct dm_info;
struct device;
struct lv_seg_status;
int read_only_lv(const struct logical_volume *lv, const struct lv_activate_opts *laopts, const char *layer);
int read_only_lv(const struct logical_volume *lv, const struct lv_activate_opts *laopts);
/*
* Constructor and destructor.

View File

@@ -441,7 +441,7 @@ static int _fs_op(fs_op_t type, const char *dev_dir, const char *vg_name,
const char *lv_name, const char *dev, const char *old_lv_name,
int check_udev)
{
if (prioritized_section()) {
if (critical_section()) {
if (!_stack_fs_op(type, dev_dir, vg_name, lv_name, dev,
old_lv_name, check_udev))
return_0;
@@ -487,7 +487,7 @@ int fs_rename_lv(const struct logical_volume *lv, const char *dev,
void fs_unlock(void)
{
if (!prioritized_section()) {
if (!critical_section()) {
log_debug_activation("Syncing device names");
/* Wait for all processed udev devices */
if (!dm_udev_wait(_fs_cookie))

1482
lib/cache/lvmcache.c vendored

File diff suppressed because it is too large

48
lib/cache/lvmcache.h vendored
View File

@@ -59,17 +59,21 @@ struct lvmcache_vgsummary {
const char *lock_type;
uint32_t mda_checksum;
size_t mda_size;
int zero_offset;
int seqno;
};
int lvmcache_init(struct cmd_context *cmd);
int lvmcache_init(void);
void lvmcache_allow_reads_with_lvmetad(void);
void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset);
/*
* lvmcache_label_scan() will scan labels the first time it's
* called, but not on subsequent calls, unless
* lvmcache_force_next_label_scan() is called first
* to force the next lvmcache_label_scan() to scan again.
*/
void lvmcache_force_next_label_scan(void);
int lvmcache_label_scan(struct cmd_context *cmd);
int lvmcache_label_rescan_vg(struct cmd_context *cmd, const char *vgname, const char *vgid);
/* Add/delete a device */
struct lvmcache_info *lvmcache_add(struct labeller *labeller, const char *pvid,
@@ -78,7 +82,6 @@ struct lvmcache_info *lvmcache_add(struct labeller *labeller, const char *pvid,
uint32_t vgstatus);
int lvmcache_add_orphan_vginfo(const char *vgname, struct format_type *fmt);
void lvmcache_del(struct lvmcache_info *info);
void lvmcache_del_dev(struct device *dev);
/* Update things */
int lvmcache_update_vgname_and_id(struct lvmcache_info *info,
@@ -102,8 +105,10 @@ struct lvmcache_vginfo *lvmcache_vginfo_from_vgid(const char *vgid);
struct lvmcache_info *lvmcache_info_from_pvid(const char *pvid, struct device *dev, int valid_only);
const char *lvmcache_vgname_from_vgid(struct dm_pool *mem, const char *vgid);
const char *lvmcache_vgid_from_vgname(struct cmd_context *cmd, const char *vgname);
struct device *lvmcache_device_from_pvid(struct cmd_context *cmd, const struct id *pvid, uint64_t *label_sector);
const char *lvmcache_pvid_from_devname(struct cmd_context *cmd, const char *devname);
struct device *lvmcache_device_from_pvid(struct cmd_context *cmd, const struct id *pvid,
unsigned *scan_done_once, uint64_t *label_sector);
const char *lvmcache_pvid_from_devname(struct cmd_context *cmd,
const char *devname);
char *lvmcache_vgname_from_pvid(struct cmd_context *cmd, const char *pvid);
const char *lvmcache_vgname_from_info(struct lvmcache_info *info);
const struct format_type *lvmcache_fmt_from_info(struct lvmcache_info *info);
@@ -129,16 +134,20 @@ int lvmcache_get_vgnameids(struct cmd_context *cmd, int include_internal,
struct dm_list *lvmcache_get_pvids(struct cmd_context *cmd, const char *vgname,
const char *vgid);
/* Returns cached volume group metadata. */
struct volume_group *lvmcache_get_vg(struct cmd_context *cmd, const char *vgname,
const char *vgid, unsigned precommitted);
void lvmcache_drop_metadata(const char *vgname, int drop_precommitted);
void lvmcache_commit_metadata(const char *vgname);
int lvmcache_pvid_is_locked(const char *pvid);
int lvmcache_fid_add_mdas(struct lvmcache_info *info, struct format_instance *fid,
const char *id, int id_len);
int lvmcache_fid_add_mdas_pv(struct lvmcache_info *info, struct format_instance *fid);
int lvmcache_fid_add_mdas_vg(struct lvmcache_vginfo *vginfo, struct format_instance *fid);
int lvmcache_populate_pv_fields(struct lvmcache_info *info,
struct volume_group *vg,
struct physical_volume *pv);
struct physical_volume *pv,
int scan_label_only);
int lvmcache_check_format(struct lvmcache_info *info, const struct format_type *fmt);
void lvmcache_del_mdas(struct lvmcache_info *info);
void lvmcache_del_das(struct lvmcache_info *info);
@@ -155,8 +164,6 @@ uint32_t lvmcache_ext_flags(struct lvmcache_info *info);
const struct format_type *lvmcache_fmt(struct lvmcache_info *info);
struct label *lvmcache_get_label(struct lvmcache_info *info);
struct label *lvmcache_get_dev_label(struct device *dev);
int lvmcache_has_dev_info(struct device *dev);
void lvmcache_update_pv(struct lvmcache_info *info, struct physical_volume *pv,
const struct format_type *fmt);
@@ -180,6 +187,7 @@ int lvmcache_foreach_pv(struct lvmcache_vginfo *vginfo,
uint64_t lvmcache_device_size(struct lvmcache_info *info);
void lvmcache_set_device_size(struct lvmcache_info *info, uint64_t size);
struct device *lvmcache_device(struct lvmcache_info *info);
void lvmcache_make_valid(struct lvmcache_info *info);
int lvmcache_is_orphan(struct lvmcache_info *info);
int lvmcache_uncertain_ownership(struct lvmcache_info *info);
unsigned lvmcache_mda_count(struct lvmcache_info *info);
@@ -188,8 +196,6 @@ uint64_t lvmcache_smallest_mda_size(struct lvmcache_info *info);
int lvmcache_found_duplicate_pvs(void);
void lvmcache_pvscan_duplicate_check(struct cmd_context *cmd);
int lvmcache_get_unused_duplicate_devs(struct cmd_context *cmd, struct dm_list *head);
int vg_has_duplicate_pvs(struct volume_group *vg);
@@ -209,20 +215,4 @@ void lvmcache_remove_unchosen_duplicate(struct device *dev);
int lvmcache_pvid_in_unchosen_duplicates(const char *pvid);
int lvmcache_get_vg_devs(struct cmd_context *cmd,
struct lvmcache_vginfo *vginfo,
struct dm_list *devs);
void lvmcache_set_independent_location(const char *vgname);
int lvmcache_scan_mismatch(struct cmd_context *cmd, const char *vgname, const char *vgid);
/*
* These are clvmd-specific functions and are not related to lvmcache.
* FIXME: rename these with a clvm_ prefix in place of lvmcache_
*/
void lvmcache_save_vg(struct volume_group *vg, int precommitted);
struct volume_group *lvmcache_get_saved_vg(const char *vgid, int precommitted);
struct volume_group *lvmcache_get_saved_vg_latest(const char *vgid);
void lvmcache_drop_saved_vgid(const char *vgid);
#endif

520
lib/cache/lvmetad.c vendored
View File

@@ -37,7 +37,9 @@ static const char *_lvmetad_socket = NULL;
static struct cmd_context *_lvmetad_cmd = NULL;
static int64_t _lvmetad_update_timeout;
static struct volume_group *_lvmetad_pvscan_vg(struct cmd_context *cmd, struct volume_group *vg, const char *vgid, struct format_type *fmt);
static int _found_lvm1_metadata = 0;
static struct volume_group *_lvmetad_pvscan_vg(struct cmd_context *cmd, struct volume_group *vg);
static uint64_t _monotonic_seconds(void)
{
@@ -1091,17 +1093,14 @@ struct volume_group *lvmetad_vg_lookup(struct cmd_context *cmd, const char *vgna
* invalidated the cached vg.
*/
if (rescan) {
if (!(vg2 = _lvmetad_pvscan_vg(cmd, vg, vgid, fmt))) {
if (!(vg2 = _lvmetad_pvscan_vg(cmd, vg))) {
log_debug_lvmetad("VG %s from lvmetad not found during rescan.", vgname);
fid = NULL;
release_vg(vg);
vg = NULL;
goto out;
}
fid->ref_count++;
release_vg(vg);
fid->ref_count--;
fmt->ops->destroy_instance(fid);
vg = vg2;
fid = vg2->fid;
}
@@ -1109,14 +1108,14 @@ struct volume_group *lvmetad_vg_lookup(struct cmd_context *cmd, const char *vgna
dm_list_iterate_items(pvl, &vg->pvs) {
if (!_pv_update_struct_pv(pvl->pv, fid)) {
vg = NULL;
goto_out; /* FIXME: use an error path that disables lvmetad */
goto_out; /* FIXME error path */
}
}
dm_list_iterate_items(pvl, &vg->pvs_outdated) {
if (!_pv_update_struct_pv(pvl->pv, fid)) {
vg = NULL;
goto_out; /* FIXME: use an error path that disables lvmetad */
goto_out; /* FIXME error path */
}
}
@@ -1678,7 +1677,7 @@ int lvmetad_pv_found(struct cmd_context *cmd, const struct id *pvid, struct devi
if (vg && result) {
seqno_after = daemon_reply_int(reply, "seqno_after", -1);
if ((seqno_after != (int) vg->seqno) ||
if ((seqno_after != vg->seqno) ||
(seqno_after != daemon_reply_int(reply, "seqno_before", -1)))
log_warn("WARNING: Inconsistent metadata found for VG %s", vg->name);
}
@@ -1762,7 +1761,6 @@ int lvmetad_pv_gone_by_dev(struct device *dev)
*/
struct _lvmetad_pvscan_baton {
struct cmd_context *cmd;
struct volume_group *vg;
struct format_instance *fid;
};
@@ -1773,7 +1771,7 @@ static int _lvmetad_pvscan_single(struct metadata_area *mda, void *baton)
struct volume_group *vg;
if (mda_is_ignored(mda) ||
!(vg = mda->ops->vg_read(b->fid, "", mda, NULL, NULL)))
!(vg = mda->ops->vg_read(b->fid, "", mda, NULL, NULL, 1)))
return 1;
/* FIXME Also ensure contents match etc. */
@@ -1785,33 +1783,6 @@ static int _lvmetad_pvscan_single(struct metadata_area *mda, void *baton)
return 1;
}
/*
* FIXME: handle errors and do proper comparison of metadata from each area
* like vg_read and fall back to real vg_read from disk if there's any problem.
*/
static int _lvmetad_pvscan_vg_single(struct metadata_area *mda, void *baton)
{
struct _lvmetad_pvscan_baton *b = baton;
struct volume_group *vg = NULL;
if (mda_is_ignored(mda))
return 1;
if (!(vg = mda->ops->vg_read(b->fid, "", mda, NULL, NULL)))
return 1;
if (!b->vg)
b->vg = vg;
else if (vg->seqno > b->vg->seqno) {
release_vg(b->vg);
b->vg = vg;
} else
release_vg(vg);
return 1;
}
/*
* The lock manager may detect that the vg cached in lvmetad is out of date,
* due to something like an lvcreate from another host.
@@ -1821,41 +1792,41 @@ static int _lvmetad_pvscan_vg_single(struct metadata_area *mda, void *baton)
* the VG, and that PV may have been reused for another VG.
*/
static struct volume_group *_lvmetad_pvscan_vg(struct cmd_context *cmd, struct volume_group *vg,
const char *vgid, struct format_type *fmt)
static struct volume_group *_lvmetad_pvscan_vg(struct cmd_context *cmd, struct volume_group *vg)
{
char pvid_s[ID_LEN + 1] __attribute__((aligned(8)));
char uuid[64] __attribute__((aligned(8)));
struct label *label;
struct volume_group *vg_ret = NULL;
struct dm_config_tree *vgmeta_ret = NULL;
struct dm_config_tree *vgmeta;
struct pv_list *pvl, *pvl_new;
struct device_list *devl, *devlsafe;
struct device_list *devl, *devl_new, *devlsafe;
struct dm_list pvs_scan;
struct dm_list pvs_drop;
struct lvmcache_vginfo *vginfo = NULL;
struct dm_list pvs_new;
struct lvmcache_info *info = NULL;
struct format_instance *fid;
struct format_instance_ctx fic = { .type = 0 };
struct _lvmetad_pvscan_baton baton;
struct volume_group *save_vg;
struct dm_config_tree *save_meta;
struct device *save_dev = NULL;
uint32_t save_seqno = 0;
int found_new_pvs = 0;
int retried_reads = 0;
int missing_devs = 0;
int check_new_pvs = 0;
int found;
save_vg = NULL;
save_meta = NULL;
save_dev = NULL;
save_seqno = 0;
dm_list_init(&pvs_scan);
dm_list_init(&pvs_drop);
dm_list_init(&pvs_new);
log_debug_lvmetad("Rescan VG %s to update lvmetad (seqno %u).", vg->name, vg->seqno);
log_debug_lvmetad("Rescanning VG %s (seqno %u).", vg->name, vg->seqno);
/*
* Make sure this command knows about all PVs from lvmetad.
* Another host may have added a PV to the VG, and some
* commands do not always populate their lvmcache with
* all devs from lvmetad, so they would fail to find
* the new PV when scanning the VG. So make sure this
* command knows about all PVs from lvmetad.
*/
lvmcache_seed_infos_from_lvmetad(cmd);
@@ -1870,111 +1841,54 @@ static struct volume_group *_lvmetad_pvscan_vg(struct cmd_context *cmd, struct v
dm_list_add(&pvs_scan, &devl->list);
}
/*
* Rescan labels/metadata only from devs that we previously
* saw in the VG. If we find below that there are new PVs
* in the VG, we'll have to rescan all devices to find which
* device(s) are now being used.
*/
log_debug_lvmetad("Rescan VG %s scanning data from devs in previous metadata.", vg->name);
label_scan_devs(cmd, cmd->full_filter, &pvs_scan);
scan_more:
/*
* Check if any pvs_scan entries are no longer PVs.
* In that case, label_read/_find_label_header will have
* found no label_header, and would have dropped the
* info struct for the device from lvmcache. So, if
* we look up the info struct here and don't find it,
* we can infer it's no longer a PV.
*
* FIXME: we should record specific results from the
* label_read and then check specifically for whatever
* result means "no label was found", rather than going
* about this indirectly via the lvmcache side effects.
*/
dm_list_iterate_items_safe(devl, devlsafe, &pvs_scan) {
if (!(info = lvmcache_info_from_pvid(devl->dev->pvid, devl->dev, 0))) {
/* Another host removed this PV from the VG. */
log_debug_lvmetad("Rescan VG %s from %s dropping dev (no label).",
vg->name, dev_name(devl->dev));
dm_list_move(&pvs_drop, &devl->list);
}
}
fic.type = FMT_INSTANCE_MDAS | FMT_INSTANCE_AUX_MDAS;
fic.context.vg_ref.vg_name = vg->name;
fic.context.vg_ref.vg_id = vgid;
retry_reads:
if (!(fid = fmt->ops->create_instance(fmt, &fic))) {
/* FIXME: are there only internal reasons for failures here? */
log_error("Reading VG %s failed to create format instance.", vg->name);
return NULL;
}
/* FIXME: not sure if this is necessary */
fid->ref_count++;
baton.fid = fid;
baton.cmd = cmd;
/*
* FIXME: this vg_read path does not have the ability to repair
* any problems with the VG, e.g. VG on one dev has an older
* seqno. When vg_read() is reworked, we need to fall back
* to using that from here (and vg_read's from lvmetad) when
* there is a problem. Perhaps by disabling lvmetad when a
* VG problem is detected, causing commands to fully fall
* back to disk, which will repair the VG. Then lvmetad can
* be repopulated and re-enabled (possibly automatically.)
*/
/*
* Do a low level vg_read on each dev, verify the vg returned
* from metadata on each device is for the VG being read
* (the PV may have been removed from the VG being read and
* added to a different one), and return this vg to the caller
* as the current vg to use.
*
* The label scan above will have saved in lvmcache which
* vg each device is used in, so we could figure that part
* out without doing the vg_read.
* Run the equivalent of lvmetad_pvscan_single on each dev in the VG.
*/
dm_list_iterate_items_safe(devl, devlsafe, &pvs_scan) {
if (!devl->dev)
continue;
log_debug_lvmetad("Rescan VG %s getting metadata from %s.",
vg->name, dev_name(devl->dev));
log_debug_lvmetad("Rescan VG %s scanning %s.", vg->name, dev_name(devl->dev));
if (!label_read(devl->dev, &label, 0)) {
/* Another host removed this PV from the VG. */
log_debug_lvmetad("Rescan VG %s found %s was removed.", vg->name, dev_name(devl->dev));
if ((info = lvmcache_info_from_pvid(devl->dev->pvid, NULL, 0)))
lvmcache_del(info);
/*
* The info struct for this dev knows what and where
* the mdas are for this dev (the label scan saved
* the mda locations for this dev on the lvmcache info struct).
*/
if (!(info = lvmcache_info_from_pvid(devl->dev->pvid, devl->dev, 0))) {
log_debug_lvmetad("Rescan VG %s from %s dropping dev (no info).",
vg->name, dev_name(devl->dev));
dm_list_move(&pvs_drop, &devl->list);
continue;
}
info = (struct lvmcache_info *) label->info;
baton.vg = NULL;
baton.fid = lvmcache_fmt(info)->ops->create_instance(lvmcache_fmt(info), &fic);
if (!baton.fid)
return_NULL;
if (baton.fid->fmt->features & FMT_OBSOLETE) {
log_debug_lvmetad("Ignoring obsolete format on PV %s in VG %s.", dev_name(devl->dev), vg->name);
lvmcache_fmt(info)->ops->destroy_instance(baton.fid);
dm_list_move(&pvs_drop, &devl->list);
continue;
}
/*
* Read VG metadata from this dev's mdas.
*/
lvmcache_foreach_mda(info, _lvmetad_pvscan_vg_single, &baton);
lvmcache_foreach_mda(info, _lvmetad_pvscan_single, &baton);
/*
* The PV may have been removed from the VG by another host
* since we last read the VG.
*/
if (!baton.vg) {
log_debug_lvmetad("Rescan VG %s from %s dropping dev (no metadata).",
vg->name, dev_name(devl->dev));
log_debug_lvmetad("Rescan VG %s did not find %s.", vg->name, dev_name(devl->dev));
lvmcache_fmt(info)->ops->destroy_instance(baton.fid);
dm_list_move(&pvs_drop, &devl->list);
continue;
}
@@ -1984,15 +1898,10 @@ static struct volume_group *_lvmetad_pvscan_vg(struct cmd_context *cmd, struct v
* different VG since we last read the VG.
*/
if (strcmp(baton.vg->name, vg->name)) {
log_debug_lvmetad("Rescan VG %s from %s dropping dev (other VG %s).",
vg->name, dev_name(devl->dev), baton.vg->name);
release_vg(baton.vg);
continue;
}
if (!(vgmeta = export_vg_to_config_tree(baton.vg))) {
log_error("VG export to config tree failed");
log_debug_lvmetad("Rescan VG %s found different VG %s on PV %s.",
vg->name, baton.vg->name, dev_name(devl->dev));
release_vg(baton.vg);
dm_list_move(&pvs_drop, &devl->list);
continue;
}
@@ -2002,35 +1911,20 @@ static struct volume_group *_lvmetad_pvscan_vg(struct cmd_context *cmd, struct v
* read from each other dev.
*/
if (save_vg && (save_seqno != baton.vg->seqno)) {
/* FIXME: fall back to vg_read to correct this. */
log_warn("WARNING: inconsistent metadata for VG %s on devices %s seqno %u and %s seqno %u.",
vg->name, dev_name(save_dev), save_seqno,
dev_name(devl->dev), baton.vg->seqno);
log_warn("WARNING: temporarily disable lvmetad to repair metadata.");
if (!save_seqno)
save_seqno = baton.vg->seqno;
/* Use the most recent */
if (save_seqno < baton.vg->seqno) {
release_vg(save_vg);
dm_config_destroy(save_meta);
save_vg = baton.vg;
save_meta = vgmeta;
save_seqno = baton.vg->seqno;
save_dev = devl->dev;
} else {
release_vg(baton.vg);
dm_config_destroy(vgmeta);
}
continue;
if (!(vgmeta = export_vg_to_config_tree(baton.vg))) {
log_error("VG export to config tree failed");
release_vg(baton.vg);
return NULL;
}
if (!save_vg) {
save_vg = baton.vg;
save_meta = vgmeta;
save_seqno = baton.vg->seqno;
if (!vgmeta_ret) {
vgmeta_ret = vgmeta;
save_dev = devl->dev;
} else {
struct dm_config_node *meta1 = save_meta->root;
struct dm_config_node *meta1 = vgmeta_ret->root;
struct dm_config_node *meta2 = vgmeta->root;
struct dm_config_node *sib1 = meta1->sib;
struct dm_config_node *sib2 = meta2->sib;
@@ -2055,128 +1949,73 @@ static struct volume_group *_lvmetad_pvscan_vg(struct cmd_context *cmd, struct v
meta2->sib = NULL;
if (compare_config(meta1, meta2)) {
/* FIXME: fall back to vg_read to correct this. */
log_warn("WARNING: inconsistent metadata for VG %s on devices %s seqno %u and %s seqno %u.",
vg->name, dev_name(save_dev), save_seqno,
dev_name(devl->dev), baton.vg->seqno);
log_warn("WARNING: temporarily disable lvmetad to repair metadata.");
log_error("VG %s metadata comparison failed for device %s vs %s",
vg->name, dev_name(devl->dev), save_dev ? dev_name(save_dev) : "none");
_log_debug_inequality(vg->name, save_meta->root, vgmeta->root);
_log_debug_inequality(vg->name, vgmeta_ret->root, vgmeta->root);
meta1->sib = sib1;
meta2->sib = sib2;
/* no right choice, just use the previous copy */
release_vg(baton.vg);
dm_config_destroy(vgmeta);
dm_config_destroy(vgmeta_ret);
release_vg(baton.vg);
return NULL;
}
meta1->sib = sib1;
meta2->sib = sib2;
release_vg(baton.vg);
dm_config_destroy(vgmeta);
}
}
/* FIXME: see above */
fid->ref_count--;
/*
* Look for any new PVs in the VG metadata that were not in our
* previous version of the VG.
*
* (Don't look for new PVs after a rescan and retry.)
*/
found_new_pvs = 0;
if (save_vg && !retried_reads) {
dm_list_iterate_items(pvl_new, &save_vg->pvs) {
found = 0;
dm_list_iterate_items(pvl, &vg->pvs) {
if (pvl_new->pv->dev != pvl->pv->dev)
continue;
found = 1;
break;
}
/*
* PV in new VG metadata not found in old VG metadata.
* There's a good chance we don't know about this new
* PV or what device it's on; a label scan is needed
* of all devices so we know which device the VG is
* now using.
*/
if (!found) {
found_new_pvs++;
strncpy(pvid_s, (char *) &pvl_new->pv->id, sizeof(pvid_s) - 1);
if (!id_write_format((const struct id *)&pvid_s, uuid, sizeof(uuid)))
stack;
log_debug_lvmetad("Rescan VG %s found new PV %s.", vg->name, uuid);
}
}
}
if (!save_vg && retried_reads) {
log_error("VG %s not found after rescanning devices.", vg->name);
goto out;
}
/*
* Do a full rescan of devices, then look up which devices the
* scan found for this VG name, and select those devices to
* read metadata from in the loop above (rather than the list
* of devices we created from our last copy of the vg metadata.)
*
* Case 1: VG we knew is no longer on any of the devices we knew it
* to be on (save_vg is NULL, which means the metadata wasn't found
* when reading mdas on each of the initial pvs_scan devices).
* Rescan all devs and then retry reading metadata from the devs that
* the scan finds associated with this VG.
*
* Case 2: VG has new PVs but we don't know what devices they are
* so rescan all devs and then retry reading metadata from the devs
* that the scan finds associated with this VG.
*
* (N.B. after a retry, we don't check for found_new_pvs.)
*/
if (!save_vg || found_new_pvs) {
if (!save_vg)
log_debug_lvmetad("Rescan VG %s did not find VG on previous devs.", vg->name);
if (found_new_pvs)
log_debug_lvmetad("Rescan VG %s scanning all devs to find new PVs.", vg->name);
label_scan(cmd);
if (!(vginfo = lvmcache_vginfo_from_vgname(vg->name, NULL))) {
log_error("VG %s vg info not found after rescanning devices.", vg->name);
goto out;
}
/*
* Set pvs_scan to devs that the label scan found
* in the VG and retry the metadata reading loop.
* Look for any new PVs in the VG metadata that were not in our
* previous version of the VG. Add them to pvs_new to be
* scanned in this loop just like the old PVs.
*/
dm_list_init(&pvs_scan);
if (!lvmcache_get_vg_devs(cmd, vginfo, &pvs_scan)) {
log_error("VG %s info devs not found after rescanning devices.", vg->name);
goto out;
if (!check_new_pvs) {
check_new_pvs = 1;
dm_list_iterate_items(pvl_new, &baton.vg->pvs) {
found = 0;
dm_list_iterate_items(pvl, &vg->pvs) {
if (pvl_new->pv->dev != pvl->pv->dev)
continue;
found = 1;
break;
}
if (found)
continue;
if (!pvl_new->pv->dev) {
strncpy(pvid_s, (char *) &pvl_new->pv->id, sizeof(pvid_s) - 1);
if (!id_write_format((const struct id *)&pvid_s, uuid, sizeof(uuid)))
stack;
log_error("Device not found for PV %s in VG %s", uuid, vg->name);
missing_devs++;
continue;
}
if (!(devl_new = dm_pool_zalloc(cmd->mem, sizeof(*devl_new))))
return_NULL;
devl_new->dev = pvl_new->pv->dev;
dm_list_add(&pvs_new, &devl_new->list);
log_debug_lvmetad("Rescan VG %s found %s was added.", vg->name, dev_name(devl_new->dev));
}
}
log_debug_lvmetad("Rescan VG %s has %d PVs after label scan.",
vg->name, dm_list_size(&pvs_scan));
release_vg(baton.vg);
}
if (save_vg)
release_vg(save_vg);
if (save_meta)
dm_config_destroy(save_meta);
save_vg = NULL;
save_meta = NULL;
save_dev = NULL;
save_seqno = 0;
found_new_pvs = 0;
retried_reads = 1;
goto retry_reads;
/*
* Do the same scanning above for any new PVs.
*/
if (!dm_list_empty(&pvs_new)) {
dm_list_init(&pvs_scan);
dm_list_splice(&pvs_scan, &pvs_new);
dm_list_init(&pvs_new);
log_debug_lvmetad("Rescan VG %s found new PVs to scan.", vg->name);
goto scan_more;
}
if (missing_devs) {
if (vgmeta_ret)
dm_config_destroy(vgmeta_ret);
return_NULL;
}
/*
@@ -2185,50 +2024,52 @@ static struct volume_group *_lvmetad_pvscan_vg(struct cmd_context *cmd, struct v
dm_list_iterate_items(devl, &pvs_drop) {
if (!devl->dev)
continue;
log_debug_lvmetad("Rescan VG %s removing %s from lvmetad.", vg->name, dev_name(devl->dev));
if (!lvmetad_pv_gone_by_dev(devl->dev)) {
/* FIXME: use an error path that disables lvmetad */
log_error("Failed to remove %s from lvmetad.", dev_name(devl->dev));
}
log_debug_lvmetad("Rescan VG %s dropping %s.", vg->name, dev_name(devl->dev));
if (!lvmetad_pv_gone_by_dev(devl->dev))
return_NULL;
}
/*
* Update lvmetad with the newly read version of the VG.
* When the seqno is unchanged the cached VG can be left.
* Update the VG in lvmetad.
*/
if (save_vg && (save_seqno != vg->seqno)) {
dm_list_iterate_items(devl, &pvs_scan) {
if (!devl->dev)
continue;
log_debug_lvmetad("Rescan VG %s removing %s from lvmetad to replace.",
vg->name, dev_name(devl->dev));
if (!lvmetad_pv_gone_by_dev(devl->dev)) {
/* FIXME: use an error path that disables lvmetad */
log_error("Failed to remove %s from lvmetad.", dev_name(devl->dev));
}
if (vgmeta_ret) {
fid = lvmcache_fmt(info)->ops->create_instance(lvmcache_fmt(info), &fic);
if (!(vg_ret = import_vg_from_config_tree(vgmeta_ret, fid))) {
log_error("VG import from config tree failed");
lvmcache_fmt(info)->ops->destroy_instance(fid);
goto out;
}
log_debug_lvmetad("Rescan VG %s updating lvmetad from seqno %u to seqno %u.",
vg->name, vg->seqno, save_seqno);
/*
* If this vg_update fails the cached metadata in
* lvmetad will remain invalid.
* Update lvmetad with the newly read version of the VG.
* When the seqno is unchanged the cached VG can be left.
*/
save_vg->lvmetad_update_pending = 1;
if (!lvmetad_vg_update_finish(save_vg)) {
/* FIXME: use an error path that disables lvmetad */
log_error("Failed to update lvmetad with new VG meta");
if (save_seqno != vg->seqno) {
dm_list_iterate_items(devl, &pvs_scan) {
if (!devl->dev)
continue;
log_debug_lvmetad("Rescan VG %s dropping to replace %s.", vg->name, dev_name(devl->dev));
if (!lvmetad_pv_gone_by_dev(devl->dev))
return_NULL;
}
log_debug_lvmetad("Rescan VG %s updating lvmetad from seqno %u to seqno %u.",
vg->name, vg->seqno, save_seqno);
/*
* If this vg_update fails the cached metadata in
* lvmetad will remain invalid.
*/
vg_ret->lvmetad_update_pending = 1;
if (!lvmetad_vg_update_finish(vg_ret))
log_error("Failed to update lvmetad with new VG meta");
}
dm_config_destroy(vgmeta_ret);
}
out:
if (!save_vg && fid)
fmt->ops->destroy_instance(fid);
if (save_meta)
dm_config_destroy(save_meta);
if (save_vg)
log_debug_lvmetad("Rescan VG %s done (new seqno %u).", save_vg->name, save_vg->seqno);
return save_vg;
if (vg_ret)
log_debug_lvmetad("Rescan VG %s done (seqno %u).", vg_ret->name, vg_ret->seqno);
return vg_ret;
}
int lvmetad_pvscan_single(struct cmd_context *cmd, struct device *dev,
@@ -2238,12 +2079,9 @@ int lvmetad_pvscan_single(struct cmd_context *cmd, struct device *dev,
struct label *label;
struct lvmcache_info *info;
struct _lvmetad_pvscan_baton baton;
const struct format_type *fmt;
/* Create a dummy instance. */
struct format_instance_ctx fic = { .type = 0 };
log_debug_lvmetad("Scan metadata from dev %s", dev_name(dev));
if (!lvmetad_used()) {
log_error("Cannot proceed since lvmetad is not active.");
return 0;
@@ -2254,35 +2092,39 @@ int lvmetad_pvscan_single(struct cmd_context *cmd, struct device *dev,
return 1;
}
if (!(info = lvmcache_info_from_pvid(dev->pvid, dev, 0))) {
log_print_unless_silent("No PV info found on %s for PVID %s.", dev_name(dev), dev->pvid);
if (!label_read(dev, &label, 0)) {
log_print_unless_silent("No PV label found on %s.", dev_name(dev));
if (!lvmetad_pv_gone_by_dev(dev))
goto_bad;
return 1;
}
if (!(label = lvmcache_get_label(info))) {
log_print_unless_silent("No PV label found for %s.", dev_name(dev));
if (!lvmetad_pv_gone_by_dev(dev))
goto_bad;
return 1;
}
info = (struct lvmcache_info *) label->info;
fmt = lvmcache_fmt(info);
baton.cmd = cmd;
baton.vg = NULL;
baton.fid = fmt->ops->create_instance(fmt, &fic);
baton.fid = lvmcache_fmt(info)->ops->create_instance(lvmcache_fmt(info), &fic);
if (!baton.fid)
goto_bad;
if (baton.fid->fmt->features & FMT_OBSOLETE) {
lvmcache_fmt(info)->ops->destroy_instance(baton.fid);
log_warn("WARNING: Disabling lvmetad cache which does not support obsolete (lvm1) metadata.");
lvmetad_set_disabled(cmd, LVMETAD_DISABLE_REASON_LVM1);
_found_lvm1_metadata = 1;
/*
* return 1 (success) so that we'll continue to populate lvmetad
* instead of leaving the update incomplete.
*/
return 1;
}
lvmcache_foreach_mda(info, _lvmetad_pvscan_single, &baton);
if (!baton.vg)
fmt->ops->destroy_instance(baton.fid);
lvmcache_fmt(info)->ops->destroy_instance(baton.fid);
if (!lvmetad_pv_found(cmd, (const struct id *) &dev->pvid, dev, fmt,
if (!lvmetad_pv_found(cmd, (const struct id *) &dev->pvid, dev, lvmcache_fmt(info),
label->sector, baton.vg, found_vgnames, changed_vgnames)) {
release_vg(baton.vg);
goto_bad;
@@ -2348,15 +2190,6 @@ int lvmetad_pvscan_all_devs(struct cmd_context *cmd, int do_wait)
replacing_other_update = 1;
}
label_scan(cmd);
lvmcache_pvscan_duplicate_check(cmd);
if (lvmcache_found_duplicate_pvs()) {
log_warn("WARNING: Scan found duplicate PVs.");
return 0;
}
log_verbose("Scanning all devices to update lvmetad.");
if (!(iter = dev_iter_create(cmd->lvmetad_filter, 1))) {
@@ -2440,9 +2273,11 @@ int lvmetad_pvscan_all_devs(struct cmd_context *cmd, int do_wait)
}
/*
* If lvmetad is disabled, and no duplicate PVs were seen, then re-enable lvmetad.
* If lvmetad is disabled, and no lvm1 metadata was seen and no
* duplicate PVs were seen, then re-enable lvmetad.
*/
if (lvmetad_is_disabled(cmd, &reason) && !lvmcache_found_duplicate_pvs()) {
if (lvmetad_is_disabled(cmd, &reason) &&
!lvmcache_found_duplicate_pvs() && !_found_lvm1_metadata) {
log_debug_lvmetad("Enabling lvmetad which was previously disabled.");
lvmetad_clear_disabled(cmd);
}
@@ -2537,18 +2372,11 @@ static int _lvmetad_get_pv_cache_list(struct cmd_context *cmd, struct dm_list *p
*/
static void _update_pv_in_udev(struct cmd_context *cmd, dev_t devt)
{
/*
* FIXME: this is diabled as part of removing dev_opens
* to integrate bcache. If this is really needed, we
* can do a separate open/close here.
*/
log_debug_devs("SKIP device %d:%d open to update udev",
(int)MAJOR(devt), (int)MINOR(devt));
#if 0
struct device *dev;
log_debug_devs("device %d:%d open to update udev",
(int)MAJOR(devt), (int)MINOR(devt));
if (!(dev = dev_cache_get_by_devt(devt, cmd->lvmetad_filter))) {
log_error("_update_pv_in_udev no dev found");
return;
@@ -2561,7 +2389,6 @@ static void _update_pv_in_udev(struct cmd_context *cmd, dev_t devt)
if (!dev_close(dev))
stack;
#endif
}
/*
@@ -2733,8 +2560,6 @@ void lvmetad_validate_global_cache(struct cmd_context *cmd, int force)
*/
_lvmetad_get_pv_cache_list(cmd, &pvc_before);
log_debug_lvmetad("Rescan all devices to validate global cache.");
/*
* Update the local lvmetad cache so it correctly reflects any
* changes made on remote hosts. (It's possible that this command
@@ -2803,7 +2628,7 @@ void lvmetad_validate_global_cache(struct cmd_context *cmd, int force)
_update_changed_pvs_in_udev(cmd, &pvc_before, &pvc_after);
}
log_debug_lvmetad("Rescanned all devices");
log_debug_lvmetad("Validating global lvmetad cache finished");
}
int lvmetad_vg_is_foreign(struct cmd_context *cmd, const char *vgname, const char *vgid)
@@ -3058,6 +2883,9 @@ int lvmetad_is_disabled(struct cmd_context *cmd, const char **reason)
} else if (strstr(reply_reason, LVMETAD_DISABLE_REASON_REPAIR)) {
*reason = "a repair command was run";
} else if (strstr(reply_reason, LVMETAD_DISABLE_REASON_LVM1)) {
*reason = "LVM1 metadata was found";
} else if (strstr(reply_reason, LVMETAD_DISABLE_REASON_DUPLICATES)) {
*reason = "duplicate PVs were found";
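The rescan logic in this file keeps whichever per-device copy of the VG metadata carries the highest seqno, and when two copies tie but disagree it keeps the copy it already chose. A minimal sketch of that selection rule, using a hypothetical vg_copy stand-in rather than the real LVM2 structures:

/* Sketch only: mirrors the "use the most recent seqno" selection in the
 * rescan loop above.  struct vg_copy is a hypothetical stand-in for the
 * (metadata, seqno, device) triple the real code tracks. */
#include <stddef.h>
#include <stdint.h>

struct vg_copy {
	uint32_t seqno;		/* VG metadata sequence number */
	const char *dev_name;	/* device this copy was read from */
};

static const struct vg_copy *pick_newest(const struct vg_copy *copies, size_t n)
{
	const struct vg_copy *best = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		/* Strictly newer wins; a tie keeps the earlier choice,
		 * as in "no right choice, just use the previous copy". */
		if (!best || copies[i].seqno > best->seqno)
			best = &copies[i];
	}
	return best;
}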

lib/cache/lvmetad.h

@@ -17,8 +17,6 @@
#include "config-util.h"
#include <stdint.h>
struct volume_group;
struct cmd_context;
struct dm_config_tree;
@@ -165,48 +163,38 @@ void lvmetad_clear_disabled(struct cmd_context *cmd);
# else /* LVMETAD_SUPPORT */
static inline int lvmetad_connect(struct cmd_context *cmd) {return 0;}
static inline void lvmetad_disconnect(void) {}
static inline void lvmetad_make_unused(struct cmd_context *cmd) {}
static inline int lvmetad_used(void) {return 0;}
static inline void lvmetad_set_socket(const char *thing) {}
static inline int lvmetad_socket_present(void) {return 0;}
static inline int lvmetad_pidfile_present(void) {return 0;}
static inline void lvmetad_set_token(const struct dm_config_value *filter) {}
static inline void lvmetad_release_token(void) {}
static inline int lvmetad_vg_update_pending(struct volume_group *vg) {return 1;}
static inline int lvmetad_vg_update_finish(struct volume_group *vg) {return 1;}
static inline int lvmetad_vg_remove_pending(struct volume_group *vg) {return 1;}
static inline int lvmetad_vg_remove_finish(struct volume_group *vg) {return 1;}
static inline int lvmetad_pv_found(struct cmd_context *cmd, const struct id *pvid, struct device *dev,
const struct format_type *fmt, uint64_t label_sector,
struct volume_group *vg,
struct dm_list *found_vgnames,
struct dm_list *changed_vgnames) {return 1;}
static inline int lvmetad_pv_gone(dev_t devno, const char *pv_name) {return 1;}
static inline int lvmetad_pv_gone_by_dev(struct device *dev) {return 1;}
static inline int lvmetad_pv_list_to_lvmcache(struct cmd_context *cmd) {return 1;}
static inline int lvmetad_pv_lookup(struct cmd_context *cmd, struct id pvid, int *found) {return 0;}
static inline int lvmetad_pv_lookup_by_dev(struct cmd_context *cmd, struct device *dev, int *found) {return 0;}
static inline int lvmetad_vg_list_to_lvmcache(struct cmd_context *cmd) {return 1;}
static inline int lvmetad_get_vgnameids(struct cmd_context *cmd, struct dm_list *vgnameids) {return 0;}
static inline struct volume_group *lvmetad_vg_lookup(struct cmd_context *cmd,
const char *vgname, const char *vgid) {return NULL;}
static inline int lvmetad_pvscan_single(struct cmd_context *cmd, struct device *dev,
struct dm_list *found_vgnames,
struct dm_list *changed_vgnames) {return 0;}
static inline int lvmetad_pvscan_all_devs(struct cmd_context *cmd, int do_wait) {return 0;}
static inline int lvmetad_vg_clear_outdated_pvs(struct volume_group *vg) {return 0;}
static inline void lvmetad_validate_global_cache(struct cmd_context *cmd, int force) {}
static inline int lvmetad_token_matches(struct cmd_context *cmd) {return 1;}
static inline int lvmetad_vg_is_foreign(struct cmd_context *cmd, const char *vgname, const char *vgid) {return 0;}
static inline int lvmetad_is_disabled(struct cmd_context *cmd, const char **reason) {return 0;}
static inline void lvmetad_set_disabled(struct cmd_context *cmd, const char *reason) {}
static inline void lvmetad_clear_disabled(struct cmd_context *cmd) {}
# define lvmetad_disconnect() do { } while (0)
# define lvmetad_connect(cmd) (0)
# define lvmetad_make_unused(cmd) do { } while (0)
# define lvmetad_used() (0)
# define lvmetad_set_socket(a) do { } while (0)
# define lvmetad_socket_present() (0)
# define lvmetad_pidfile_present() (0)
# define lvmetad_set_token(a) do { } while (0)
# define lvmetad_release_token() do { } while (0)
# define lvmetad_vg_update(vg) (1)
# define lvmetad_vg_update_pending(vg) (1)
# define lvmetad_vg_update_finish(vg) (1)
# define lvmetad_vg_remove_pending(vg) (1)
# define lvmetad_vg_remove_finish(vg) (1)
# define lvmetad_pv_found(cmd, pvid, dev, fmt, label_sector, vg, found_vgnames, changed_vgnames) (1)
# define lvmetad_pv_gone(devno, pv_name) (1)
# define lvmetad_pv_gone_by_dev(dev) (1)
# define lvmetad_pv_list_to_lvmcache(cmd) (1)
# define lvmetad_pv_lookup(cmd, pvid, found) (0)
# define lvmetad_pv_lookup_by_dev(cmd, dev, found) (0)
# define lvmetad_vg_list_to_lvmcache(cmd) (1)
# define lvmetad_get_vgnameids(cmd, vgnameids) do { } while (0)
# define lvmetad_vg_lookup(cmd, vgname, vgid) (NULL)
# define lvmetad_pvscan_single(cmd, dev, found_vgnames, changed_vgnames) (0)
# define lvmetad_pvscan_all_devs(cmd, do_wait) (0)
# define lvmetad_vg_clear_outdated_pvs(vg) do { } while (0)
# define lvmetad_validate_global_cache(cmd, force) do { } while (0)
# define lvmetad_vg_is_foreign(cmd, vgname, vgid) (0)
# define lvmetad_token_matches(cmd) (1)
# define lvmetad_is_disabled(cmd, reason) (0)
# define lvmetad_set_disabled(cmd, reason) do { } while (0)
# define lvmetad_clear_disabled(cmd) do { } while (0)
# endif /* LVMETAD_SUPPORT */
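The block above illustrates the pattern used when lvmetad support is compiled out: every entry point collapses to a static inline no-op or a macro with a fixed return value, so callers never need their own #ifdef LVMETAD_SUPPORT. A generic sketch of the same pattern, with hypothetical feature names:

/* Sketch: compile-time stubbing as done above for lvmetad.
 * FEATURE_X_SUPPORT and feature_x_update() are hypothetical names. */
#ifdef FEATURE_X_SUPPORT
int feature_x_update(int handle);	/* real implementation lives elsewhere */
#else
/* Stub: feature compiled out, report success and do nothing. */
static inline int feature_x_update(int handle) { (void) handle; return 1; }
#endif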


@@ -1,4 +1,4 @@
# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
# Copyright (C) 2013-2014 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
@@ -14,10 +14,11 @@ srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
SOURCES=\
vdo/status.c
SOURCES = cache.c
LIB_SHARED = liblvm2cache.$(LIB_SUFFIX)
LIB_VERSION = $(LIB_VERSION_LVM)
include $(top_builddir)/make.tmpl
LIB_NAME = libdevicemapper
LIB_STATIC = $(LIB_NAME).a
install: install_lvm2_plugin


@@ -36,9 +36,16 @@
#include "sharedlib.h"
#endif
#ifdef LVM1_INTERNAL
#include "format1.h"
#endif
#ifdef POOL_INTERNAL
#include "format_pool.h"
#endif
#include <locale.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/utsname.h>
#include <syslog.h>
#include <time.h>
@@ -680,6 +687,9 @@ static int _process_config(struct cmd_context *cmd)
if (find_config_tree_bool(cmd, report_two_word_unknown_device_CFG, NULL))
init_unknown_device_name("unknown device");
init_detect_internal_vg_cache_corruption
(find_config_tree_bool(cmd, global_detect_internal_vg_cache_corruption_CFG, NULL));
if (!_init_system_id(cmd))
return_0;
@@ -1053,7 +1063,7 @@ static int _init_dev_cache(struct cmd_context *cmd)
return 1;
}
#define MAX_FILTERS 10
#define MAX_FILTERS 9
static struct dev_filter *_init_lvmetad_filter_chain(struct cmd_context *cmd)
{
@@ -1134,17 +1144,10 @@ static struct dev_filter *_init_lvmetad_filter_chain(struct cmd_context *cmd)
}
nr_filt++;
/* signature filter. Required. */
if (!(filters[nr_filt] = signature_filter_create(cmd->dev_types))) {
log_error("Failed to create signature device filter");
goto bad;
}
nr_filt++;
/* md component filter. Optional, non-critical. */
if (find_config_tree_bool(cmd, devices_md_component_detection_CFG, NULL)) {
init_md_filtering(1);
if ((filters[nr_filt] = md_filter_create(cmd, cmd->dev_types)))
if ((filters[nr_filt] = md_filter_create(cmd->dev_types)))
nr_filt++;
}
@@ -1340,6 +1343,20 @@ static int _init_formats(struct cmd_context *cmd)
const struct dm_config_node *cn;
#endif
#ifdef LVM1_INTERNAL
if (!(fmt = init_lvm1_format(cmd)))
return 0;
fmt->library = NULL;
dm_list_add(&cmd->formats, &fmt->list);
#endif
#ifdef POOL_INTERNAL
if (!(fmt = init_pool_format(cmd)))
return 0;
fmt->library = NULL;
dm_list_add(&cmd->formats, &fmt->list);
#endif
#ifdef HAVE_LIBDL
/* Load any formats in shared libs if not static */
if (!is_static() &&
@@ -1633,6 +1650,7 @@ static void _init_rand(struct cmd_context *cmd)
static void _init_globals(struct cmd_context *cmd)
{
init_full_scan_done(0);
init_mirror_in_sync(0);
}
@@ -1763,8 +1781,6 @@ void destroy_config_context(struct cmd_context *cmd)
/*
* A "config context" is a very light weight toolcontext that
* is only used for reading config settings from lvm.conf.
*
* FIXME: this needs to go back to parametrized create_toolcontext()
*/
struct cmd_context *create_config_context(void)
{
@@ -1781,9 +1797,6 @@ struct cmd_context *create_config_context(void)
if (!(cmd->libmem = dm_pool_create("library", 4 * 1024)))
goto_out;
if (!(cmd->mem = dm_pool_create("command", 4 * 1024)))
goto out;
dm_list_init(&cmd->config_files);
dm_list_init(&cmd->tags);
@@ -1814,7 +1827,7 @@ out:
}
/* Entry point */
struct cmd_context *create_toolcontext(unsigned is_clvmd,
struct cmd_context *create_toolcontext(unsigned is_long_lived,
const char *system_dir,
unsigned set_buffering,
unsigned threaded,
@@ -1841,8 +1854,7 @@ struct cmd_context *create_toolcontext(unsigned is_clvmd,
log_error("Failed to allocate command context");
return NULL;
}
cmd->is_long_lived = is_clvmd;
cmd->is_clvmd = is_clvmd;
cmd->is_long_lived = is_long_lived;
cmd->threaded = threaded ? 1 : 0;
cmd->handles_missing_pvs = 0;
cmd->handles_unknown_segments = 0;
@@ -1861,13 +1873,7 @@ struct cmd_context *create_toolcontext(unsigned is_clvmd,
#ifndef VALGRIND_POOL
/* Set in/out stream buffering before glibc */
if (set_buffering
#ifdef SYS_gettid
/* For threaded programs no changes of streams */
/* On linux gettid() is implemented only via syscall */
&& (syscall(SYS_gettid) == getpid())
#endif
) {
if (set_buffering) {
/* Allocate 2 buffers */
if (!(cmd->linebuffer = dm_malloc(2 * _linebuffer_size))) {
log_error("Failed to allocate line buffer.");
@@ -1898,7 +1904,7 @@ struct cmd_context *create_toolcontext(unsigned is_clvmd,
}
}
/* Buffers are used for lines without '\n' */
} else if (!set_buffering)
} else
/* Without buffering, must not use stdin/stdout */
init_silent(1);
#endif
@@ -1971,10 +1977,6 @@ struct cmd_context *create_toolcontext(unsigned is_clvmd,
if (!_init_formats(cmd))
goto_out;
if (!lvmcache_init(cmd))
goto_out;
/* FIXME: move into lvmcache_init */
if (!init_lvmcache_orphans(cmd))
goto_out;
@@ -1996,6 +1998,7 @@ struct cmd_context *create_toolcontext(unsigned is_clvmd,
if (set_filters && !init_filters(cmd, 1))
goto_out;
cmd->default_settings.cache_vgmetadata = 1;
cmd->current_settings = cmd->default_settings;
cmd->initialized.config = 1;
@@ -2109,7 +2112,6 @@ int refresh_toolcontext(struct cmd_context *cmd)
activation_release();
lvmcache_destroy(cmd, 0, 0);
label_scan_destroy(cmd);
label_exit();
_destroy_segtypes(&cmd->segtypes);
_destroy_formats(cmd, &cmd->formats);
@@ -2196,9 +2198,6 @@ int refresh_toolcontext(struct cmd_context *cmd)
if (!_init_formats(cmd))
return_0;
if (!lvmcache_init(cmd))
return_0;
if (!init_lvmcache_orphans(cmd))
return_0;
@@ -2232,7 +2231,6 @@ void destroy_toolcontext(struct cmd_context *cmd)
archive_exit(cmd);
backup_exit(cmd);
lvmcache_destroy(cmd, 0, 0);
label_scan_destroy(cmd);
label_exit();
_destroy_segtypes(&cmd->segtypes);
_destroy_formats(cmd, &cmd->formats);


@@ -42,7 +42,6 @@ struct config_info {
int cache_vgmetadata;
const char *msg_prefix;
const char *fmt_name;
const char *dmeventd_executable;
uint64_t unit_factor;
int cmd_name; /* Show command name? */
mode_t umask;
@@ -165,13 +164,6 @@ struct cmd_context {
unsigned vg_notify:1;
unsigned lv_notify:1;
unsigned pv_notify:1;
unsigned activate_component:1; /* command activates component LV */
unsigned process_component_lvs:1; /* command processes also component LVs */
unsigned mirror_warn_printed:1; /* command already printed warning about non-monitored mirrors */
unsigned pvscan_cache_single:1;
unsigned can_use_one_scan:1;
unsigned is_clvmd:1;
unsigned use_full_md_check:1;
/*
* Filtering.
@@ -237,7 +229,7 @@ struct cmd_context {
* system_dir may be NULL to use the default value.
* The environment variable LVM_SYSTEM_DIR always takes precedence.
*/
struct cmd_context *create_toolcontext(unsigned is_clvmd,
struct cmd_context *create_toolcontext(unsigned is_long_lived,
const char *system_dir,
unsigned set_buffering,
unsigned threaded,


@@ -23,7 +23,6 @@
#include "toolcontext.h"
#include "lvm-file.h"
#include "memlock.h"
#include "label.h"
#include <sys/stat.h>
#include <sys/mman.h>
@@ -533,24 +532,13 @@ int config_file_read_fd(struct dm_config_tree *cft, struct device *dev, dev_io_r
log_error("Failed to allocate circular buffer.");
return 0;
}
if (!dev_read_bytes(dev, offset, size, buf))
if (!dev_read_circular(dev, (uint64_t) offset, size,
(uint64_t) offset2, size2, reason, buf)) {
goto out;
if (size2) {
if (!dev_read_bytes(dev, offset2, size2, buf + size))
goto out;
}
fb = buf;
}
/*
* The checksum passed in is the checksum from the mda_header
* preceding this metadata. They should always match.
* FIXME: handle case where mda_header checksum is bad,
* but the checksum calculated here is correct.
*/
if (checksum_fn && checksum !=
(checksum_fn(checksum_fn(INITIAL_CRC, (const uint8_t *)fb, size),
(const uint8_t *)(fb + size), size2))) {
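The condition above checksums both segments of a metadata area that wraps around the end of the circular buffer and compares the result with the checksum stored in the preceding mda_header. A hedged sketch of that two-segment check, taking checksum_fn and the initial CRC as parameters rather than reimplementing LVM2's versions:

/* Sketch only: 'fb' holds 'size' bytes of metadata followed by an optional
 * wrapped tail of 'size2' bytes.  checksum_fn is any incremental CRC-style
 * function with the same shape as the one used above. */
#include <stdint.h>

typedef uint32_t (*checksum_fn_t)(uint32_t initial, const uint8_t *buf, uint32_t size);

static int metadata_checksum_ok(checksum_fn_t checksum_fn, uint32_t initial_crc,
				const uint8_t *fb, uint32_t size, uint32_t size2,
				uint32_t expected)
{
	uint32_t crc = checksum_fn(initial_crc, fb, size);

	if (size2)			/* second segment when the area wraps */
		crc = checksum_fn(crc, fb + size, size2);

	return crc == expected;
}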


@@ -767,14 +767,26 @@ cfg(global_activation_CFG, "activation", global_CFG_SECTION, 0, CFG_TYPE_BOOL, D
"is not present in the kernel, disabling this should suppress\n"
"the error messages.\n")
cfg(global_fallback_to_lvm1_CFG, "fallback_to_lvm1", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, 0, vsn(1, 0, 18), NULL, 0, NULL,
"This setting is no longer used.\n")
cfg(global_fallback_to_lvm1_CFG, "fallback_to_lvm1", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_FALLBACK_TO_LVM1, vsn(1, 0, 18), "@DEFAULT_FALLBACK_TO_LVM1@", 0, NULL,
"Try running LVM1 tools if LVM cannot communicate with DM.\n"
"This option only applies to 2.4 kernels and is provided to help\n"
"switch between device-mapper kernels and LVM1 kernels. The LVM1\n"
"tools need to be installed with .lvm1 suffices, e.g. vgscan.lvm1.\n"
"They will stop working once the lvm2 on-disk metadata format is used.\n")
cfg(global_format_CFG, "format", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_FORMAT, vsn(1, 0, 0), NULL, 0, NULL,
"This setting is no longer used.\n")
"The default metadata format that commands should use.\n"
"The -M 1|2 option overrides this setting.\n"
"#\n"
"Accepted values:\n"
" lvm1\n"
" lvm2\n"
"#\n")
cfg_array(global_format_libraries_CFG, "format_libraries", global_CFG_SECTION, CFG_DEFAULT_UNDEFINED, CFG_TYPE_STRING, NULL, vsn(1, 0, 0), NULL, 0, NULL,
"This setting is no longer used.")
"Shared libraries that process different metadata formats.\n"
"If support for LVM1 metadata was compiled as a shared library use\n"
"format_libraries = \"liblvm2format1.so\"\n")
cfg_array(global_segment_libraries_CFG, "segment_libraries", global_CFG_SECTION, CFG_DEFAULT_UNDEFINED, CFG_TYPE_STRING, NULL, vsn(1, 0, 18), NULL, 0, NULL, NULL)
@@ -856,8 +868,11 @@ cfg(global_abort_on_internal_errors_CFG, "abort_on_internal_errors", global_CFG_
"Treat any internal errors as fatal errors, aborting the process that\n"
"encountered the internal error. Please only enable for debugging.\n")
cfg(global_detect_internal_vg_cache_corruption_CFG, "detect_internal_vg_cache_corruption", global_CFG_SECTION, 0, CFG_TYPE_BOOL, 0, vsn(2, 2, 96), NULL, vsn(2, 2, 174), NULL,
"No longer used.\n")
cfg(global_detect_internal_vg_cache_corruption_CFG, "detect_internal_vg_cache_corruption", global_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_DETECT_INTERNAL_VG_CACHE_CORRUPTION, vsn(2, 2, 96), NULL, 0, NULL,
"Internal verification of VG structures.\n"
"Check if CRC matches when a parsed VG is used multiple times. This\n"
"is useful to catch unexpected changes to cached VG structures.\n"
"Please only enable for debugging.\n")
cfg(global_metadata_read_only_CFG, "metadata_read_only", global_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_METADATA_READ_ONLY, vsn(2, 2, 75), NULL, 0, NULL,
"No operations that change on-disk metadata are permitted.\n"


@@ -179,6 +179,7 @@
#define DEFAULT_LOGLEVEL 0
#define DEFAULT_INDENT 1
#define DEFAULT_ABORT_ON_INTERNAL_ERRORS 0
#define DEFAULT_DETECT_INTERNAL_VG_CACHE_CORRUPTION 0
#define DEFAULT_UNITS "r"
#define DEFAULT_SUFFIX 1
#define DEFAULT_HOSTTAGS 0


@@ -1,272 +0,0 @@
/*
* Copyright (C) 2018 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "bcache.h"
// FIXME: need to define this in a common place (that doesn't pull in deps)
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
//----------------------------------------------------------------
static void byte_range_to_block_range(struct bcache *cache, uint64_t start, size_t len,
block_address *bb, block_address *be)
{
block_address block_size = bcache_block_sectors(cache) << SECTOR_SHIFT;
*bb = start / block_size;
*be = (start + len + block_size - 1) / block_size;
}
static uint64_t _min(uint64_t lhs, uint64_t rhs)
{
if (rhs < lhs)
return rhs;
return lhs;
}
//----------------------------------------------------------------
void bcache_prefetch_bytes(struct bcache *cache, int fd, uint64_t start, size_t len)
{
block_address bb, be;
byte_range_to_block_range(cache, start, len, &bb, &be);
while (bb < be) {
bcache_prefetch(cache, fd, bb);
bb++;
}
}
//----------------------------------------------------------------
bool bcache_read_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data)
{
struct block *b;
block_address bb, be;
uint64_t block_size = bcache_block_sectors(cache) << SECTOR_SHIFT;
uint64_t block_offset = start % block_size;
bcache_prefetch_bytes(cache, fd, start, len);
byte_range_to_block_range(cache, start, len, &bb, &be);
for (; bb != be; bb++) {
if (!bcache_get(cache, fd, bb, 0, &b))
return false;
size_t blen = _min(block_size - block_offset, len);
memcpy(data, ((unsigned char *) b->data) + block_offset, blen);
bcache_put(b);
block_offset = 0;
len -= blen;
data = ((unsigned char *) data) + blen;
}
return true;
}
//----------------------------------------------------------------
// Writing bytes and zeroing bytes are very similar, so we factor out
// this common code.
struct updater;
typedef bool (*partial_update_fn)(struct updater *u, int fd, block_address bb, uint64_t offset, size_t len);
typedef bool (*whole_update_fn)(struct updater *u, int fd, block_address bb, block_address be);
struct updater {
struct bcache *cache;
partial_update_fn partial_fn;
whole_update_fn whole_fn;
void *data;
};
static bool _update_bytes(struct updater *u, int fd, uint64_t start, size_t len)
{
struct bcache *cache = u->cache;
block_address bb, be;
uint64_t block_size = bcache_block_sectors(cache) << SECTOR_SHIFT;
uint64_t block_offset = start % block_size;
uint64_t nr_whole;
byte_range_to_block_range(cache, start, len, &bb, &be);
// If the last block is partial, we will require a read, so let's
// prefetch it.
if ((start + len) % block_size)
bcache_prefetch(cache, fd, (start + len) / block_size);
// First block may be partial
if (block_offset) {
size_t blen = _min(block_size - block_offset, len);
if (!u->partial_fn(u, fd, bb, block_offset, blen))
return false;
len -= blen;
if (!len)
return true;
bb++;
}
// Now we write out a set of whole blocks
nr_whole = len / block_size;
if (!u->whole_fn(u, fd, bb, bb + nr_whole))
return false;
bb += nr_whole;
len -= nr_whole * block_size;
if (!len)
return true;
// Finally we write a partial end block
return u->partial_fn(u, fd, bb, 0, len);
}
//----------------------------------------------------------------
static bool _write_partial(struct updater *u, int fd, block_address bb,
uint64_t offset, size_t len)
{
struct block *b;
if (!bcache_get(u->cache, fd, bb, GF_DIRTY, &b))
return false;
memcpy(((unsigned char *) b->data) + offset, u->data, len);
u->data = ((unsigned char *) u->data) + len;
bcache_put(b);
return true;
}
static bool _write_whole(struct updater *u, int fd, block_address bb, block_address be)
{
struct block *b;
uint64_t block_size = bcache_block_sectors(u->cache) << SECTOR_SHIFT;
for (; bb != be; bb++) {
// We don't need to read the block since we are overwriting
// it completely.
if (!bcache_get(u->cache, fd, bb, GF_ZERO, &b))
return false;
memcpy(b->data, u->data, block_size);
u->data = ((unsigned char *) u->data) + block_size;
bcache_put(b);
}
return true;
}
bool bcache_write_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data)
{
struct updater u;
u.cache = cache;
u.partial_fn = _write_partial;
u.whole_fn = _write_whole;
u.data = data;
return _update_bytes(&u, fd, start, len);
}
//----------------------------------------------------------------
static bool _zero_partial(struct updater *u, int fd, block_address bb, uint64_t offset, size_t len)
{
struct block *b;
if (!bcache_get(u->cache, fd, bb, GF_DIRTY, &b))
return false;
memset(((unsigned char *) b->data) + offset, 0, len);
bcache_put(b);
return true;
}
static bool _zero_whole(struct updater *u, int fd, block_address bb, block_address be)
{
struct block *b;
for (; bb != be; bb++) {
if (!bcache_get(u->cache, fd, bb, GF_ZERO, &b))
return false;
bcache_put(b);
}
return true;
}
bool bcache_zero_bytes(struct bcache *cache, int fd, uint64_t start, size_t len)
{
struct updater u;
u.cache = cache;
u.partial_fn = _zero_partial;
u.whole_fn = _zero_whole;
u.data = NULL;
return _update_bytes(&u, fd, start, len);
}
//----------------------------------------------------------------
static bool _set_partial(struct updater *u, int fd, block_address bb, uint64_t offset, size_t len)
{
struct block *b;
uint8_t val = *((uint8_t *) u->data);
if (!bcache_get(u->cache, fd, bb, GF_DIRTY, &b))
return false;
memset(((unsigned char *) b->data) + offset, val, len);
bcache_put(b);
return true;
}
static bool _set_whole(struct updater *u, int fd, block_address bb, block_address be)
{
struct block *b;
uint8_t val = *((uint8_t *) u->data);
uint64_t len = bcache_block_sectors(u->cache) * 512;
for (; bb != be; bb++) {
if (!bcache_get(u->cache, fd, bb, GF_ZERO, &b))
return false;
memset((unsigned char *) b->data, val, len);
bcache_put(b);
}
return true;
}
bool bcache_set_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, uint8_t val)
{
struct updater u;
u.cache = cache;
u.partial_fn = _set_partial;
u.whole_fn = _set_whole;
u.data = &val;
return _update_bytes(&u, fd, start, len);
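_update_bytes above always decomposes a byte range into a possibly partial leading block, a run of whole blocks, and a possibly partial trailing block. That arithmetic can be checked in isolation; a standalone sketch using plain integer math rather than the bcache types:

/* Sketch: decompose a byte range into the three pieces _update_bytes
 * handles - a partial head, whole blocks, and a partial tail. */
#include <stdint.h>
#include <stdio.h>

static void split_range(uint64_t start, uint64_t len, uint64_t block_size)
{
	uint64_t head = start % block_size ? block_size - start % block_size : 0;

	if (head > len)
		head = len;

	uint64_t whole = ((len - head) / block_size) * block_size;
	uint64_t tail = len - head - whole;

	printf("head %llu bytes, %llu whole blocks, tail %llu bytes\n",
	       (unsigned long long) head,
	       (unsigned long long) (whole / block_size),
	       (unsigned long long) tail);
}

int main(void)
{
	split_range(1000, 10000, 4096);	/* head 3096, 1 whole block, tail 2808 */
	return 0;
}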
}

File diff suppressed because it is too large.


@@ -1,163 +0,0 @@
/*
* Copyright (C) 2018 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef BCACHE_H
#define BCACHE_H
#include "libdevmapper.h"
#include <linux/fs.h>
#include <stdint.h>
#include <stdbool.h>
/*----------------------------------------------------------------*/
// FIXME: move somewhere more sensible
#define container_of(v, t, head) \
((t *)((const char *)(v) - (const char *)&((t *) 0)->head))
/*----------------------------------------------------------------*/
enum dir {
DIR_READ,
DIR_WRITE
};
typedef uint64_t block_address;
typedef uint64_t sector_t;
typedef void io_complete_fn(void *context, int io_error);
struct io_engine {
void (*destroy)(struct io_engine *e);
bool (*issue)(struct io_engine *e, enum dir d, int fd,
sector_t sb, sector_t se, void *data, void *context);
bool (*wait)(struct io_engine *e, io_complete_fn fn);
unsigned (*max_io)(struct io_engine *e);
};
struct io_engine *create_async_io_engine(void);
struct io_engine *create_sync_io_engine(void);
/*----------------------------------------------------------------*/
struct bcache;
struct block {
/* clients may only access these three fields */
int fd;
uint64_t index;
void *data;
struct bcache *cache;
struct dm_list list;
struct dm_list hash;
unsigned flags;
unsigned ref_count;
int error;
enum dir io_dir;
};
/*
* Ownership of engine passes. Engine will be destroyed even if this fails.
*/
struct bcache *bcache_create(sector_t block_size, unsigned nr_cache_blocks,
struct io_engine *engine);
void bcache_destroy(struct bcache *cache);
enum bcache_get_flags {
/*
* The block will be zeroed before get_block returns it. This
* potentially avoids a read if the block is not already in the cache.
* GF_DIRTY is implicit.
*/
GF_ZERO = (1 << 0),
/*
* Indicates the caller is intending to change the data in the block, a
* writeback will occur after the block is released.
*/
GF_DIRTY = (1 << 1)
};
sector_t bcache_block_sectors(struct bcache *cache);
unsigned bcache_nr_cache_blocks(struct bcache *cache);
unsigned bcache_max_prefetches(struct bcache *cache);
/*
* Use the prefetch method to take advantage of asynchronous IO. For example,
* if you wanted to read a block from many devices concurrently you'd do
* something like this:
*
* dm_list_iterate_items (dev, &devices)
* bcache_prefetch(cache, dev->fd, block);
*
* dm_list_iterate_items (dev, &devices) {
* if (!bcache_get(cache, dev->fd, block, &b))
* fail();
*
* process_block(b);
* }
*
* It's slightly sub optimal, since you may not run the gets in the order that
* they complete. But we're talking a very small difference, and it's worth it
* to keep callbacks out of this interface.
*/
void bcache_prefetch(struct bcache *cache, int fd, block_address index);
/*
* Returns true on success.
*/
bool bcache_get(struct bcache *cache, int fd, block_address index,
unsigned flags, struct block **result);
void bcache_put(struct block *b);
/*
* flush() does not attempt to writeback locked blocks. flush will fail
* (return false), if any unlocked dirty data cannot be written back.
*/
bool bcache_flush(struct bcache *cache);
/*
* Removes a block from the cache.
*
* If the block is dirty it will be written back first. If the writeback fails
* false will be returned.
*
* If the block is currently held false will be returned.
*/
bool bcache_invalidate(struct bcache *cache, int fd, block_address index);
/*
* Invalidates all blocks on the given descriptor. Call this before closing
* the descriptor to make sure everything is written back.
*/
bool bcache_invalidate_fd(struct bcache *cache, int fd);
//----------------------------------------------------------------
// The next four functions are utilities written in terms of the above api.
// Prefetches the blocks neccessary to satisfy a byte range.
void bcache_prefetch_bytes(struct bcache *cache, int fd, uint64_t start, size_t len);
// Reads, writes and zeroes bytes. Returns false if errors occur.
bool bcache_read_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data);
bool bcache_write_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data);
bool bcache_zero_bytes(struct bcache *cache, int fd, uint64_t start, size_t len);
bool bcache_set_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, uint8_t val);
//----------------------------------------------------------------
#endif
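The usage comment in this header abbreviates bcache_get() to four arguments; with the full signature declared above (including the flags argument), a minimal caller might look like the sketch below, where the cache and the array of already-open file descriptors are assumed to exist:

/* Sketch: prefetch block 0 of several devices so the async engine can
 * overlap the reads, then get/put each block in turn. */
#include "bcache.h"

static bool read_first_blocks(struct bcache *cache, int *fds, unsigned nr_fds)
{
	struct block *b;
	unsigned i;

	for (i = 0; i < nr_fds; i++)
		bcache_prefetch(cache, fds[i], 0);

	for (i = 0; i < nr_fds; i++) {
		if (!bcache_get(cache, fds[i], 0, 0, &b))
			return false;
		/* b->data holds bcache_block_sectors(cache) * 512 bytes */
		bcache_put(b);
	}

	return true;
}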


@@ -73,6 +73,7 @@ static void _dev_init(struct device *dev, int max_error_count)
dev->ext.src = DEV_EXT_NONE;
dm_list_init(&dev->aliases);
dm_list_init(&dev->open_list);
}
void dev_destroy_file(struct device *dev)
@@ -274,8 +275,10 @@ static int _compare_paths(const char *path0, const char *path1)
if (slash1 < slash0)
return 1;
(void) dm_strncpy(p0, path0, sizeof(p0));
(void) dm_strncpy(p1, path1, sizeof(p1));
strncpy(p0, path0, sizeof(p0) - 1);
p0[sizeof(p0) - 1] = '\0';
strncpy(p1, path1, sizeof(p1) - 1);
p1[sizeof(p1) - 1] = '\0';
s0 = p0 + 1;
s1 = p1 + 1;
@@ -1076,11 +1079,12 @@ static int _insert(const char *path, const struct stat *info,
return 1;
}
void dev_cache_scan(void)
static void _full_scan(int dev_scan)
{
struct dir_list *dl;
_cache.has_scanned = 1;
if (_cache.has_scanned && !dev_scan)
return;
_insert_dirs(&_cache.dirs);
@@ -1088,6 +1092,9 @@ void dev_cache_scan(void)
dm_list_iterate_items(dl, &_cache.files)
_insert_file(dl->dir);
_cache.has_scanned = 1;
init_full_scan_done(1);
}
int dev_cache_has_scanned(void)
@@ -1095,6 +1102,14 @@ int dev_cache_has_scanned(void)
return _cache.has_scanned;
}
void dev_cache_scan(int do_scan)
{
if (!do_scan)
_cache.has_scanned = 1;
else
_full_scan(1);
}
static int _init_preferred_names(struct cmd_context *cmd)
{
const struct dm_config_node *cn;
@@ -1158,6 +1173,7 @@ out:
int dev_cache_init(struct cmd_context *cmd)
{
_cache.names = NULL;
_cache.has_scanned = 0;
if (!(_cache.mem = dm_pool_create("dev_cache", 10 * 1024)))
return_0;
@@ -1377,7 +1393,6 @@ struct device *dev_cache_get(const char *name, struct dev_filter *f)
struct stat buf;
struct device *d = (struct device *) dm_hash_lookup(_cache.names, name);
int info_available = 0;
int ret = 1;
if (d && (d->flags & DEV_REGULAR))
return d;
@@ -1400,30 +1415,15 @@ struct device *dev_cache_get(const char *name, struct dev_filter *f)
_insert(name, info_available ? &buf : NULL, 0, obtain_device_list_from_udev());
d = (struct device *) dm_hash_lookup(_cache.names, name);
if (!d) {
dev_cache_scan();
_full_scan(0);
d = (struct device *) dm_hash_lookup(_cache.names, name);
}
}
if (!d)
return NULL;
if (d && (d->flags & DEV_REGULAR))
return d;
if (f && !(d->flags & DEV_REGULAR)) {
ret = f->passes_filter(f, d);
if (ret == -EAGAIN) {
log_debug_devs("get device by name defer filter %s", dev_name(d));
d->flags |= DEV_FILTER_AFTER_SCAN;
ret = 1;
}
}
if (f && !(d->flags & DEV_REGULAR) && !ret)
if (!d || (f && !(d->flags & DEV_REGULAR) && !(f->passes_filter(f, d))))
return NULL;
log_debug_devs("%s: Using device (%d:%d)", dev_name(d), (int) MAJOR(d->dev), (int) MINOR(d->dev));
return d;
}
@@ -1450,7 +1450,6 @@ struct device *dev_cache_get_by_devt(dev_t dev, struct dev_filter *f)
const char *sysfs_dir;
struct stat info;
struct device *d = _dev_cache_seek_devt(dev);
int ret;
if (d && (d->flags & DEV_REGULAR))
return d;
@@ -1466,40 +1465,31 @@ struct device *dev_cache_get_by_devt(dev_t dev, struct dev_filter *f)
}
if (lstat(path, &info)) {
log_debug("No sysfs entry for %d:%d errno %d at %s.",
(int)MAJOR(dev), (int)MINOR(dev), errno, path);
log_debug("No sysfs entry for %d:%d.",
(int)MAJOR(dev), (int)MINOR(dev));
return NULL;
}
}
dev_cache_scan();
_full_scan(0);
d = _dev_cache_seek_devt(dev);
}
if (!d)
return NULL;
if (d->flags & DEV_REGULAR)
return d;
if (!f)
return d;
ret = f->passes_filter(f, d);
if (ret == -EAGAIN) {
log_debug_devs("get device by number defer filter %s", dev_name(d));
d->flags |= DEV_FILTER_AFTER_SCAN;
ret = 1;
}
if (ret)
return d;
return NULL;
return (d && (!f || (d->flags & DEV_REGULAR) ||
f->passes_filter(f, d))) ? d : NULL;
}
struct dev_iter *dev_iter_create(struct dev_filter *f, int unused)
void dev_cache_full_scan(struct dev_filter *f)
{
if (f && f->wipe) {
f->wipe(f); /* might call _full_scan(1) */
if (!full_scan_done())
_full_scan(1);
} else
_full_scan(1);
}
struct dev_iter *dev_iter_create(struct dev_filter *f, int dev_scan)
{
struct dev_iter *di = dm_malloc(sizeof(*di));
@@ -1508,6 +1498,13 @@ struct dev_iter *dev_iter_create(struct dev_filter *f, int unused)
return NULL;
}
if (dev_scan && !trust_cache()) {
/* Flag gets reset between each command */
if (!full_scan_done())
dev_cache_full_scan(f);
} else
_full_scan(0);
di->current = btree_first(_cache.devices);
di->filter = f;
if (di->filter)
@@ -1532,27 +1529,13 @@ static struct device *_iter_next(struct dev_iter *iter)
struct device *dev_iter_get(struct dev_iter *iter)
{
struct dev_filter *f;
int ret;
while (iter->current) {
struct device *d = _iter_next(iter);
ret = 1;
f = iter->filter;
if (f && !(d->flags & DEV_REGULAR)) {
ret = f->passes_filter(f, d);
if (ret == -EAGAIN) {
log_debug_devs("get device by iter defer filter %s", dev_name(d));
d->flags |= DEV_FILTER_AFTER_SCAN;
ret = 1;
}
}
if (!f || (d->flags & DEV_REGULAR) || ret)
if (!iter->filter || (d->flags & DEV_REGULAR) ||
iter->filter->passes_filter(iter->filter, d)) {
log_debug_devs("%s: Using device (%d:%d)", dev_name(d), (int) MAJOR(d->dev), (int) MINOR(d->dev));
return d;
}
}
return NULL;
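dev_cache_get() and dev_iter_get() above share a convention: passes_filter() may return -EAGAIN to say the decision must wait until after the label scan, in which case the device is kept and flagged DEV_FILTER_AFTER_SCAN for re-checking later. A simplified sketch of that convention, with stand-in types instead of struct device and struct dev_filter:

/* Sketch only: the deferred-filter convention used above.
 * fake_dev / fake_filter are illustrative stand-ins. */
#include <errno.h>

#define FAKE_FILTER_AFTER_SCAN 0x1	/* stands in for DEV_FILTER_AFTER_SCAN */

struct fake_dev {
	unsigned flags;
};

struct fake_filter {
	int (*passes_filter)(struct fake_filter *f, struct fake_dev *d);
};

/* Returns 1 to keep the device, 0 to filter it out. */
static int apply_filter(struct fake_filter *f, struct fake_dev *d)
{
	int ret = f->passes_filter(f, d);

	if (ret == -EAGAIN) {
		/* Can't decide before the label scan: keep it, re-check later. */
		d->flags |= FAKE_FILTER_AFTER_SCAN;
		ret = 1;
	}
	return ret;
}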


@@ -46,8 +46,10 @@ int dev_cache_exit(void);
*/
int dev_cache_check_for_open_devices(void);
void dev_cache_scan(void);
/* Trigger(1) or avoid(0) a scan */
void dev_cache_scan(int do_scan);
int dev_cache_has_scanned(void);
void dev_cache_full_scan(struct dev_filter *f);
int dev_cache_add_dir(const char *path);
int dev_cache_add_loopfile(const char *path);
@@ -64,7 +66,7 @@ void dev_set_preferred_name(struct dm_str_list *sl, struct device *dev);
* Object for iterating through the cache.
*/
struct dev_iter;
struct dev_iter *dev_iter_create(struct dev_filter *f, int unused);
struct dev_iter *dev_iter_create(struct dev_filter *f, int dev_scan);
void dev_iter_destroy(struct dev_iter *iter);
struct device *dev_iter_get(struct dev_iter *iter);


@@ -16,6 +16,7 @@
#include "lib.h"
#include "device.h"
#include "metadata.h"
#include "lvmcache.h"
#include "memlock.h"
#include "locking.h"
@@ -52,6 +53,7 @@
# endif
#endif
static DM_LIST_INIT(_open_devices);
static unsigned _dev_size_seqno = 1;
static const char *_reasons[] = {
@@ -197,7 +199,7 @@ int dev_get_block_size(struct device *dev, unsigned int *physical_block_size, un
*physical_block_size = (unsigned int) dev->phys_block_size;
*block_size = (unsigned int) dev->block_size;
out:
if (needs_open && !dev_close_immediate(dev))
if (needs_open && !dev_close(dev))
stack;
return r;
@@ -328,8 +330,6 @@ static int _dev_get_size_file(struct device *dev, uint64_t *size)
static int _dev_get_size_dev(struct device *dev, uint64_t *size)
{
const char *name = dev_name(dev);
int fd = dev->bcache_fd;
int do_close = 0;
if (dev->size_seqno == _dev_size_seqno) {
log_very_verbose("%s: using cached size %" PRIu64 " sectors",
@@ -338,16 +338,12 @@ static int _dev_get_size_dev(struct device *dev, uint64_t *size)
return 1;
}
if (fd <= 0) {
if (!dev_open_readonly(dev))
return_0;
fd = dev_fd(dev);
do_close = 1;
}
if (!dev_open_readonly(dev))
return_0;
if (ioctl(fd, BLKGETSIZE64, size) < 0) {
if (ioctl(dev_fd(dev), BLKGETSIZE64, size) < 0) {
log_sys_error("ioctl BLKGETSIZE64", name);
if (do_close && !dev_close_immediate(dev))
if (!dev_close(dev))
log_sys_error("close", name);
return 0;
}
@@ -356,11 +352,11 @@ static int _dev_get_size_dev(struct device *dev, uint64_t *size)
dev->size = *size;
dev->size_seqno = _dev_size_seqno;
log_very_verbose("%s: size is %" PRIu64 " sectors", name, *size);
if (do_close && !dev_close_immediate(dev))
if (!dev_close(dev))
log_sys_error("close", name);
log_very_verbose("%s: size is %" PRIu64 " sectors", name, *size);
return 1;
}
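The size path above ultimately relies on the BLKGETSIZE64 ioctl, which reports a block device's size in bytes. A standalone sketch of the same idea outside the lvm device layer (hypothetical helper, 512-byte sectors assumed):

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>           /* BLKGETSIZE64 */

/* Return device size in 512-byte sectors, or 0 on error. */
static uint64_t device_size_sectors(const char *path)
{
        uint64_t bytes = 0;
        int fd = open(path, O_RDONLY);

        if (fd < 0)
                return 0;
        if (ioctl(fd, BLKGETSIZE64, &bytes) < 0)
                bytes = 0;
        close(fd);

        return bytes >> 9;      /* SECTOR_SHIFT */
}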
@@ -378,7 +374,7 @@ static int _dev_read_ahead_dev(struct device *dev, uint32_t *read_ahead)
if (ioctl(dev->fd, BLKRAGET, &read_ahead_long) < 0) {
log_sys_error("ioctl BLKRAGET", dev_name(dev));
if (!dev_close_immediate(dev))
if (!dev_close(dev))
stack;
return 0;
}
@@ -389,7 +385,7 @@ static int _dev_read_ahead_dev(struct device *dev, uint32_t *read_ahead)
log_very_verbose("%s: read_ahead is %u sectors",
dev_name(dev), *read_ahead);
if (!dev_close_immediate(dev))
if (!dev_close(dev))
stack;
return 1;
@@ -410,13 +406,13 @@ static int _dev_discard_blocks(struct device *dev, uint64_t offset_bytes, uint64
if (ioctl(dev->fd, BLKDISCARD, &discard_range) < 0) {
log_error("%s: BLKDISCARD ioctl at offset %" PRIu64 " size %" PRIu64 " failed: %s.",
dev_name(dev), offset_bytes, size_bytes, strerror(errno));
if (!dev_close_immediate(dev))
if (!dev_close(dev))
stack;
/* It doesn't matter if discard failed, so return success. */
return 1;
}
if (!dev_close_immediate(dev))
if (!dev_close(dev))
stack;
return 1;
@@ -595,6 +591,8 @@ int dev_open_flags(struct device *dev, int flags, int direct, int quiet)
if ((flags & O_CREAT) && !(flags & O_TRUNC))
dev->end = lseek(dev->fd, (off_t) 0, SEEK_END);
dm_list_add(&_open_devices, &dev->open_list);
log_debug_devs("Opened %s %s%s%s", dev_name(dev),
dev->flags & DEV_OPENED_RW ? "RW" : "RO",
dev->flags & DEV_OPENED_EXCL ? " O_EXCL" : "",
@@ -631,12 +629,17 @@ int dev_open_readonly_quiet(struct device *dev)
int dev_test_excl(struct device *dev)
{
int flags = 0;
int flags;
int r;
flags = vg_write_lock_held() ? O_RDWR : O_RDONLY;
flags |= O_EXCL;
flags |= O_RDWR;
return dev_open_flags(dev, flags, 1, 1);
r = dev_open_flags(dev, flags, 1, 1);
if (r)
dev_close_immediate(dev);
return r;
}
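The reworked dev_test_excl() above opens with O_EXCL and closes straight away; on Linux, opening a block device with O_EXCL fails with EBUSY while the device is mounted or otherwise claimed. A minimal sketch of that probe (hypothetical can_open_excl(), not the lvm helper):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Returns 1 if the block device could be opened exclusively, 0 otherwise. */
static int can_open_excl(const char *path)
{
        int fd = open(path, O_RDWR | O_EXCL);

        if (fd < 0) {
                if (errno == EBUSY)
                        fprintf(stderr, "%s: in use\n", path);
                return 0;
        }
        close(fd);
        return 1;
}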
static void _close(struct device *dev)
@@ -646,6 +649,7 @@ static void _close(struct device *dev)
dev->fd = -1;
dev->phys_block_size = -1;
dev->block_size = -1;
dm_list_del(&dev->open_list);
log_debug_devs("Closed %s", dev_name(dev));
@@ -655,6 +659,7 @@ static void _close(struct device *dev)
static int _dev_close(struct device *dev, int immediate)
{
if (dev->fd < 0) {
log_error("Attempt to close device '%s' "
"which is not open.", dev_name(dev));
@@ -673,7 +678,9 @@ static int _dev_close(struct device *dev, int immediate)
log_debug_devs("%s: Immediate close attempt while still referenced",
dev_name(dev));
if (immediate || (dev->open_count < 1))
/* Close unless device is known to belong to a locked VG */
if (immediate ||
(dev->open_count < 1 && !lvmcache_pvid_is_locked(dev->pvid)))
_close(dev);
return 1;
@@ -689,6 +696,18 @@ int dev_close_immediate(struct device *dev)
return _dev_close(dev, 1);
}
void dev_close_all(void)
{
struct dm_list *doh, *doht;
struct device *dev;
dm_list_iterate_safe(doh, doht, &_open_devices) {
dev = dm_list_struct_base(doh, struct device, open_list);
if (dev->open_count < 1)
_close(dev);
}
}
static inline int _dev_is_valid(struct device *dev)
{
return (dev->max_error_count == NO_DEV_ERROR_COUNT_LIMIT ||


@@ -18,22 +18,27 @@
#define LUKS_SIGNATURE "LUKS\xba\xbe"
#define LUKS_SIGNATURE_SIZE 6
int dev_is_luks(struct device *dev, uint64_t *offset_found, int full)
int dev_is_luks(struct device *dev, uint64_t *offset_found)
{
char buf[LUKS_SIGNATURE_SIZE];
int ret = -1;
if (!scan_bcache)
return -EAGAIN;
if (!dev_open_readonly(dev)) {
stack;
return -1;
}
if (offset_found)
*offset_found = 0;
if (!dev_read_bytes(dev, 0, LUKS_SIGNATURE_SIZE, buf))
if (!dev_read(dev, 0, LUKS_SIGNATURE_SIZE, DEV_IO_SIGNATURES, buf))
goto_out;
ret = memcmp(buf, LUKS_SIGNATURE, LUKS_SIGNATURE_SIZE) ? 0 : 1;
out:
if (!dev_close(dev))
stack;
return ret;
}
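The check above is simply the 6-byte LUKS magic ("LUKS\xba\xbe") at offset 0. A standalone sketch of the same test using pread() (hypothetical helper, not part of this patch):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

#define LUKS_MAGIC      "LUKS\xba\xbe"
#define LUKS_MAGIC_LEN  6

/* 1 = LUKS header present, 0 = not present, -1 = open/read error. */
static int has_luks_magic(const char *path)
{
        char buf[LUKS_MAGIC_LEN];
        int fd = open(path, O_RDONLY);
        int ret;

        if (fd < 0)
                return -1;
        ret = (pread(fd, buf, sizeof(buf), 0) == sizeof(buf)) ?
                !memcmp(buf, LUKS_MAGIC, LUKS_MAGIC_LEN) : -1;
        close(fd);

        return ret;
}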


@@ -1,174 +0,0 @@
/*
* Copyright (C) 2018 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib.h"
#include "dev-type.h"
#include "xlate.h"
/*
* These lvm1 structs just used NAME_LEN in the previous format1 lvm2 code, but
* NAME_LEN was defined as 128 in generic lvm2 code that was not lvm1-specific
* and not disk-format-specific.
*/
#define LVM1_NAME_LEN 128
struct data_area {
uint32_t base;
uint32_t size;
} __attribute__ ((packed));
struct pv_disk {
int8_t id[2];
uint16_t version; /* lvm version */
struct data_area pv_on_disk;
struct data_area vg_on_disk;
struct data_area pv_uuidlist_on_disk;
struct data_area lv_on_disk;
struct data_area pe_on_disk;
int8_t pv_uuid[LVM1_NAME_LEN];
int8_t vg_name[LVM1_NAME_LEN];
int8_t system_id[LVM1_NAME_LEN]; /* for vgexport/vgimport */
uint32_t pv_major;
uint32_t pv_number;
uint32_t pv_status;
uint32_t pv_allocatable;
uint32_t pv_size;
uint32_t lv_cur;
uint32_t pe_size;
uint32_t pe_total;
uint32_t pe_allocated;
/* only present on version == 2 pv's */
uint32_t pe_start;
} __attribute__ ((packed));
int dev_is_lvm1(struct device *dev, char *buf, int buflen)
{
struct pv_disk *pvd = (struct pv_disk *) buf;
uint32_t version;
int ret;
version = xlate16(pvd->version);
if (pvd->id[0] == 'H' && pvd->id[1] == 'M' &&
(version == 1 || version == 2))
ret = 1;
else
ret = 0;
return ret;
}
#define POOL_MAGIC 0x011670
#define POOL_NAME_SIZE 256
#define NSPMajorVersion 4
#define NSPMinorVersion 1
#define NSPUpdateLevel 3
/* When checking for version matching, the first two numbers **
** are important for metadata formats, a.k.a pool labels. **
** All the numbers are important when checking if the user **
** space tools match up with the kernel module............. */
#define POOL_VERSION (NSPMajorVersion << 16 | \
NSPMinorVersion << 8 | \
NSPUpdateLevel)
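With the values above, POOL_VERSION packs to (4 << 16) | (1 << 8) | 3 = 0x040103, and the on-disk check in dev_is_pool() further down compares only pl_version >> 8, i.e. the major/minor pair 0x0401, ignoring the update level. A tiny self-check of that arithmetic:

#include <assert.h>

int main(void)
{
        unsigned pool_version = (4 << 16) | (1 << 8) | 3;

        assert(pool_version == 0x040103);
        assert((pool_version >> 8) == 0x0401);  /* update level dropped */
        return 0;
}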
struct pool_disk {
uint64_t pl_magic; /* Pool magic number */
uint64_t pl_pool_id; /* Unique pool identifier */
char pl_pool_name[POOL_NAME_SIZE]; /* Name of pool */
uint32_t pl_version; /* Pool version */
uint32_t pl_subpools; /* Number of subpools in this pool */
uint32_t pl_sp_id; /* Subpool number within pool */
uint32_t pl_sp_devs; /* Number of data partitions in this subpool */
uint32_t pl_sp_devid; /* Partition number within subpool */
uint32_t pl_sp_type; /* Partition type */
uint64_t pl_blocks; /* Number of blocks in this partition */
uint32_t pl_striping; /* Striping size within subpool */
/*
* If the number of DMEP devices is zero, then the next field **
* ** (pl_sp_dmepid) becomes the subpool ID for redirection. In **
* ** other words, if this subpool does not have the capability **
* ** to do DMEP, then it must specify which subpool will do it **
* ** in its place
*/
/*
* While the next 3 fields are no longer used, they must stay to keep **
* ** backward compatibility...........................................
*/
uint32_t pl_sp_dmepdevs;/* Number of dmep devices in this subpool */
uint32_t pl_sp_dmepid; /* Dmep device number within subpool */
uint32_t pl_sp_weight; /* if dmep dev, pref to using it */
uint32_t pl_minor; /* the pool minor number */
uint32_t pl_padding; /* reminder - think about alignment */
/*
* Even though we're zeroing out 8k at the front of the disk before
* writing the label, putting this in
*/
char pl_reserve[184]; /* bump the structure size out to 512 bytes */
};
#define CPIN_8(x, y, z) {memcpy((x), (y), (z));}
#define CPIN_16(x, y) {(x) = xlate16_be((y));}
#define CPIN_32(x, y) {(x) = xlate32_be((y));}
#define CPIN_64(x, y) {(x) = xlate64_be((y));}
static void pool_label_in(struct pool_disk *pl, void *buf)
{
struct pool_disk *bufpl = (struct pool_disk *) buf;
CPIN_64(pl->pl_magic, bufpl->pl_magic);
CPIN_64(pl->pl_pool_id, bufpl->pl_pool_id);
CPIN_8(pl->pl_pool_name, bufpl->pl_pool_name, POOL_NAME_SIZE);
CPIN_32(pl->pl_version, bufpl->pl_version);
CPIN_32(pl->pl_subpools, bufpl->pl_subpools);
CPIN_32(pl->pl_sp_id, bufpl->pl_sp_id);
CPIN_32(pl->pl_sp_devs, bufpl->pl_sp_devs);
CPIN_32(pl->pl_sp_devid, bufpl->pl_sp_devid);
CPIN_32(pl->pl_sp_type, bufpl->pl_sp_type);
CPIN_64(pl->pl_blocks, bufpl->pl_blocks);
CPIN_32(pl->pl_striping, bufpl->pl_striping);
CPIN_32(pl->pl_sp_dmepdevs, bufpl->pl_sp_dmepdevs);
CPIN_32(pl->pl_sp_dmepid, bufpl->pl_sp_dmepid);
CPIN_32(pl->pl_sp_weight, bufpl->pl_sp_weight);
CPIN_32(pl->pl_minor, bufpl->pl_minor);
CPIN_32(pl->pl_padding, bufpl->pl_padding);
CPIN_8(pl->pl_reserve, bufpl->pl_reserve, 184);
}
int dev_is_pool(struct device *dev, char *buf, int buflen)
{
struct pool_disk pd;
int ret;
pool_label_in(&pd, buf);
/* can ignore 8 rightmost bits for ondisk format check */
if ((pd.pl_magic == POOL_MAGIC) &&
(pd.pl_version >> 8 == POOL_VERSION >> 8))
ret = 1;
else
ret = 0;
return ret;
}


@@ -37,12 +37,9 @@ static int _dev_has_md_magic(struct device *dev, uint64_t sb_offset)
uint32_t md_magic;
/* Version 1 is little endian; version 0.90.0 is machine endian */
if (!dev_read_bytes(dev, sb_offset, sizeof(uint32_t), &md_magic))
return_0;
if ((md_magic == MD_SB_MAGIC) ||
((MD_SB_MAGIC != xlate32(MD_SB_MAGIC)) && (md_magic == xlate32(MD_SB_MAGIC))))
if (dev_read(dev, sb_offset, sizeof(uint32_t), DEV_IO_SIGNATURES, &md_magic) &&
((md_magic == MD_SB_MAGIC) ||
((MD_SB_MAGIC != xlate32(MD_SB_MAGIC)) && (md_magic == xlate32(MD_SB_MAGIC)))))
return 1;
return 0;
@@ -112,14 +109,11 @@ static int _udev_dev_is_md(struct device *dev)
/*
* Returns -1 on error
*/
static int _native_dev_is_md(struct device *dev, uint64_t *offset_found, int full)
static int _native_dev_is_md(struct device *dev, uint64_t *offset_found)
{
int ret = 1;
md_minor_version_t minor;
uint64_t size, sb_offset;
int ret;
if (!scan_bcache)
return -EAGAIN;
if (!dev_get_size(dev, &size)) {
stack;
@@ -129,73 +123,38 @@ static int _native_dev_is_md(struct device *dev, uint64_t *offset_found, int ful
if (size < MD_RESERVED_SECTORS * 2)
return 0;
/*
* Old md versions locate the magic number at the end of the device.
* Those checks can't be satisfied with the initial bcache data, and
* would require an extra read i/o at the end of every device. Issuing
* an extra read to every device in every command, just to check for
* the old md format is a bad tradeoff.
*
* When "full" is set, we check a the start and end of the device for
* md magic numbers. When "full" is not set, we only check at the
* start of the device for the magic numbers. We decide for each
* command if it should do a full check (cmd->use_full_md_check),
* and set it for commands that could possibly write to an md dev
* (pvcreate/vgcreate/vgextend).
*
* For old md versions with magic numbers at the end of devices,
* the md dev components won't be filtered out here when full is 0,
* so they will be scanned, and appear as duplicate PVs in lvmcache.
* The md device itself will be chosen as the primary duplicate,
* and the components are dropped from the list of duplicates,
* i.e. a kind of post-scan filtering.
*/
if (!full) {
sb_offset = 0;
if (_dev_has_md_magic(dev, sb_offset)) {
log_debug_devs("Found md magic number at offset 0 of %s.", dev_name(dev));
ret = 1;
goto out;
}
sb_offset = 8 << SECTOR_SHIFT;
if (_dev_has_md_magic(dev, sb_offset)) {
log_debug_devs("Found md magic number at offset %d of %s.", (int)sb_offset, dev_name(dev));
ret = 1;
goto out;
}
ret = 0;
goto out;
if (!dev_open_readonly(dev)) {
stack;
return -1;
}
/* Check if it is an md component device. */
/* Version 0.90.0 */
sb_offset = MD_NEW_SIZE_SECTORS(size) << SECTOR_SHIFT;
if (_dev_has_md_magic(dev, sb_offset)) {
ret = 1;
if (_dev_has_md_magic(dev, sb_offset))
goto out;
}
minor = MD_MINOR_VERSION_MIN;
/* Version 1, try v1.0 -> v1.2 */
do {
sb_offset = _v1_sb_offset(size, minor);
if (_dev_has_md_magic(dev, sb_offset)) {
ret = 1;
if (_dev_has_md_magic(dev, sb_offset))
goto out;
}
} while (++minor <= MD_MINOR_VERSION_MAX);
ret = 0;
out:
if (!dev_close(dev))
stack;
if (ret && offset_found)
*offset_found = sb_offset;
return ret;
}
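For the v0.90 branch above, the superblock lives in the last 64 KiB-aligned 64 KiB of the device, and MD_NEW_SIZE_SECTORS() expresses that position in sectors. A sketch of the arithmetic (assuming MD_RESERVED_SECTORS is 128, i.e. 64 KiB of 512-byte sectors, as in the usual md headers):

#include <stdint.h>
#include <stdio.h>

#define MD_RESERVED_SECTORS 128UL       /* 64 KiB / 512 */
#define SECTOR_SHIFT 9

/* v0.90 superblock offset in bytes for a device of dev_sectors sectors. */
static uint64_t md_v090_sb_offset(uint64_t dev_sectors)
{
        uint64_t sb_sector = (dev_sectors & ~(MD_RESERVED_SECTORS - 1))
                                - MD_RESERVED_SECTORS;
        return sb_sector << SECTOR_SHIFT;
}

int main(void)
{
        /* A 1 GiB component (2097152 sectors): superblock at 1 GiB - 64 KiB. */
        printf("%llu\n", (unsigned long long) md_v090_sb_offset(2097152));
        return 0;
}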
int dev_is_md(struct device *dev, uint64_t *offset_found, int full)
int dev_is_md(struct device *dev, uint64_t *offset_found)
{
/*
@@ -204,7 +163,7 @@ int dev_is_md(struct device *dev, uint64_t *offset_found, int full)
* information is not in udev db.
*/
if ((dev->ext.src == DEV_EXT_NONE) || offset_found)
return _native_dev_is_md(dev, offset_found, full);
return _native_dev_is_md(dev, offset_found);
if (dev->ext.src == DEV_EXT_UDEV)
return _udev_dev_is_md(dev);


@@ -35,21 +35,23 @@ static int _swap_detect_signature(const char *buf)
return 0;
}
int dev_is_swap(struct device *dev, uint64_t *offset_found, int full)
int dev_is_swap(struct device *dev, uint64_t *offset_found)
{
char buf[10];
uint64_t size;
unsigned page;
int ret = 0;
if (!scan_bcache)
return -EAGAIN;
if (!dev_get_size(dev, &size)) {
stack;
return -1;
}
if (!dev_open_readonly(dev)) {
stack;
return -1;
}
for (page = 0x1000; page <= MAX_PAGESIZE; page <<= 1) {
/*
* skip 32k pagesize since this does not seem to be supported
@@ -58,7 +60,8 @@ int dev_is_swap(struct device *dev, uint64_t *offset_found, int full)
continue;
if (size < (page >> SECTOR_SHIFT))
break;
if (!dev_read_bytes(dev, page - SIGNATURE_SIZE, SIGNATURE_SIZE, buf)) {
if (!dev_read(dev, page - SIGNATURE_SIZE,
SIGNATURE_SIZE, DEV_IO_SIGNATURES, buf)) {
ret = -1;
break;
}
@@ -70,6 +73,9 @@ int dev_is_swap(struct device *dev, uint64_t *offset_found, int full)
}
}
if (!dev_close(dev))
stack;
return ret;
}
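The probe above looks for the swap signature in the last SIGNATURE_SIZE bytes of the first page, trying each candidate page size in turn; the classic signatures are the 10-byte strings "SWAP-SPACE" and "SWAPSPACE2". A standalone sketch for a single page size (hypothetical helper):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#define SIG_LEN 10

/* 1 = swap signature found for the given page size, 0 = not found or error. */
static int has_swap_signature(const char *path, uint64_t page_size)
{
        char buf[SIG_LEN];
        int fd = open(path, O_RDONLY);
        int ret = 0;

        if (fd < 0)
                return 0;
        if (pread(fd, buf, SIG_LEN, page_size - SIG_LEN) == SIG_LEN)
                ret = !memcmp(buf, "SWAP-SPACE", SIG_LEN) ||
                      !memcmp(buf, "SWAPSPACE2", SIG_LEN);
        close(fd);

        return ret;
}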


@@ -17,8 +17,6 @@
#include "xlate.h"
#include "config.h"
#include "metadata.h"
#include "bcache.h"
#include "label.h"
#include <libgen.h>
#include <ctype.h>
@@ -215,9 +213,6 @@ int dev_subsystem_part_major(struct dev_types *dt, struct device *dev)
if (MAJOR(dev->dev) == dt->device_mapper_major)
return 1;
if (MAJOR(dev->dev) == dt->md_major)
return 1;
if (MAJOR(dev->dev) == dt->drbd_major)
return 1;
@@ -368,7 +363,7 @@ static int _has_partition_table(struct device *dev)
uint16_t magic;
} __attribute__((packed)) buf; /* sizeof() == SECTOR_SIZE */
if (!dev_read_bytes(dev, UINT64_C(0), sizeof(buf), &buf))
if (!dev_read(dev, UINT64_C(0), sizeof(buf), DEV_IO_SIGNATURES, &buf))
return_0;
/* FIXME Check for other types of partition table too */
@@ -437,9 +432,6 @@ static int _native_dev_is_partitioned(struct dev_types *dt, struct device *dev)
{
int r;
if (!scan_bcache)
return -EAGAIN;
if (!_is_partitionable(dt, dev))
return 0;
@@ -447,8 +439,17 @@ static int _native_dev_is_partitioned(struct dev_types *dt, struct device *dev)
if ((MAJOR(dev->dev) == dt->dasd_major) && dasd_is_cdl_formatted(dev))
return 1;
if (!dev_open_readonly_quiet(dev)) {
log_debug_devs("%s: failed to open device, considering device "
"is partitioned", dev_name(dev));
return 1;
}
r = _has_partition_table(dev);
if (!dev_close(dev))
stack;
return r;
}
@@ -674,7 +675,7 @@ static int _blkid_wipe(blkid_probe probe, struct device *dev, const char *name,
} else
log_verbose(_msg_wiping, type, name);
if (!dev_write_zeros(dev, offset_value, len)) {
if (!dev_set(dev, offset_value, len, DEV_IO_SIGNATURES, 0)) {
log_error("Failed to wipe %s signature on %s.", type, name);
return 0;
}
@@ -747,12 +748,12 @@ out:
static int _wipe_signature(struct device *dev, const char *type, const char *name,
int wipe_len, int yes, force_t force, int *wiped,
int (*signature_detection_fn)(struct device *dev, uint64_t *offset_found, int full))
int (*signature_detection_fn)(struct device *dev, uint64_t *offset_found))
{
int wipe;
uint64_t offset_found;
wipe = signature_detection_fn(dev, &offset_found, 1);
wipe = signature_detection_fn(dev, &offset_found);
if (wipe == -1) {
log_error("Fatal error while trying to detect %s on %s.",
type, name);
@@ -771,7 +772,7 @@ static int _wipe_signature(struct device *dev, const char *type, const char *nam
}
log_print_unless_silent("Wiping %s on %s.", type, name);
if (!dev_write_zeros(dev, offset_found, wipe_len)) {
if (!dev_set(dev, offset_found, wipe_len, DEV_IO_SIGNATURES, 0)) {
log_error("Failed to wipe %s on %s.", type, name);
return 0;
}


@@ -17,7 +17,6 @@
#include "device.h"
#include "display.h"
#include "label.h"
#define NUMBER_OF_MAJORS 4096
@@ -57,15 +56,12 @@ const char *dev_subsystem_name(struct dev_types *dt, struct device *dev);
int major_is_scsi_device(struct dev_types *dt, int major);
/* Signature/superblock recognition with position returned where found. */
int dev_is_md(struct device *dev, uint64_t *sb, int full);
int dev_is_swap(struct device *dev, uint64_t *signature, int full);
int dev_is_luks(struct device *dev, uint64_t *signature, int full);
int dev_is_md(struct device *dev, uint64_t *sb);
int dev_is_swap(struct device *dev, uint64_t *signature);
int dev_is_luks(struct device *dev, uint64_t *signature);
int dasd_is_cdl_formatted(struct device *dev);
int udev_dev_is_mpath_component(struct device *dev);
int dev_is_lvm1(struct device *dev, char *buf, int buflen);
int dev_is_pool(struct device *dev, char *buf, int buflen);
/* Signature wiping. */
#define TYPE_LVM1_MEMBER 0x001
#define TYPE_LVM2_MEMBER 0x002


@@ -31,10 +31,6 @@
#define DEV_USED_FOR_LV 0x00000100 /* Is device used for an LV */
#define DEV_ASSUMED_FOR_LV 0x00000200 /* Is device assumed for an LV */
#define DEV_NOT_O_NOATIME 0x00000400 /* Don't use O_NOATIME */
#define DEV_IN_BCACHE 0x00000800 /* dev fd is open and used in bcache */
#define DEV_BCACHE_EXCL 0x00001000 /* bcache_fd should be open EXCL */
#define DEV_FILTER_AFTER_SCAN 0x00002000 /* apply filter after bcache has data */
#define DEV_FILTER_OUT_SCAN 0x00004000 /* filtered out during label scan */
/*
* Support for external device info.
@@ -69,13 +65,12 @@ struct device {
int phys_block_size;
int block_size;
int read_ahead;
int bcache_fd;
uint32_t flags;
unsigned size_seqno;
uint64_t size;
uint64_t end;
struct dm_list open_list;
struct dev_ext ext;
const char *duplicate_prefer_reason;
const char *vgid; /* if device is an LV */
const char *lvid; /* if device is an LV */
@@ -144,6 +139,7 @@ int dev_open_readonly_buffered(struct device *dev);
int dev_open_readonly_quiet(struct device *dev);
int dev_close(struct device *dev);
int dev_close_immediate(struct device *dev);
void dev_close_all(void);
int dev_test_excl(struct device *dev);
int dev_fd(struct device *dev);


@@ -705,10 +705,13 @@ void vgdisplay_full(const struct volume_group *vg)
log_print("--- Volume group ---");
log_print("VG Name %s", vg->name);
log_print("System ID %s", (vg->system_id && *vg->system_id) ? vg->system_id : "");
log_print("System ID %s", (vg->system_id && *vg->system_id) ? vg->system_id : vg->lvm1_system_id ? : "");
log_print("Format %s", vg->fid->fmt->name);
log_print("Metadata Areas %d", vg_mda_count(vg));
log_print("Metadata Sequence No %d", vg->seqno);
if (vg->fid->fmt->features & FMT_MDAS) {
log_print("Metadata Areas %d",
vg_mda_count(vg));
log_print("Metadata Sequence No %d", vg->seqno);
}
access_str = vg->status & (LVM_READ | LVM_WRITE);
log_print("VG Access %s%s%s%s",
access_str == (LVM_READ | LVM_WRITE) ? "read/write" : "",


@@ -15,19 +15,14 @@
#include "lib.h"
#include "filter.h"
#include "device.h"
static int _and_p(struct dev_filter *f, struct device *dev)
{
struct dev_filter **filters;
int ret;
for (filters = (struct dev_filter **) f->private; *filters; ++filters) {
ret = (*filters)->passes_filter(*filters, dev);
if (!ret)
for (filters = (struct dev_filter **) f->private; *filters; ++filters)
if (!(*filters)->passes_filter(*filters, dev))
return 0; /* No 'stack': a filter, not an error. */
}
return 1;
}


@@ -20,88 +20,15 @@
#define MSG_SKIPPING "%s: Skipping md component device"
/*
* The purpose of these functions is to ignore md component devices,
* e.g. if /dev/md0 is a raid1 composed of /dev/loop0 and /dev/loop1,
* lvm wants to deal with md0 and ignore loop0 and loop1. md0 should
* pass the filter, and loop0,loop1 should not pass the filter so lvm
* will ignore them.
*
* (This is assuming lvm.conf md_component_detection=1.)
*
* If lvm does *not* ignore the components, then lvm will read lvm
* labels from the md dev and from the component devs, and will see
* them all as duplicates of each other. LVM duplicate resolution
* will then kick in and keep the md dev around to use and ignore
* the components.
*
* It is better to exclude the components as early as possible during
* lvm processing, ideally before lvm even looks for labels on the
* components, so that duplicate resolution can be avoided. There are
* a number of ways that md components can be excluded earlier than
* the duplicate resolution phase:
*
* - When external_device_info_source="udev", lvm discovers a device is
* an md component by asking udev during the initial filtering phase.
* However, lvm's default is to not use udev for this. The
* alternative is "native" detection in which lvm tries to detect
* md components itself.
*
* - When using native detection, lvm's md filter looks for the md
* superblock at the start of devices. It will see the md superblock
* on the components, exclude them in the md filter, and avoid
* handling them later in duplicate resolution.
*
* - When using native detection, lvm's md filter will not detect
* components when the md device has an older superblock version that
* places the superblock at the end of the device. This case will
* fall back to duplicate resolution to exclude components.
*
* A variation of the description above occurs for lvm commands that
* intend to create new PVs on devices (pvcreate, vgcreate, vgextend).
* For these commands, the native md filter also reads the end of all
* devices to check for the odd md superblocks.
*
* (The reason that external_device_info_source is not set to udev by
* default is that there have been issues with udev not being promptly
* or reliably updated about md state changes, causing the udev info
* that lvm uses to be occasionally wrong.)
*/
/*
* Returns 0 if:
* the device is an md component and it should be ignored.
*
* Returns 1 if:
* the device is not md component and should not be ignored.
*
* The actual md device will pass this filter and should be used,
* it is the md component devices that we are trying to exclude
* that will not pass.
*/
static int _passes_md_filter(struct device *dev, int full)
static int _ignore_md(struct dev_filter *f __attribute__((unused)),
struct device *dev)
{
int ret;
/*
* When md_component_detection=0, don't even try to skip md
* components.
*/
if (!md_filtering())
return 1;
ret = dev_is_md(dev, NULL, full);
if (ret == -EAGAIN) {
/* let pass, call again after scan */
dev->flags |= DEV_FILTER_AFTER_SCAN;
log_debug_devs("filter md deferred %s", dev_name(dev));
return 1;
}
if (ret == 0)
return 1;
ret = dev_is_md(dev, NULL);
if (ret == 1) {
if (dev->ext.src == DEV_EXT_NONE)
@@ -121,18 +48,6 @@ static int _passes_md_filter(struct device *dev, int full)
return 1;
}
static int _passes_md_filter_lite(struct dev_filter *f __attribute__((unused)),
struct device *dev)
{
return _passes_md_filter(dev, 0);
}
static int _passes_md_filter_full(struct dev_filter *f __attribute__((unused)),
struct device *dev)
{
return _passes_md_filter(dev, 1);
}
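Taken together with the -EAGAIN handling in _passes_md_filter(), the lite/full pair above implements a tri-state protocol: the detector returns 0 ("not an md component"), 1 ("component, exclude it"), or -EAGAIN ("no scanned data yet, re-check after the scan"). A minimal sketch of that dispatch, with hypothetical names (check_fn, defer_flag) standing in for the real ones:

#include <errno.h>

enum { FILTER_EXCLUDE = 0, FILTER_INCLUDE = 1 };

/* check_fn: 0 = clean, 1 = component found, -EAGAIN = data not scanned yet. */
static int filter_with_deferral(int (*check_fn)(void *dev), void *dev,
                                unsigned *defer_flag)
{
        int ret = check_fn(dev);

        if (ret == -EAGAIN) {
                *defer_flag = 1;        /* re-run this filter after the scan */
                return FILTER_INCLUDE;  /* let the device pass for now */
        }

        return ret ? FILTER_EXCLUDE : FILTER_INCLUDE;
}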
static void _destroy(struct dev_filter *f)
{
if (f->use_count)
@@ -141,7 +56,7 @@ static void _destroy(struct dev_filter *f)
dm_free(f);
}
struct dev_filter *md_filter_create(struct cmd_context *cmd, struct dev_types *dt)
struct dev_filter *md_filter_create(struct dev_types *dt)
{
struct dev_filter *f;
@@ -150,18 +65,7 @@ struct dev_filter *md_filter_create(struct cmd_context *cmd, struct dev_types *d
return NULL;
}
/*
* FIXME: for commands that want a full md check (pvcreate, vgcreate,
* vgextend), we do an extra read at the end of every device that the
* filter looks at. This isn't necessary; we only need to do the full
* md check on the PVs that these commands are trying to use.
*/
if (cmd->use_full_md_check)
f->passes_filter = _passes_md_filter_full;
else
f->passes_filter = _passes_md_filter_lite;
f->passes_filter = _ignore_md;
f->destroy = _destroy;
f->use_count = 0;
f->private = dt;


@@ -21,18 +21,8 @@
static int _passes_partitioned_filter(struct dev_filter *f, struct device *dev)
{
struct dev_types *dt = (struct dev_types *) f->private;
int ret;
ret = dev_is_partitioned(dt, dev);
if (ret == -EAGAIN) {
/* let pass, call again after scan */
log_debug_devs("filter partitioned deferred %s", dev_name(dev));
dev->flags |= DEV_FILTER_AFTER_SCAN;
return 1;
}
if (ret) {
if (dev_is_partitioned(dt, dev)) {
if (dev->ext.src == DEV_EXT_NONE)
log_debug_devs(MSG_SKIPPING, dev_name(dev));
else


@@ -26,30 +26,6 @@ struct pfilter {
struct dev_types *dt;
};
/*
* The persistent filter is a filter layer that sits above the other filters and
* caches the final result of those other filters. When a device is first
* checked against filters, it will not be in this cache, so this filter will
* pass the device down to the other filters to check it. The other filters
* will run and either include the device (good/pass) or exclude the device
* (bad/fail). That good or bad result propagates up through this filter which
* saves the result. The next time some code checks the filters against the
* device, this persistent/cache filter is checked first. This filter finds
* the previous result in its cache and returns it without reevaluating the
* other real filters.
*
* FIXME: a cache like this should not be needed. The fact it's needed is a
* symptom of code that should be fixed to not reevaluate filters multiple
* times. A device should be checked against the filter once, and then not
* need to be checked again. With scanning now controlled, we could probably
* do this.
*
* FIXME: "persistent" isn't a great name for this caching filter. This filter
* at one time saved its cache results to a file, which is how it got the name.
* That .cache file does not work well, causes problems, and is no longer used
* by default. The old code for it should be removed.
*/
/*
* The hash table holds one of these two states
* against each entry.
@@ -72,7 +48,11 @@ static void _persistent_filter_wipe(struct dev_filter *f)
{
struct pfilter *pf = (struct pfilter *) f->private;
log_verbose("Wiping cache of LVM-capable devices");
dm_hash_wipe(pf->devices);
/* Trigger complete device scan */
dev_cache_scan(1);
}
static int _read_array(struct pfilter *pf, struct dm_config_tree *cft,
@@ -146,6 +126,15 @@ int persistent_filter_load(struct dev_filter *f, struct dm_config_tree **cft_out
/* _read_array(pf, cft, "persistent_filter_cache/invalid_devices",
PF_BAD_DEVICE); */
/* Did we find anything? */
if (dm_hash_get_num_entries(pf->devices)) {
/* We populated dev_cache ourselves */
dev_cache_scan(0);
if (!dev_cache_index_devs())
stack;
r = 1;
}
log_very_verbose("Loaded persistent filter cache from %s", pf->file);
out:
@@ -288,51 +277,27 @@ static int _lookup_p(struct dev_filter *f, struct device *dev)
struct pfilter *pf = (struct pfilter *) f->private;
void *l = dm_hash_lookup(pf->devices, dev_name(dev));
struct dm_str_list *sl;
int pass = 1;
/* Cached bad, skip dev */
/* Cached BAD? */
if (l == PF_BAD_DEVICE) {
log_debug_devs("%s: filter cache skipping (cached bad)", dev_name(dev));
log_debug_devs("%s: Skipping (cached)", dev_name(dev));
return 0;
}
/* Cached good, use dev */
if (l == PF_GOOD_DEVICE) {
log_debug_devs("%s: filter cache using (cached good)", dev_name(dev));
return 1;
/* Test dm devices every time, so cache them as GOOD. */
if (MAJOR(dev->dev) == pf->dt->device_mapper_major) {
if (!l)
dm_list_iterate_items(sl, &dev->aliases)
if (!dm_hash_insert(pf->devices, sl->str, PF_GOOD_DEVICE)) {
log_error("Failed to hash device to filter.");
return 0;
}
return pf->real->passes_filter(pf->real, dev);
}
/* Uncached, check filters and cache the result */
/* Uncached */
if (!l) {
dev->flags &= ~DEV_FILTER_AFTER_SCAN;
pass = pf->real->passes_filter(pf->real, dev);
if (!pass) {
/*
* A device that does not pass one filter is excluded
* even if the result of another filter is deferred,
* because the deferred result won't change the exclude.
*/
l = PF_BAD_DEVICE;
} else if ((pass == -EAGAIN) || (dev->flags & DEV_FILTER_AFTER_SCAN)) {
/*
* When the filter result is deferred, we let the device
* pass for now, but do not cache the result. We need to
* rerun the filters later. At that point the final result
* will be cached.
*/
log_debug_devs("filter cache deferred %s", dev_name(dev));
dev->flags |= DEV_FILTER_AFTER_SCAN;
pass = 1;
goto out;
} else if (pass) {
l = PF_GOOD_DEVICE;
}
log_debug_devs("filter caching %s %s", pass ? "good" : "bad", dev_name(dev));
l = pf->real->passes_filter(pf->real, dev) ? PF_GOOD_DEVICE : PF_BAD_DEVICE;
dm_list_iterate_items(sl, &dev->aliases)
if (!dm_hash_insert(pf->devices, sl->str, l)) {
@@ -340,8 +305,8 @@ static int _lookup_p(struct dev_filter *f, struct device *dev)
return 0;
}
}
out:
return pass;
return (l == PF_BAD_DEVICE) ? 0 : 1;
}
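Stripped of the lvm specifics, _lookup_p() memoizes a per-device-name good/bad verdict and only consults the real filter chain on a cache miss. A toy version of that idea (fixed-size table, hypothetical names, no relation to dm_hash):

#include <stdio.h>
#include <string.h>

#define CACHE_SLOTS 256

struct verdict_cache {
        char name[CACHE_SLOTS][64];
        unsigned char verdict[CACHE_SLOTS];     /* 0 = bad, 1 = good */
        unsigned used;
};

static int cached_passes(struct verdict_cache *c, const char *name,
                         int (*real_filter)(const char *name))
{
        unsigned i;

        for (i = 0; i < c->used; i++)
                if (!strcmp(c->name[i], name))
                        return c->verdict[i];           /* cache hit */

        if (c->used < CACHE_SLOTS) {                    /* miss: ask once, remember */
                unsigned slot = c->used++;

                snprintf(c->name[slot], sizeof(c->name[slot]), "%s", name);
                c->verdict[slot] = real_filter(name) ? 1 : 0;
                return c->verdict[slot];
        }

        return real_filter(name) ? 1 : 0;               /* table full: no caching */
}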
static void _persistent_destroy(struct dev_filter *f)


@@ -1,96 +0,0 @@
/*
* Copyright (C) 2004 Luca Berra
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib.h"
#include "filter.h"
#ifdef __linux__
#define BUFSIZE 4096
static int _ignore_signature(struct dev_filter *f __attribute__((unused)),
struct device *dev)
{
char buf[BUFSIZE];
int ret = 0;
if (!scan_bcache) {
/* let pass, call again after scan */
log_debug_devs("filter signature deferred %s", dev_name(dev));
dev->flags |= DEV_FILTER_AFTER_SCAN;
return 1;
}
memset(buf, 0, BUFSIZE);
if (!dev_read_bytes(dev, 0, BUFSIZE, buf)) {
log_debug_devs("%s: Skipping: error in signature detection",
dev_name(dev));
ret = 0;
goto out;
}
if (dev_is_lvm1(dev, buf, BUFSIZE)) {
log_debug_devs("%s: Skipping lvm1 device", dev_name(dev));
ret = 0;
goto out;
}
if (dev_is_pool(dev, buf, BUFSIZE)) {
log_debug_devs("%s: Skipping gfs-pool device", dev_name(dev));
ret = 0;
goto out;
}
ret = 1;
out:
return ret;
}
static void _destroy(struct dev_filter *f)
{
if (f->use_count)
log_error(INTERNAL_ERROR "Destroying signature filter while in use %u times.", f->use_count);
dm_free(f);
}
struct dev_filter *signature_filter_create(struct dev_types *dt)
{
struct dev_filter *f;
if (!(f = dm_zalloc(sizeof(*f)))) {
log_error("md filter allocation failed");
return NULL;
}
f->passes_filter = _ignore_signature;
f->destroy = _destroy;
f->use_count = 0;
f->private = dt;
log_debug_devs("signature filter initialised.");
return f;
}
#else
struct dev_filter *signature_filter_create(struct dev_types *dt)
{
return NULL;
}
#endif


@@ -27,6 +27,12 @@ static int _native_check_pv_min_size(struct device *dev)
uint64_t size;
int ret = 0;
/* Check it's accessible */
if (!dev_open_readonly_quiet(dev)) {
log_debug_devs("%s: Skipping: open failed", dev_name(dev));
return 0;
}
/* Check it's not too small */
if (!dev_get_size(dev, &size)) {
log_debug_devs("%s: Skipping: dev_get_size failed", dev_name(dev));
@@ -41,6 +47,9 @@ static int _native_check_pv_min_size(struct device *dev)
ret = 1;
out:
if (!dev_close(dev))
stack;
return ret;
}


@@ -23,7 +23,7 @@
struct dev_filter *composite_filter_create(int n, int use_dev_ext_info, struct dev_filter **filters);
struct dev_filter *lvm_type_filter_create(struct dev_types *dt);
struct dev_filter *md_filter_create(struct cmd_context *cmd, struct dev_types *dt);
struct dev_filter *md_filter_create(struct dev_types *dt);
struct dev_filter *fwraid_filter_create(struct dev_types *dt);
struct dev_filter *mpath_filter_create(struct dev_types *dt);
struct dev_filter *partitioned_filter_create(struct dev_types *dt);
@@ -31,7 +31,6 @@ struct dev_filter *persistent_filter_create(struct dev_types *dt,
struct dev_filter *f,
const char *file);
struct dev_filter *sysfs_filter_create(void);
struct dev_filter *signature_filter_create(struct dev_types *dt);
struct dev_filter *internal_filter_create(void);
int internal_filter_allow(struct dm_pool *mem, struct device *dev);


@@ -0,0 +1 @@
init_format

lib/format1/Makefile.in (new file, 33 lines)

@@ -0,0 +1,33 @@
#
# Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
# Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
SOURCES =\
disk-rep.c \
format1.c \
import-export.c \
import-extents.c \
layout.c \
lvm1-label.c \
vg_number.c
LIB_SHARED = liblvm2format1.$(LIB_SUFFIX)
LIB_VERSION = $(LIB_VERSION_LVM)
include $(top_builddir)/make.tmpl
install: install_lvm2_plugin

lib/format1/disk-rep.c (new file, 762 lines)

@@ -0,0 +1,762 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib.h"
#include "disk-rep.h"
#include "xlate.h"
#include "lvmcache.h"
#include "metadata-exported.h"
#include <fcntl.h>
#define xx16(v) disk->v = xlate16(disk->v)
#define xx32(v) disk->v = xlate32(disk->v)
#define xx64(v) disk->v = xlate64(disk->v)
/*
* Functions to perform the endian conversion
* between disk and core. The same code works
* both ways of course.
*/
static void _xlate_pvd(struct pv_disk *disk)
{
xx16(version);
xx32(pv_on_disk.base);
xx32(pv_on_disk.size);
xx32(vg_on_disk.base);
xx32(vg_on_disk.size);
xx32(pv_uuidlist_on_disk.base);
xx32(pv_uuidlist_on_disk.size);
xx32(lv_on_disk.base);
xx32(lv_on_disk.size);
xx32(pe_on_disk.base);
xx32(pe_on_disk.size);
xx32(pv_major);
xx32(pv_number);
xx32(pv_status);
xx32(pv_allocatable);
xx32(pv_size);
xx32(lv_cur);
xx32(pe_size);
xx32(pe_total);
xx32(pe_allocated);
xx32(pe_start);
}
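The xx16/xx32 wrappers above byte-swap each field with xlate16/xlate32 from xlate.h; since a byte swap is its own inverse, the same routine converts disk-to-core and core-to-disk, as the comment says, and when host and on-disk byte order already match it collapses to a no-op. A sketch of such a swap helper (an assumption about the shape of xlate, not a copy of it):

#include <stdint.h>

/* Unconditional byte swaps; applying either one twice restores the value. */
static inline uint16_t swap16(uint16_t v)
{
        return (uint16_t) ((v >> 8) | (v << 8));
}

static inline uint32_t swap32(uint32_t v)
{
        return (v >> 24) | ((v >> 8) & 0x0000ff00U) |
               ((v << 8) & 0x00ff0000U) | (v << 24);
}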
static void _xlate_lvd(struct lv_disk *disk)
{
xx32(lv_access);
xx32(lv_status);
xx32(lv_open);
xx32(lv_dev);
xx32(lv_number);
xx32(lv_mirror_copies);
xx32(lv_recovery);
xx32(lv_schedule);
xx32(lv_size);
xx32(lv_snapshot_minor);
xx16(lv_chunk_size);
xx16(dummy);
xx32(lv_allocated_le);
xx32(lv_stripes);
xx32(lv_stripesize);
xx32(lv_badblock);
xx32(lv_allocation);
xx32(lv_io_timeout);
xx32(lv_read_ahead);
}
static void _xlate_vgd(struct vg_disk *disk)
{
xx32(vg_number);
xx32(vg_access);
xx32(vg_status);
xx32(lv_max);
xx32(lv_cur);
xx32(lv_open);
xx32(pv_max);
xx32(pv_cur);
xx32(pv_act);
xx32(dummy);
xx32(vgda);
xx32(pe_size);
xx32(pe_total);
xx32(pe_allocated);
xx32(pvg_total);
}
static void _xlate_extents(struct pe_disk *extents, uint32_t count)
{
unsigned i;
for (i = 0; i < count; i++) {
extents[i].lv_num = xlate16(extents[i].lv_num);
extents[i].le_num = xlate16(extents[i].le_num);
}
}
/*
* Handle both minor metadata formats.
*/
static int _munge_formats(struct pv_disk *pvd)
{
uint32_t pe_start;
unsigned b, e;
switch (pvd->version) {
case 1:
pvd->pe_start = ((pvd->pe_on_disk.base +
pvd->pe_on_disk.size) >> SECTOR_SHIFT);
break;
case 2:
pvd->version = 1;
pe_start = pvd->pe_start << SECTOR_SHIFT;
pvd->pe_on_disk.size = pe_start - pvd->pe_on_disk.base;
break;
default:
return 0;
}
/* UUID too long? */
if (pvd->pv_uuid[ID_LEN]) {
/* Retain ID_LEN chars from end */
for (e = ID_LEN; e < sizeof(pvd->pv_uuid); e++) {
if (!pvd->pv_uuid[e]) {
e--;
break;
}
}
for (b = 0; b < ID_LEN; b++) {
pvd->pv_uuid[b] = pvd->pv_uuid[++e - ID_LEN];
/* FIXME Remove all invalid chars */
if (pvd->pv_uuid[b] == '/')
pvd->pv_uuid[b] = '#';
}
memset(&pvd->pv_uuid[ID_LEN], 0, sizeof(pvd->pv_uuid) - ID_LEN);
}
/* If UUID is missing, create one */
if (pvd->pv_uuid[0] == '\0') {
uuid_from_num((char *)pvd->pv_uuid, pvd->pv_number);
pvd->pv_uuid[ID_LEN] = '\0';
}
return 1;
}
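For the version-1 branch above, pe_start is simply the end of the on-disk PE descriptor area converted to sectors: (pe_on_disk.base + pe_on_disk.size) >> SECTOR_SHIFT. A worked example with made-up numbers:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t base = 4608, size = 61440;     /* illustrative byte values */
        uint32_t pe_start = (base + size) >> 9; /* SECTOR_SHIFT */

        assert(pe_start == 129);                /* 66048 bytes / 512 */
        return 0;
}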
/*
* If exported, remove "PV_EXP" from end of VG name
*/
static void _munge_exported_vg(struct pv_disk *pvd)
{
int l;
size_t s;
/* Return if PV not in a VG */
if ((!*pvd->vg_name))
return;
/* FIXME also check vgd->status & VG_EXPORTED? */
l = strlen((char *)pvd->vg_name);
s = sizeof(EXPORTED_TAG);
if (!strncmp((char *)pvd->vg_name + l - s + 1, EXPORTED_TAG, s)) {
pvd->vg_name[l - s + 1] = '\0';
pvd->pv_status |= VG_EXPORTED;
}
}
int munge_pvd(struct device *dev, struct pv_disk *pvd)
{
_xlate_pvd(pvd);
if (pvd->id[0] != 'H' || pvd->id[1] != 'M') {
log_very_verbose("%s does not have a valid LVM1 PV identifier",
dev_name(dev));
return 0;
}
if (!_munge_formats(pvd)) {
log_very_verbose("format1: Unknown metadata version %d "
"found on %s", pvd->version, dev_name(dev));
return 0;
}
/* If VG is exported, set VG name back to the real name */
_munge_exported_vg(pvd);
return 1;
}
static int _read_pvd(struct device *dev, struct pv_disk *pvd)
{
if (!dev_read(dev, UINT64_C(0), sizeof(*pvd), DEV_IO_FMT1, pvd)) {
log_very_verbose("Failed to read PV data from %s",
dev_name(dev));
return 0;
}
return munge_pvd(dev, pvd);
}
static int _read_lvd(struct device *dev, uint64_t pos, struct lv_disk *disk)
{
if (!dev_read(dev, pos, sizeof(*disk), DEV_IO_FMT1, disk))
return_0;
_xlate_lvd(disk);
return 1;
}
int read_vgd(struct device *dev, struct vg_disk *vgd, struct pv_disk *pvd)
{
uint64_t pos = pvd->vg_on_disk.base;
if (!dev_read(dev, pos, sizeof(*vgd), DEV_IO_FMT1, vgd))
return_0;
_xlate_vgd(vgd);
if ((vgd->lv_max > MAX_LV) || (vgd->pv_max > MAX_PV))
return_0;
/* If UUID is missing, create one */
if (vgd->vg_uuid[0] == '\0')
uuid_from_num((char *)vgd->vg_uuid, vgd->vg_number);
return 1;
}
static int _read_uuids(struct disk_list *data)
{
unsigned num_read = 0;
struct uuid_list *ul;
char buffer[NAME_LEN] __attribute__((aligned(8)));
uint64_t pos = data->pvd.pv_uuidlist_on_disk.base;
uint64_t end = pos + data->pvd.pv_uuidlist_on_disk.size;
while (pos < end && num_read < data->vgd.pv_cur) {
if (!dev_read(data->dev, pos, sizeof(buffer), DEV_IO_FMT1, buffer))
return_0;
if (!(ul = dm_pool_alloc(data->mem, sizeof(*ul))))
return_0;
memcpy(ul->uuid, buffer, NAME_LEN);
ul->uuid[NAME_LEN - 1] = '\0';
dm_list_add(&data->uuids, &ul->list);
pos += NAME_LEN;
num_read++;
}
return 1;
}
static int _check_lvd(struct lv_disk *lvd)
{
return !(lvd->lv_name[0] == '\0');
}
static int _read_lvs(struct disk_list *data)
{
unsigned int i, lvs_read = 0;
uint64_t pos;
struct lvd_list *ll;
struct vg_disk *vgd = &data->vgd;
for (i = 0; (i < vgd->lv_max) && (lvs_read < vgd->lv_cur); i++) {
pos = data->pvd.lv_on_disk.base + (i * sizeof(struct lv_disk));
ll = dm_pool_alloc(data->mem, sizeof(*ll));
if (!ll)
return_0;
if (!_read_lvd(data->dev, pos, &ll->lvd))
return_0;
if (!_check_lvd(&ll->lvd))
continue;
lvs_read++;
dm_list_add(&data->lvds, &ll->list);
}
return 1;
}
static int _read_extents(struct disk_list *data)
{
size_t len = sizeof(struct pe_disk) * data->pvd.pe_total;
struct pe_disk *extents = dm_pool_alloc(data->mem, len);
uint64_t pos = data->pvd.pe_on_disk.base;
if (!extents)
return_0;
if (!dev_read(data->dev, pos, len, DEV_IO_FMT1, extents))
return_0;
_xlate_extents(extents, data->pvd.pe_total);
data->extents = extents;
return 1;
}
static void __update_lvmcache(const struct format_type *fmt,
struct disk_list *dl,
struct device *dev, const char *vgid,
unsigned exported)
{
struct lvmcache_info *info;
const char *vgname = *((char *)dl->pvd.vg_name) ?
(char *)dl->pvd.vg_name : fmt->orphan_vg_name;
if (!(info = lvmcache_add(fmt->labeller, (char *)dl->pvd.pv_uuid, dev,
vgname, vgid, exported ? EXPORTED_VG : 0))) {
stack;
return;
}
lvmcache_set_device_size(info, ((uint64_t)xlate32(dl->pvd.pv_size)) << SECTOR_SHIFT);
lvmcache_del_mdas(info);
lvmcache_make_valid(info);
}
static struct disk_list *__read_disk(const struct format_type *fmt,
struct device *dev, struct dm_pool *mem,
const char *vg_name)
{
struct disk_list *dl = dm_pool_zalloc(mem, sizeof(*dl));
const char *name = dev_name(dev);
if (!dl)
return_NULL;
dl->dev = dev;
dl->mem = mem;
dm_list_init(&dl->uuids);
dm_list_init(&dl->lvds);
if (!_read_pvd(dev, &dl->pvd))
goto_bad;
/*
* is it an orphan?
*/
if (!*dl->pvd.vg_name) {
log_very_verbose("%s is not a member of any format1 VG", name);
__update_lvmcache(fmt, dl, dev, fmt->orphan_vg_name, 0);
return (vg_name) ? NULL : dl;
}
if (!read_vgd(dl->dev, &dl->vgd, &dl->pvd)) {
log_error("Failed to read VG data from PV (%s)", name);
__update_lvmcache(fmt, dl, dev, fmt->orphan_vg_name, 0);
goto bad;
}
if (vg_name && strcmp(vg_name, (char *)dl->pvd.vg_name)) {
log_very_verbose("%s is not a member of the VG %s",
name, vg_name);
__update_lvmcache(fmt, dl, dev, fmt->orphan_vg_name, 0);
goto bad;
}
__update_lvmcache(fmt, dl, dev, (char *)dl->vgd.vg_uuid,
dl->vgd.vg_status & VG_EXPORTED);
if (!_read_uuids(dl)) {
log_error("Failed to read PV uuid list from %s", name);
goto bad;
}
if (!_read_lvs(dl)) {
log_error("Failed to read LV's from %s", name);
goto bad;
}
if (!_read_extents(dl)) {
log_error("Failed to read extents from %s", name);
goto bad;
}
log_very_verbose("Found %s in %sVG %s", name,
(dl->vgd.vg_status & VG_EXPORTED) ? "exported " : "",
dl->pvd.vg_name);
return dl;
bad:
dm_pool_free(dl->mem, dl);
return NULL;
}
struct disk_list *read_disk(const struct format_type *fmt, struct device *dev,
struct dm_pool *mem, const char *vg_name)
{
struct disk_list *dl;
if (!dev_open_readonly(dev))
return_NULL;
dl = __read_disk(fmt, dev, mem, vg_name);
if (!dev_close(dev))
stack;
return dl;
}
static void _add_pv_to_list(struct cmd_context *cmd, struct dm_list *head, struct disk_list *data)
{
struct pv_disk *pvd;
struct disk_list *diskl;
dm_list_iterate_items(diskl, head) {
pvd = &diskl->pvd;
if (!strncmp((char *)data->pvd.pv_uuid, (char *)pvd->pv_uuid,
sizeof(pvd->pv_uuid))) {
if (!dev_subsystem_part_major(cmd->dev_types, data->dev)) {
log_very_verbose("Ignoring duplicate PV %s on "
"%s", pvd->pv_uuid,
dev_name(data->dev));
return;
}
log_very_verbose("Duplicate PV %s - using %s %s",
pvd->pv_uuid, dev_subsystem_name(cmd->dev_types, data->dev),
dev_name(data->dev));
dm_list_del(&diskl->list);
break;
}
}
dm_list_add(head, &data->list);
}
struct _read_pvs_in_vg_baton {
const char *vg_name;
struct dm_list *head;
struct disk_list *data;
struct dm_pool *mem;
int empty;
};
static int _read_pv_in_vg(struct lvmcache_info *info, void *baton)
{
struct _read_pvs_in_vg_baton *b = baton;
b->empty = 0;
if (!lvmcache_device(info) ||
!(b->data = read_disk(lvmcache_fmt(info), lvmcache_device(info), b->mem, b->vg_name)))
return 0; /* stop here */
_add_pv_to_list(lvmcache_fmt(info)->cmd, b->head, b->data);
return 1;
}
/*
* Build a list of disk_list structures, allocated from mem.
* We keep track of the first object allocated from the pool
* so we can free off all the memory if something goes wrong.
*/
int read_pvs_in_vg(const struct format_type *fmt, const char *vg_name,
struct dev_filter *filter, struct dm_pool *mem,
struct dm_list *head)
{
struct dev_iter *iter;
struct device *dev;
struct lvmcache_vginfo *vginfo;
struct _read_pvs_in_vg_baton baton;
baton.head = head;
baton.empty = 1;
baton.data = NULL;
baton.mem = mem;
baton.vg_name = vg_name;
/* Fast path if we already saw this VG and cached the list of PVs */
if (vg_name && (vginfo = lvmcache_vginfo_from_vgname(vg_name, NULL))) {
lvmcache_foreach_pv(vginfo, _read_pv_in_vg, &baton);
if (!baton.empty) {
/* Did we find the whole VG? */
if (!vg_name || is_orphan_vg(vg_name) ||
(baton.data && *baton.data->pvd.vg_name &&
dm_list_size(head) == baton.data->vgd.pv_cur))
return 1;
/* Failed */
dm_list_init(head);
/* vgcache_del(vg_name); */
}
}
if (!(iter = dev_iter_create(filter, 1))) {
log_error("read_pvs_in_vg: dev_iter_create failed");
return 0;
}
/* Otherwise do a complete scan */
for (dev = dev_iter_get(iter); dev; dev = dev_iter_get(iter)) {
if ((baton.data = read_disk(fmt, dev, mem, vg_name))) {
_add_pv_to_list(fmt->cmd, head, baton.data);
}
}
dev_iter_destroy(iter);
if (dm_list_empty(head))
return 0;
return 1;
}
static int _write_vgd(struct disk_list *data)
{
struct vg_disk *vgd = &data->vgd;
uint64_t pos = data->pvd.vg_on_disk.base;
log_debug_metadata("Writing %s VG metadata to %s at %" PRIu64 " len %" PRIsize_t,
data->pvd.vg_name, dev_name(data->dev), pos, sizeof(*vgd));
_xlate_vgd(vgd);
if (!dev_write(data->dev, pos, sizeof(*vgd), DEV_IO_FMT1, vgd))
return_0;
_xlate_vgd(vgd);
return 1;
}
static int _write_uuids(struct disk_list *data)
{
struct uuid_list *ul;
uint64_t pos = data->pvd.pv_uuidlist_on_disk.base;
uint64_t end = pos + data->pvd.pv_uuidlist_on_disk.size;
dm_list_iterate_items(ul, &data->uuids) {
if (pos >= end) {
log_error("Too many uuids to fit on %s",
dev_name(data->dev));
return 0;
}
log_debug_metadata("Writing %s uuidlist to %s at %" PRIu64 " len %d",
data->pvd.vg_name, dev_name(data->dev),
pos, NAME_LEN);
if (!dev_write(data->dev, pos, NAME_LEN, DEV_IO_FMT1, ul->uuid))
return_0;
pos += NAME_LEN;
}
return 1;
}
static int _write_lvd(struct device *dev, uint64_t pos, struct lv_disk *disk)
{
log_debug_metadata("Writing %s LV %s metadata to %s at %" PRIu64 " len %"
PRIsize_t, disk->vg_name, disk->lv_name, dev_name(dev),
pos, sizeof(*disk));
_xlate_lvd(disk);
if (!dev_write(dev, pos, sizeof(*disk), DEV_IO_FMT1, disk))
return_0;
_xlate_lvd(disk);
return 1;
}
static int _write_lvs(struct disk_list *data)
{
struct lvd_list *ll;
uint64_t pos, offset;
pos = data->pvd.lv_on_disk.base;
if (!dev_set(data->dev, pos, data->pvd.lv_on_disk.size, DEV_IO_FMT1, 0)) {
log_error("Couldn't zero lv area on device '%s'",
dev_name(data->dev));
return 0;
}
dm_list_iterate_items(ll, &data->lvds) {
offset = sizeof(struct lv_disk) * ll->lvd.lv_number;
if (offset + sizeof(struct lv_disk) > data->pvd.lv_on_disk.size) {
log_error("lv_number %d too large", ll->lvd.lv_number);
return 0;
}
if (!_write_lvd(data->dev, pos + offset, &ll->lvd))
return_0;
}
return 1;
}
static int _write_extents(struct disk_list *data)
{
size_t len = sizeof(struct pe_disk) * data->pvd.pe_total;
struct pe_disk *extents = data->extents;
uint64_t pos = data->pvd.pe_on_disk.base;
log_debug_metadata("Writing %s extents metadata to %s at %" PRIu64 " len %"
PRIsize_t, data->pvd.vg_name, dev_name(data->dev),
pos, len);
_xlate_extents(extents, data->pvd.pe_total);
if (!dev_write(data->dev, pos, len, DEV_IO_FMT1, extents))
return_0;
_xlate_extents(extents, data->pvd.pe_total);
return 1;
}
static int _write_pvd(struct disk_list *data)
{
char *buf;
uint64_t pos = data->pvd.pv_on_disk.base;
size_t size = data->pvd.pv_on_disk.size;
if (size < sizeof(struct pv_disk)) {
log_error("Invalid PV structure size.");
return 0;
}
/* Make sure that the gap between the PV structure and
the next one is zeroed in order to make non LVM tools
happy (idea from AED) */
buf = dm_zalloc(size);
if (!buf) {
log_error("Couldn't allocate temporary PV buffer.");
return 0;
}
memcpy(buf, &data->pvd, sizeof(struct pv_disk));
log_debug_metadata("Writing %s PV metadata to %s at %" PRIu64 " len %"
PRIsize_t, data->pvd.vg_name, dev_name(data->dev),
pos, size);
_xlate_pvd((struct pv_disk *) buf);
if (!dev_write(data->dev, pos, size, DEV_IO_FMT1, buf)) {
dm_free(buf);
return_0;
}
dm_free(buf);
return 1;
}
/*
* assumes the device has been opened.
*/
static int __write_all_pvd(const struct format_type *fmt __attribute__((unused)),
struct disk_list *data, int write_vg_metadata)
{
const char *pv_name = dev_name(data->dev);
if (!_write_pvd(data)) {
log_error("Failed to write PV structure onto %s", pv_name);
return 0;
}
/* vgcache_add(data->pvd.vg_name, data->vgd.vg_uuid, data->dev, fmt); */
/*
* Stop here for orphan PVs or if VG metadata write not requested.
*/
if ((data->pvd.vg_name[0] == '\0') || !write_vg_metadata) {
/* if (!test_mode())
vgcache_add(data->pvd.vg_name, NULL, data->dev, fmt); */
return 1;
}
/* if (!test_mode())
vgcache_add(data->pvd.vg_name, data->vgd.vg_uuid, data->dev,
fmt); */
if (!_write_vgd(data)) {
log_error("Failed to write VG data to %s", pv_name);
return 0;
}
if (!_write_uuids(data)) {
log_error("Failed to write PV uuid list to %s", pv_name);
return 0;
}
if (!_write_lvs(data)) {
log_error("Failed to write LV's to %s", pv_name);
return 0;
}
if (!_write_extents(data)) {
log_error("Failed to write extents to %s", pv_name);
return 0;
}
return 1;
}
/*
* opens the device and hands it to the above fn.
*/
static int _write_all_pvd(const struct format_type *fmt, struct disk_list *data, int write_vg_metadata)
{
int r;
if (!data->dev)
return_0;
if (!dev_open(data->dev))
return_0;
r = __write_all_pvd(fmt, data, write_vg_metadata);
if (!dev_close(data->dev))
stack;
return r;
}
/*
* Writes all the given pv's to disk. Does very
* little sanity checking, so make sure correct
* data is passed to here.
*/
int write_disks(const struct format_type *fmt, struct dm_list *pvs, int write_vg_metadata)
{
struct disk_list *dl;
dm_list_iterate_items(dl, pvs) {
if (!(_write_all_pvd(fmt, dl, write_vg_metadata)))
return_0;
log_very_verbose("Successfully wrote data to %s",
dev_name(dl->dev));
}
return 1;
}

lib/format1/disk-rep.h (new file, 250 lines)

@@ -0,0 +1,250 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef DISK_REP_FORMAT1_H
#define DISK_REP_FORMAT1_H
#include "metadata.h"
#include "toolcontext.h"
#define MAX_PV 256
#define MAX_LV 256
#define MAX_VG 99
#define LVM_BLK_MAJOR 58
#define MAX_PV_SIZE ((uint32_t) -1) /* 2TB in sectors - 1 */
#define PE_SIZE_PV_SIZE_REL 5 /* PV size must be at least 5 times PE size */
#define MAX_LE_TOTAL 65534 /* 2^16 - 2 */
#define MAX_PE_TOTAL ((uint32_t) -2)
#define UNMAPPED_EXTENT 0
/* volume group */
#define VG_ACTIVE 0x01 /* vg_status */
#define VG_EXPORTED 0x02 /* " */
#define VG_EXTENDABLE 0x04 /* " */
#define VG_READ 0x01 /* vg_access */
#define VG_WRITE 0x02 /* " */
#define VG_CLUSTERED 0x04 /* " */
#define VG_SHARED 0x08 /* " */
/* logical volume */
#define LV_ACTIVE 0x01 /* lv_status */
#define LV_SPINDOWN 0x02 /* " */
#define LV_PERSISTENT_MINOR 0x04 /* " */
#define LV_READ 0x01 /* lv_access */
#define LV_WRITE 0x02 /* " */
#define LV_SNAPSHOT 0x04 /* " */
#define LV_SNAPSHOT_ORG 0x08 /* " */
#define LV_BADBLOCK_ON 0x01 /* lv_badblock */
#define LV_STRICT 0x01 /* lv_allocation */
#define LV_CONTIGUOUS 0x02 /* " */
/* physical volume */
#define PV_ACTIVE 0x01 /* pv_status */
#define PV_ALLOCATABLE 0x02 /* pv_allocatable */
#define EXPORTED_TAG "PV_EXP" /* Identifier for exported PV */
#define IMPORTED_TAG "PV_IMP" /* Identifier for imported PV */
struct data_area {
uint32_t base;
uint32_t size;
} __attribute__ ((packed));
struct pv_disk {
int8_t id[2];
uint16_t version; /* lvm version */
struct data_area pv_on_disk;
struct data_area vg_on_disk;
struct data_area pv_uuidlist_on_disk;
struct data_area lv_on_disk;
struct data_area pe_on_disk;
int8_t pv_uuid[NAME_LEN];
int8_t vg_name[NAME_LEN];
int8_t system_id[NAME_LEN]; /* for vgexport/vgimport */
uint32_t pv_major;
uint32_t pv_number;
uint32_t pv_status;
uint32_t pv_allocatable;
uint32_t pv_size;
uint32_t lv_cur;
uint32_t pe_size;
uint32_t pe_total;
uint32_t pe_allocated;
/* only present on version == 2 pv's */
uint32_t pe_start;
} __attribute__ ((packed));
struct lv_disk {
int8_t lv_name[NAME_LEN];
int8_t vg_name[NAME_LEN];
uint32_t lv_access;
uint32_t lv_status;
uint32_t lv_open;
uint32_t lv_dev;
uint32_t lv_number;
uint32_t lv_mirror_copies; /* for future use */
uint32_t lv_recovery; /* " */
uint32_t lv_schedule; /* " */
uint32_t lv_size;
uint32_t lv_snapshot_minor; /* minor number of original */
uint16_t lv_chunk_size; /* chunk size of snapshot */
uint16_t dummy;
uint32_t lv_allocated_le;
uint32_t lv_stripes;
uint32_t lv_stripesize;
uint32_t lv_badblock; /* for future use */
uint32_t lv_allocation;
uint32_t lv_io_timeout; /* for future use */
uint32_t lv_read_ahead;
} __attribute__ ((packed));
struct vg_disk {
int8_t vg_uuid[ID_LEN]; /* volume group UUID */
int8_t vg_name_dummy[NAME_LEN - ID_LEN]; /* rest of v1 VG name */
uint32_t vg_number; /* volume group number */
uint32_t vg_access; /* read/write */
uint32_t vg_status; /* active or not */
uint32_t lv_max; /* maximum logical volumes */
uint32_t lv_cur; /* current logical volumes */
uint32_t lv_open; /* open logical volumes */
uint32_t pv_max; /* maximum physical volumes */
uint32_t pv_cur; /* current physical volumes FU */
uint32_t pv_act; /* active physical volumes */
uint32_t dummy;
uint32_t vgda; /* volume group descriptor arrays FU */
uint32_t pe_size; /* physical extent size in sectors */
uint32_t pe_total; /* total of physical extents */
uint32_t pe_allocated; /* allocated physical extents */
uint32_t pvg_total; /* physical volume groups FU */
} __attribute__ ((packed));
struct pe_disk {
uint16_t lv_num;
uint16_t le_num;
} __attribute__ ((packed));
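Because these structures are read straight off disk, none of them may pick up compiler padding, which is what the packed attribute guarantees. A compile-time sanity check one could add next to the definitions (a sketch, assuming the header above is included):

#include <assert.h>     /* static_assert (C11) */

static_assert(sizeof(struct data_area) == 8, "data_area must be 8 bytes");
static_assert(sizeof(struct pe_disk) == 4, "pe_disk must be 4 bytes");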
struct uuid_list {
struct dm_list list;
char uuid[NAME_LEN] __attribute__((aligned(8)));
};
struct lvd_list {
struct dm_list list;
struct lv_disk lvd;
};
struct disk_list {
struct dm_list list;
struct dm_pool *mem;
struct device *dev;
struct pv_disk pvd __attribute__((aligned(8)));
struct vg_disk vgd __attribute__((aligned(8)));
struct dm_list uuids __attribute__((aligned(8)));
struct dm_list lvds __attribute__((aligned(8)));
struct pe_disk *extents __attribute__((aligned(8)));
};
/*
* Layout constants.
*/
#define METADATA_ALIGN 4096UL
#define LVM1_PE_ALIGN (65536UL >> SECTOR_SHIFT) /* PE alignment */
#define METADATA_BASE 0UL
#define PV_SIZE 1024UL
#define VG_SIZE 4096UL
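For reference, with 512-byte sectors (SECTOR_SHIFT = 9) LVM1_PE_ALIGN evaluates to 65536 / 512 = 128 sectors, so physical extents are aligned to 64 KiB, while METADATA_ALIGN keeps metadata on 4 KiB boundaries.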
/*
* Functions to calculate layout info.
*/
int calculate_layout(struct disk_list *dl);
int calculate_extent_count(struct physical_volume *pv, uint32_t extent_size,
uint32_t max_extent_count, uint64_t pe_start);
/*
* Low level io routines which read/write
* disk_lists.
*/
struct disk_list *read_disk(const struct format_type *fmt, struct device *dev,
struct dm_pool *mem, const char *vg_name);
int read_pvs_in_vg(const struct format_type *fmt, const char *vg_name,
struct dev_filter *filter,
struct dm_pool *mem, struct dm_list *results);
int write_disks(const struct format_type *fmt, struct dm_list *pvds,
int write_vg_metadata);
/*
* Functions to translate to between disk and in
* core structures.
*/
int import_pv(const struct format_type *fmt, struct dm_pool *mem,
struct device *dev, struct volume_group *vg,
struct physical_volume *pv, struct pv_disk *pvd,
struct vg_disk *vgd);
int export_pv(struct cmd_context *cmd, struct dm_pool *mem,
struct volume_group *vg,
struct pv_disk *pvd, struct physical_volume *pv);
int import_vg(struct dm_pool *mem,
struct volume_group *vg, struct disk_list *dl);
int export_vg(struct vg_disk *vgd, struct volume_group *vg);
int import_lv(struct cmd_context *cmd, struct dm_pool *mem,
struct logical_volume *lv, struct lv_disk *lvd);
int import_extents(struct cmd_context *cmd, struct volume_group *vg,
struct dm_list *pvds);
int export_extents(struct disk_list *dl, uint32_t lv_num,
struct logical_volume *lv, struct physical_volume *pv);
int import_pvs(const struct format_type *fmt, struct dm_pool *mem,
struct volume_group *vg, struct dm_list *pvds);
int import_lvs(struct dm_pool *mem, struct volume_group *vg, struct dm_list *pvds);
int export_lvs(struct disk_list *dl, struct volume_group *vg,
struct physical_volume *pv, const char *dev_dir);
int import_snapshots(struct dm_pool *mem, struct volume_group *vg,
struct dm_list *pvds);
int export_uuids(struct disk_list *dl, struct volume_group *vg);
void export_numbers(struct dm_list *pvds, struct volume_group *vg);
void export_pv_act(struct dm_list *pvds);
int munge_pvd(struct device *dev, struct pv_disk *pvd);
int read_vgd(struct device *dev, struct vg_disk *vgd, struct pv_disk *pvd);
/* blech */
int get_free_vg_number(struct format_instance *fid, struct dev_filter *filter,
const char *candidate_vg, int *result);
int export_vg_number(struct format_instance *fid, struct dm_list *pvds,
const char *vg_name, struct dev_filter *filter);
int generate_lvm1_system_id(struct cmd_context *cmd, char *s, const char *prefix);
#endif

631
lib/format1/format1.c Normal file

@@ -0,0 +1,631 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib.h"
#include "disk-rep.h"
#include "limits.h"
#include "display.h"
#include "toolcontext.h"
#include "lvm1-label.h"
#include "format1.h"
#include "segtype.h"
#include "pv_alloc.h"
/* VG consistency checks */
static int _check_vgs(struct dm_list *pvs, struct volume_group *vg)
{
struct dm_list *pvh, *t;
struct disk_list *dl = NULL;
struct disk_list *first = NULL;
uint32_t pv_count = 0;
uint32_t exported = 0;
int first_time = 1;
/*
* If there are exported and unexported PVs, ignore exported ones.
* This means an active VG won't be affected if disks are inserted
* bearing an exported VG with the same name.
*/
dm_list_iterate_items(dl, pvs) {
if (first_time) {
exported = dl->pvd.pv_status & VG_EXPORTED;
first_time = 0;
continue;
}
if (exported != (dl->pvd.pv_status & VG_EXPORTED)) {
/* Remove exported PVs */
dm_list_iterate_safe(pvh, t, pvs) {
dl = dm_list_item(pvh, struct disk_list);
if (dl->pvd.pv_status & VG_EXPORTED)
dm_list_del(pvh);
}
break;
}
}
/* Remove any PVs with VG structs that differ from the first */
dm_list_iterate_safe(pvh, t, pvs) {
dl = dm_list_item(pvh, struct disk_list);
if (!first)
first = dl;
else if (memcmp(&first->vgd, &dl->vgd, sizeof(first->vgd))) {
log_error("VG data differs between PVs %s and %s",
dev_name(first->dev), dev_name(dl->dev));
log_debug_metadata("VG data on %s: %s %s %" PRIu32 " %" PRIu32
" %" PRIu32 " %" PRIu32 " %" PRIu32 " %"
PRIu32 " %" PRIu32 " %" PRIu32 " %" PRIu32
" %" PRIu32 " %" PRIu32 " %" PRIu32 " %"
PRIu32 " %" PRIu32 " %" PRIu32,
dev_name(first->dev), first->vgd.vg_uuid,
first->vgd.vg_name_dummy,
first->vgd.vg_number, first->vgd.vg_access,
first->vgd.vg_status, first->vgd.lv_max,
first->vgd.lv_cur, first->vgd.lv_open,
first->vgd.pv_max, first->vgd.pv_cur,
first->vgd.pv_act, first->vgd.dummy,
first->vgd.vgda, first->vgd.pe_size,
first->vgd.pe_total, first->vgd.pe_allocated,
first->vgd.pvg_total);
log_debug_metadata("VG data on %s: %s %s %" PRIu32 " %" PRIu32
" %" PRIu32 " %" PRIu32 " %" PRIu32 " %"
PRIu32 " %" PRIu32 " %" PRIu32 " %" PRIu32
" %" PRIu32 " %" PRIu32 " %" PRIu32 " %"
PRIu32 " %" PRIu32 " %" PRIu32,
dev_name(dl->dev), dl->vgd.vg_uuid,
dl->vgd.vg_name_dummy, dl->vgd.vg_number,
dl->vgd.vg_access, dl->vgd.vg_status,
dl->vgd.lv_max, dl->vgd.lv_cur,
dl->vgd.lv_open, dl->vgd.pv_max,
dl->vgd.pv_cur, dl->vgd.pv_act, dl->vgd.dummy,
dl->vgd.vgda, dl->vgd.pe_size,
dl->vgd.pe_total, dl->vgd.pe_allocated,
dl->vgd.pvg_total);
dm_list_del(pvh);
return 0;
}
pv_count++;
}
/* On entry to fn, list known to be non-empty */
if (pv_count != first->vgd.pv_cur) {
log_error("%d PV(s) found for VG %s: expected %d",
pv_count, first->pvd.vg_name, first->vgd.pv_cur);
vg->status |= PARTIAL_VG;
}
return 1;
}
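/*
* A partial VG has fewer PVs present than vg_disk.pv_cur claims.  To
* keep the extent accounting consistent, fabricate one placeholder PV,
* flagged MISSING_PV, covering all of the unaccounted-for extents.
*/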
static int _fix_partial_vg(struct volume_group *vg, struct dm_list *pvs)
{
uint32_t extent_count = 0;
struct disk_list *dl;
struct dm_list *pvh;
struct pv_list *pvl;
struct lv_list *ll;
struct lv_segment *seg;
/*
* FIXME: code should remap missing segments to error segment.
* Also current mapping code allocates 1 segment per missing extent.
* For now bail out completely - allocated structures are not complete
*/
dm_list_iterate_items(ll, &vg->lvs)
dm_list_iterate_items(seg, &ll->lv->segments) {
/* area_count is always 1 here, s == 0 */
if (seg_type(seg, 0) != AREA_PV)
continue;
if (seg_pv(seg, 0))
continue;
log_error("Partial mode support for missing lvm1 PVs and "
"partially available LVs is currently not implemented.");
return 0;
}
dm_list_iterate(pvh, pvs) {
dl = dm_list_item(pvh, struct disk_list);
extent_count += dl->pvd.pe_total;
}
/* FIXME: move this to one place to pv_manip */
if (!(pvl = dm_pool_zalloc(vg->vgmem, sizeof(*pvl))) ||
!(pvl->pv = dm_pool_zalloc(vg->vgmem, sizeof(*pvl->pv))))
return_0;
/* Use the VG UUID with its first chars replaced by "missing" as the missing PV UUID */
memcpy(&pvl->pv->id.uuid, vg->id.uuid, sizeof(pvl->pv->id.uuid));
memcpy(&pvl->pv->id.uuid, "missing", 7);
if (!(pvl->pv->vg_name = dm_pool_strdup(vg->vgmem, vg->name)))
goto_out;
memcpy(&pvl->pv->vgid, &vg->id, sizeof(vg->id));
pvl->pv->status |= MISSING_PV;
dm_list_init(&pvl->pv->tags);
dm_list_init(&pvl->pv->segments);
pvl->pv->pe_size = vg->extent_size;
pvl->pv->pe_count = vg->extent_count - extent_count;
if (!alloc_pv_segment_whole_pv(vg->vgmem, pvl->pv))
goto_out;
add_pvl_to_vgs(vg, pvl);
log_debug_metadata("%s: partial VG, allocated missing PV using %d extents.",
vg->name, pvl->pv->pe_count);
return 1;
out:
dm_pool_free(vg->vgmem, pvl);
return 0;
}
static struct volume_group *_format1_vg_read(struct format_instance *fid,
const char *vg_name,
struct metadata_area *mda __attribute__((unused)),
struct cached_vg_fmtdata **vg_fmtdata __attribute__((unused)),
unsigned *use_previous_vg __attribute__((unused)),
int single_device __attribute__((unused)))
{
struct volume_group *vg;
struct disk_list *dl;
DM_LIST_INIT(pvs);
/* Strip dev_dir if present */
if (vg_name)
vg_name = strip_dir(vg_name, fid->fmt->cmd->dev_dir);
if (!(vg = alloc_vg("format1_vg_read", fid->fmt->cmd, NULL)))
return_NULL;
if (!read_pvs_in_vg(fid->fmt, vg_name, fid->fmt->cmd->filter,
vg->vgmem, &pvs))
goto_bad;
if (dm_list_empty(&pvs))
goto_bad;
if (!_check_vgs(&pvs, vg))
goto_bad;
dl = dm_list_item(pvs.n, struct disk_list);
if (!import_vg(vg->vgmem, vg, dl))
goto_bad;
if (!import_pvs(fid->fmt, vg->vgmem, vg, &pvs))
goto_bad;
if (!import_lvs(vg->vgmem, vg, &pvs))
goto_bad;
if (!import_extents(fid->fmt->cmd, vg, &pvs))
goto_bad;
/* FIXME: workaround - temporary assignment of fid */
vg->fid = fid;
if (!import_snapshots(vg->vgmem, vg, &pvs)) {
vg->fid = NULL;
goto_bad;
}
vg->fid = NULL;
/* Fix extents counts by adding missing PV if partial VG */
if ((vg->status & PARTIAL_VG) && !_fix_partial_vg(vg, &pvs))
goto_bad;
vg_set_fid(vg, fid);
return vg;
bad:
release_vg(vg);
return NULL;
}
static struct disk_list *_flatten_pv(struct format_instance *fid,
struct dm_pool *mem, struct volume_group *vg,
struct physical_volume *pv,
const char *dev_dir)
{
struct disk_list *dl = dm_pool_alloc(mem, sizeof(*dl));
if (!dl)
return_NULL;
dl->mem = mem;
dl->dev = pv->dev;
dm_list_init(&dl->uuids);
dm_list_init(&dl->lvds);
if (!export_pv(fid->fmt->cmd, mem, vg, &dl->pvd, pv) ||
!export_vg(&dl->vgd, vg) ||
!export_uuids(dl, vg) ||
!export_lvs(dl, vg, pv, dev_dir) || !calculate_layout(dl)) {
dm_pool_free(mem, dl);
return_NULL;
}
return dl;
}
static int _flatten_vg(struct format_instance *fid, struct dm_pool *mem,
struct volume_group *vg,
struct dm_list *pvds, const char *dev_dir,
struct dev_filter *filter)
{
struct pv_list *pvl;
struct disk_list *data;
dm_list_iterate_items(pvl, &vg->pvs) {
if (!(data = _flatten_pv(fid, mem, vg, pvl->pv, dev_dir)))
return_0;
dm_list_add(pvds, &data->list);
}
export_numbers(pvds, vg);
export_pv_act(pvds);
if (!export_vg_number(fid, pvds, vg->name, filter))
return_0;
return 1;
}
static int _format1_vg_write(struct format_instance *fid, struct volume_group *vg,
struct metadata_area *mda __attribute__((unused)))
{
struct dm_pool *mem = dm_pool_create("lvm1 vg_write", VG_MEMPOOL_CHUNK);
struct dm_list pvds;
int r = 0;
if (!mem)
return_0;
dm_list_init(&pvds);
r = (_flatten_vg(fid, mem, vg, &pvds, fid->fmt->cmd->dev_dir,
fid->fmt->cmd->filter) &&
write_disks(fid->fmt, &pvds, 1));
lvmcache_update_vg(vg, 0);
dm_pool_destroy(mem);
return r;
}
static int _format1_pv_read(const struct format_type *fmt, const char *pv_name,
struct physical_volume *pv, int scan_label_only __attribute__((unused)))
{
struct dm_pool *mem = dm_pool_create("lvm1 pv_read", 1024);
struct disk_list *dl;
struct device *dev;
int r = 0;
log_very_verbose("Reading physical volume data %s from disk", pv_name);
if (!mem)
return_0;
if (!(dev = dev_cache_get(pv_name, fmt->cmd->filter)))
goto_out;
if (!(dl = read_disk(fmt, dev, mem, NULL)))
goto_out;
if (!import_pv(fmt, fmt->cmd->mem, dl->dev, NULL, pv, &dl->pvd, &dl->vgd))
goto_out;
pv->fmt = fmt;
r = 1;
out:
dm_pool_destroy(mem);
return r;
}
static int _format1_pv_initialise(const struct format_type * fmt,
struct pv_create_args *pva,
struct physical_volume * pv)
{
if (pv->size > MAX_PV_SIZE)
pv->size--;
if (pv->size > MAX_PV_SIZE) {
log_error("Physical volumes cannot be bigger than %s",
display_size(fmt->cmd, (uint64_t) MAX_PV_SIZE));
return 0;
}
/* Nothing more to do if extent size isn't provided */
if (!pva->extent_size)
return 1;
/*
* This works out pe_start and pe_count.
*/
if (!calculate_extent_count(pv, pva->extent_size, pva->extent_count, pva->pe_start))
return_0;
/* Retain existing extent locations exactly */
if (((pva->pe_start || pva->extent_count) && (pva->pe_start != pv->pe_start)) ||
(pva->extent_count && (pva->extent_count != pv->pe_count))) {
log_error("Metadata would overwrite physical extents");
return 0;
}
return 1;
}
static int _format1_pv_setup(const struct format_type *fmt,
struct physical_volume *pv,
struct volume_group *vg)
{
struct pv_create_args pva = { .id = {{0}},
.idp = NULL,
.ba_start = 0,
.ba_size = 0,
.pe_start = 0,
.extent_count = 0,
.extent_size = vg->extent_size};
return _format1_pv_initialise(fmt, &pva, pv);
}
static int _format1_lv_setup(struct format_instance *fid, struct logical_volume *lv)
{
uint64_t max_size = UINT_MAX;
if (!*lv->lvid.s)
lvid_from_lvnum(&lv->lvid, &lv->vg->id, find_free_lvnum(lv));
if (lv->le_count > MAX_LE_TOTAL) {
log_error("logical volumes cannot contain more than "
"%d extents.", MAX_LE_TOTAL);
return 0;
}
if (lv->size > max_size) {
log_error("logical volumes cannot be larger than %s",
display_size(fid->fmt->cmd, max_size));
return 0;
}
return 1;
}
static int _format1_pv_write(const struct format_type *fmt, struct physical_volume *pv)
{
struct dm_pool *mem;
struct disk_list *dl;
struct dm_list pvs;
struct lvmcache_info *info;
int pe_count, pe_size, pe_start;
int r = 1;
if (!(info = lvmcache_add(fmt->labeller, (char *) &pv->id, pv->dev,
pv->vg_name, NULL, 0)))
return_0;
lvmcache_update_pv(info, pv, fmt);
lvmcache_del_mdas(info);
lvmcache_del_das(info);
lvmcache_del_bas(info);
dm_list_init(&pvs);
pe_count = pv->pe_count;
pe_size = pv->pe_size;
pe_start = pv->pe_start;
/* Ensure any residual PE structure is gone */
pv->pe_size = pv->pe_count = 0;
pv->pe_start = LVM1_PE_ALIGN;
if (!(mem = dm_pool_create("lvm1 pv_write", 1024)))
return_0;
if (!(dl = dm_pool_alloc(mem, sizeof(*dl))))
goto_bad;
dl->mem = mem;
dl->dev = pv->dev;
dm_list_init(&dl->uuids);
dm_list_init(&dl->lvds);
if (!export_pv(fmt->cmd, mem, NULL, &dl->pvd, pv))
goto_bad;
/* Must be set so that dev_write can zero the gap after the PV
structure, which keeps other disk tools happy */
dl->pvd.pv_on_disk.base = METADATA_BASE;
dl->pvd.pv_on_disk.size = PV_SIZE;
dl->pvd.pe_on_disk.base = LVM1_PE_ALIGN << SECTOR_SHIFT;
dm_list_add(&pvs, &dl->list);
if (!write_disks(fmt, &pvs, 0))
goto_bad;
goto out;
bad:
r = 0;
out:
pv->pe_size = pe_size;
pv->pe_count = pe_count;
pv->pe_start = pe_start;
dm_pool_destroy(mem);
return r;
}
static int _format1_vg_setup(struct format_instance *fid, struct volume_group *vg)
{
/* just check max_pv and max_lv */
if (!vg->max_lv || vg->max_lv >= MAX_LV)
vg->max_lv = MAX_LV - 1;
if (!vg->max_pv || vg->max_pv >= MAX_PV)
vg->max_pv = MAX_PV - 1;
if (!vg_check_new_extent_size(vg->fid->fmt, vg->extent_size))
return_0;
/* Generate lvm1_system_id if not yet set */
if (!*vg->lvm1_system_id &&
!generate_lvm1_system_id(vg->cmd, vg->lvm1_system_id, ""))
return_0;
return 1;
}
static int _format1_segtype_supported(struct format_instance *fid __attribute__((unused)),
const struct segment_type *segtype)
{
if (!(segtype->flags & SEG_FORMAT1_SUPPORT))
return_0;
return 1;
}
static struct metadata_area_ops _metadata_format1_ops = {
.vg_read = _format1_vg_read,
.vg_write = _format1_vg_write,
};
static struct format_instance *_format1_create_instance(const struct format_type *fmt,
const struct format_instance_ctx *fic)
{
struct format_instance *fid;
struct metadata_area *mda;
if (!(fid = alloc_fid(fmt, fic)))
return_NULL;
/* Define a NULL metadata area */
if (!(mda = dm_pool_zalloc(fid->mem, sizeof(*mda)))) {
log_error("Unable to allocate metadata area structure "
"for lvm1 format");
goto bad;
}
mda->ops = &_metadata_format1_ops;
mda->metadata_locn = NULL;
mda->status = 0;
dm_list_add(&fid->metadata_areas_in_use, &mda->list);
return fid;
bad:
dm_pool_destroy(fid->mem);
return NULL;
}
static void _format1_destroy_instance(struct format_instance *fid)
{
if (--fid->ref_count <= 1)
dm_pool_destroy(fid->mem);
}
static void _format1_destroy(struct format_type *fmt)
{
if (fmt->orphan_vg)
free_orphan_vg(fmt->orphan_vg);
dm_free(fmt);
}
static struct format_handler _format1_ops = {
.pv_read = _format1_pv_read,
.pv_initialise = _format1_pv_initialise,
.pv_setup = _format1_pv_setup,
.pv_write = _format1_pv_write,
.lv_setup = _format1_lv_setup,
.vg_setup = _format1_vg_setup,
.segtype_supported = _format1_segtype_supported,
.create_instance = _format1_create_instance,
.destroy_instance = _format1_destroy_instance,
.destroy = _format1_destroy,
};
#ifdef LVM1_INTERNAL
struct format_type *init_lvm1_format(struct cmd_context *cmd)
#else /* Shared */
struct format_type *init_format(struct cmd_context *cmd);
struct format_type *init_format(struct cmd_context *cmd)
#endif
{
struct format_type *fmt = dm_malloc(sizeof(*fmt));
struct format_instance_ctx fic;
struct format_instance *fid;
if (!fmt) {
log_error("Failed to allocate format1 format type structure.");
return NULL;
}
fmt->cmd = cmd;
fmt->ops = &_format1_ops;
fmt->name = FMT_LVM1_NAME;
fmt->alias = NULL;
fmt->orphan_vg_name = FMT_LVM1_ORPHAN_VG_NAME;
fmt->features = FMT_RESTRICTED_LVIDS | FMT_ORPHAN_ALLOCATABLE |
FMT_RESTRICTED_READAHEAD | FMT_OBSOLETE |
FMT_SYSTEMID_ON_PVS;
fmt->private = NULL;
dm_list_init(&fmt->mda_ops);
if (!(fmt->labeller = lvm1_labeller_create(fmt))) {
log_error("Couldn't create lvm1 label handler.");
dm_free(fmt);
return NULL;
}
if (!(label_register_handler(fmt->labeller))) {
log_error("Couldn't register lvm1 label handler.");
fmt->labeller->ops->destroy(fmt->labeller);
dm_free(fmt);
return NULL;
}
if (!(fmt->orphan_vg = alloc_vg("format1_orphan", cmd, fmt->orphan_vg_name))) {
log_error("Couldn't create lvm1 orphan VG.");
dm_free(fmt);
return NULL;
}
fic.type = FMT_INSTANCE_AUX_MDAS;
fic.context.vg_ref.vg_name = fmt->orphan_vg_name;
fic.context.vg_ref.vg_id = NULL;
if (!(fid = _format1_create_instance(fmt, &fic))) {
_format1_destroy(fmt);
return_NULL;
}
vg_set_fid(fmt->orphan_vg, fid);
log_very_verbose("Initialised format: %s", fmt->name);
return fmt;
}

29
lib/format1/format1.h Normal file

@@ -0,0 +1,29 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _LVM_FORMAT1_H
#define _LVM_FORMAT1_H
#include "metadata.h"
#include "lvmcache.h"
#define FMT_LVM1_NAME "lvm1"
#define FMT_LVM1_ORPHAN_VG_NAME ORPHAN_VG_NAME(FMT_LVM1_NAME)
#ifdef LVM1_INTERNAL
struct format_type *init_lvm1_format(struct cmd_context *cmd);
#endif
#endif

680
lib/format1/import-export.c Normal file

@@ -0,0 +1,680 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* Translates between disk and in-core formats.
*/
#include "lib.h"
#include "disk-rep.h"
#include "lvm-string.h"
#include "toolcontext.h"
#include "segtype.h"
#include "pv_alloc.h"
#include "display.h"
#include "metadata.h"
#include <time.h>
static int _check_vg_name(const char *name)
{
return strlen(name) < NAME_LEN;
}
/*
* Extracts the last part of a path.
*/
static char *_create_lv_name(struct dm_pool *mem, const char *full_name)
{
const char *ptr = strrchr(full_name, '/');
if (!ptr)
ptr = full_name;
else
ptr++;
return dm_pool_strdup(mem, ptr);
}
int import_pv(const struct format_type *fmt, struct dm_pool *mem,
struct device *dev, struct volume_group *vg,
struct physical_volume *pv, struct pv_disk *pvd,
struct vg_disk *vgd)
{
uint64_t size;
memset(pv, 0, sizeof(*pv));
memcpy(&pv->id, pvd->pv_uuid, ID_LEN);
pv->dev = dev;
if (!*pvd->vg_name)
pv->vg_name = fmt->orphan_vg_name;
else if (!(pv->vg_name = dm_pool_strdup(mem, (char *)pvd->vg_name))) {
log_error("Volume Group name allocation failed.");
return 0;
}
memcpy(&pv->vgid, vgd->vg_uuid, sizeof(vg->id));
/* Store system_id from first PV if PV belongs to a VG */
if (vg && !*vg->lvm1_system_id)
strncpy(vg->lvm1_system_id, (char *)pvd->system_id, NAME_LEN);
if (vg &&
strncmp(vg->lvm1_system_id, (char *)pvd->system_id, sizeof(pvd->system_id)))
log_very_verbose("System ID %s on %s differs from %s for "
"volume group", pvd->system_id,
pv_dev_name(pv), vg->lvm1_system_id);
/*
* If exported, we still need to flag it in pv->status too because
* we don't always have a struct volume_group when we need this.
*/
if (pvd->pv_status & VG_EXPORTED)
pv->status |= EXPORTED_VG;
if (pvd->pv_allocatable)
pv->status |= ALLOCATABLE_PV;
pv->size = pvd->pv_size;
pv->pe_size = pvd->pe_size;
pv->pe_start = pvd->pe_start;
pv->pe_count = pvd->pe_total;
pv->pe_alloc_count = 0;
pv->pe_align = 0;
pv->is_labelled = 0; /* format1 PVs have no label */
pv->label_sector = 0;
/* Fix up pv size if missing or impossibly large */
if (!pv->size || pv->size > (1ULL << 62)) {
if (!dev_get_size(dev, &pv->size)) {
log_error("%s: Couldn't get size.", pv_dev_name(pv));
return 0;
}
log_verbose("Fixing up missing format1 size (%s) "
"for PV %s", display_size(fmt->cmd, pv->size),
pv_dev_name(pv));
if (vg) {
size = pv->pe_count * (uint64_t) vg->extent_size +
pv->pe_start;
if (size > pv->size)
log_warn("WARNING: Physical Volume %s is too "
"large for underlying device",
pv_dev_name(pv));
}
}
dm_list_init(&pv->tags);
dm_list_init(&pv->segments);
if (!alloc_pv_segment_whole_pv(mem, pv))
return_0;
return 1;
}
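/*
* The generated system_id is "<prefix><hostname><seconds since epoch>".
* With an empty prefix and, purely as an illustration, a host called
* "node1" at time 1513264000 this gives "node11513264000".
*/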
int generate_lvm1_system_id(struct cmd_context *cmd, char *s, const char *prefix)
{
if (dm_snprintf(s, NAME_LEN, "%s%s" FMTu64,
prefix, cmd->hostname, (uint64_t)time(NULL)) < 0) {
log_error("Generated LVM1 format system_id too long");
return 0;
}
return 1;
}
int export_pv(struct cmd_context *cmd, struct dm_pool *mem __attribute__((unused)),
struct volume_group *vg,
struct pv_disk *pvd, struct physical_volume *pv)
{
memset(pvd, 0, sizeof(*pvd));
pvd->id[0] = 'H';
pvd->id[1] = 'M';
pvd->version = 1;
memcpy(pvd->pv_uuid, pv->id.uuid, ID_LEN);
if (pv->vg_name && !is_orphan(pv) && !(pv->status & UNLABELLED_PV)) {
if (!_check_vg_name(pv->vg_name))
return_0;
strncpy((char *)pvd->vg_name, pv->vg_name, sizeof(pvd->vg_name));
}
/* Preserve existing system_id if it exists */
if (vg && vg->lvm1_system_id && *vg->lvm1_system_id)
strncpy((char *)pvd->system_id, vg->lvm1_system_id, sizeof(pvd->system_id));
else if (vg && vg->system_id && *vg->system_id)
strncpy((char *)pvd->system_id, vg->system_id, sizeof(pvd->system_id));
/* Is VG already exported or being exported? */
if (vg && vg_is_exported(vg)) {
/* Does system_id need setting? */
if (!vg->lvm1_system_id || !*vg->lvm1_system_id ||
strncmp(vg->lvm1_system_id, EXPORTED_TAG,
sizeof(EXPORTED_TAG) - 1)) {
if (!generate_lvm1_system_id(cmd, (char *)pvd->system_id, EXPORTED_TAG))
return_0;
}
if (strlen((char *)pvd->vg_name) + sizeof(EXPORTED_TAG) >
sizeof(pvd->vg_name)) {
log_error("Volume group name %s too long to export",
pvd->vg_name);
return 0;
}
strcat((char *)pvd->vg_name, EXPORTED_TAG);
}
/* Is VG being imported? */
if (vg && !vg_is_exported(vg) && vg->lvm1_system_id && *vg->lvm1_system_id &&
!strncmp(vg->lvm1_system_id, EXPORTED_TAG, sizeof(EXPORTED_TAG) - 1)) {
if (!generate_lvm1_system_id(cmd, (char *)pvd->system_id, IMPORTED_TAG))
return_0;
}
/* Generate system_id if PV is in VG */
if (!pvd->system_id[0])
if (!generate_lvm1_system_id(cmd, (char *)pvd->system_id, ""))
return_0;
/* Update internal system_id if we changed it */
if (vg && vg->lvm1_system_id &&
(!*vg->lvm1_system_id ||
strncmp(vg->lvm1_system_id, (char *)pvd->system_id, sizeof(pvd->system_id))))
strncpy(vg->lvm1_system_id, (char *)pvd->system_id, NAME_LEN);
//pvd->pv_major = MAJOR(pv->dev);
if (pv->status & ALLOCATABLE_PV)
pvd->pv_allocatable = PV_ALLOCATABLE;
pvd->pv_size = pv->size;
pvd->lv_cur = 0; /* this is set when exporting the lv list */
if (vg)
pvd->pe_size = vg->extent_size;
else
pvd->pe_size = pv->pe_size;
pvd->pe_total = pv->pe_count;
pvd->pe_allocated = pv->pe_alloc_count;
pvd->pe_start = pv->pe_start;
return 1;
}
int import_vg(struct dm_pool *mem,
struct volume_group *vg, struct disk_list *dl)
{
struct vg_disk *vgd = &dl->vgd;
memcpy(vg->id.uuid, vgd->vg_uuid, ID_LEN);
if (!_check_vg_name((char *)dl->pvd.vg_name))
return_0;
if (!(vg->name = dm_pool_strdup(mem, (char *)dl->pvd.vg_name)))
return_0;
if (!(vg->lvm1_system_id = dm_pool_zalloc(mem, NAME_LEN + 1)))
return_0;
if (vgd->vg_status & VG_EXPORTED)
vg->status |= EXPORTED_VG;
if (vgd->vg_status & VG_EXTENDABLE)
vg->status |= RESIZEABLE_VG;
if (vgd->vg_access & VG_READ)
vg->status |= LVM_READ;
if (vgd->vg_access & VG_WRITE)
vg->status |= LVM_WRITE;
if (vgd->vg_access & VG_CLUSTERED)
vg->status |= CLUSTERED;
if (vgd->vg_access & VG_SHARED)
vg->status |= SHARED;
vg->extent_size = vgd->pe_size;
vg->extent_count = vgd->pe_total;
vg->free_count = vgd->pe_total;
vg->max_lv = vgd->lv_max;
vg->max_pv = vgd->pv_max;
vg->alloc = ALLOC_NORMAL;
return 1;
}
int export_vg(struct vg_disk *vgd, struct volume_group *vg)
{
memset(vgd, 0, sizeof(*vgd));
memcpy(vgd->vg_uuid, vg->id.uuid, ID_LEN);
if (vg->status & LVM_READ)
vgd->vg_access |= VG_READ;
if (vg->status & LVM_WRITE)
vgd->vg_access |= VG_WRITE;
if (vg_is_clustered(vg))
vgd->vg_access |= VG_CLUSTERED;
if (vg->status & SHARED)
vgd->vg_access |= VG_SHARED;
if (vg_is_exported(vg))
vgd->vg_status |= VG_EXPORTED;
if (vg_is_resizeable(vg))
vgd->vg_status |= VG_EXTENDABLE;
vgd->lv_max = vg->max_lv;
vgd->lv_cur = vg_visible_lvs(vg) + snapshot_count(vg);
vgd->pv_max = vg->max_pv;
vgd->pv_cur = vg->pv_count;
vgd->pe_size = vg->extent_size;
vgd->pe_total = vg->extent_count;
vgd->pe_allocated = vg->extent_count - vg->free_count;
return 1;
}
int import_lv(struct cmd_context *cmd, struct dm_pool *mem,
struct logical_volume *lv, struct lv_disk *lvd)
{
if (!(lv->name = _create_lv_name(mem, (char *)lvd->lv_name)))
return_0;
lv->status |= VISIBLE_LV;
if (lvd->lv_status & LV_SPINDOWN)
lv->status |= SPINDOWN_LV;
if (lvd->lv_status & LV_PERSISTENT_MINOR) {
lv->status |= FIXED_MINOR;
lv->minor = MINOR(lvd->lv_dev);
lv->major = MAJOR(lvd->lv_dev);
} else {
lv->major = -1;
lv->minor = -1;
}
if (lvd->lv_access & LV_READ)
lv->status |= LVM_READ;
if (lvd->lv_access & LV_WRITE)
lv->status |= LVM_WRITE;
if (lvd->lv_badblock)
lv->status |= BADBLOCK_ON;
/* Drop the unused LV_STRICT here */
if (lvd->lv_allocation & LV_CONTIGUOUS)
lv->alloc = ALLOC_CONTIGUOUS;
else
lv->alloc = ALLOC_NORMAL;
if (!lvd->lv_read_ahead)
lv->read_ahead = cmd->default_settings.read_ahead;
else
lv->read_ahead = lvd->lv_read_ahead;
lv->size = lvd->lv_size;
lv->le_count = lvd->lv_allocated_le;
return 1;
}
static void _export_lv(struct lv_disk *lvd, struct volume_group *vg,
struct logical_volume *lv, const char *dev_dir)
{
memset(lvd, 0, sizeof(*lvd));
snprintf((char *)lvd->lv_name, sizeof(lvd->lv_name), "%s%s/%s",
dev_dir, vg->name, lv->name);
(void) dm_strncpy((char *)lvd->vg_name, vg->name, sizeof(lvd->vg_name));
if (lv->status & LVM_READ)
lvd->lv_access |= LV_READ;
if (lv->status & LVM_WRITE)
lvd->lv_access |= LV_WRITE;
if (lv->status & SPINDOWN_LV)
lvd->lv_status |= LV_SPINDOWN;
if (lv->status & FIXED_MINOR) {
lvd->lv_status |= LV_PERSISTENT_MINOR;
lvd->lv_dev = MKDEV(lv->major, lv->minor);
} else {
lvd->lv_dev = MKDEV(LVM_BLK_MAJOR, lvnum_from_lvid(&lv->lvid));
}
if (lv->read_ahead == DM_READ_AHEAD_AUTO ||
lv->read_ahead == DM_READ_AHEAD_NONE)
lvd->lv_read_ahead = 0;
else
lvd->lv_read_ahead = lv->read_ahead;
lvd->lv_stripes =
dm_list_item(lv->segments.n, struct lv_segment)->area_count;
lvd->lv_stripesize =
dm_list_item(lv->segments.n, struct lv_segment)->stripe_size;
lvd->lv_size = lv->size;
lvd->lv_allocated_le = lv->le_count;
if (lv->status & BADBLOCK_ON)
lvd->lv_badblock = LV_BADBLOCK_ON;
if (lv->alloc == ALLOC_CONTIGUOUS)
lvd->lv_allocation |= LV_CONTIGUOUS;
}
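/*
* Fill in this PV's part of the on-disk PE map for one LV.  lv_num is
* passed 1-based by the caller (export_lvs()).  Within a segment the
* extents of each stripe sit contiguously on their PV, so the on-disk
* le_num is rebuilt from the stripe number and the offset within the
* stripe rather than copied straight from seg->le.
*/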
int export_extents(struct disk_list *dl, uint32_t lv_num,
struct logical_volume *lv, struct physical_volume *pv)
{
struct pe_disk *ped;
struct lv_segment *seg;
uint32_t pe, s;
dm_list_iterate_items(seg, &lv->segments) {
for (s = 0; s < seg->area_count; s++) {
if (!(seg->segtype->flags & SEG_FORMAT1_SUPPORT)) {
log_error("Segment type %s in LV %s: "
"unsupported by format1",
lvseg_name(seg), lv->name);
return 0;
}
if (seg_type(seg, s) != AREA_PV) {
log_error("Non-PV stripe found in LV %s: "
"unsupported by format1", lv->name);
return 0;
}
if (seg_pv(seg, s) != pv)
continue; /* not our pv */
for (pe = 0; pe < (seg->len / seg->area_count); pe++) {
ped = &dl->extents[pe + seg_pe(seg, s)];
ped->lv_num = lv_num;
ped->le_num = (seg->le / seg->area_count) + pe +
s * (lv->le_count / seg->area_count);
}
}
}
return 1;
}
int import_pvs(const struct format_type *fmt, struct dm_pool *mem,
struct volume_group *vg, struct dm_list *pvds)
{
struct disk_list *dl;
struct pv_list *pvl;
vg->pv_count = 0;
dm_list_iterate_items(dl, pvds) {
if (!(pvl = dm_pool_zalloc(mem, sizeof(*pvl))) ||
!(pvl->pv = dm_pool_alloc(mem, sizeof(*pvl->pv))))
return_0;
if (!import_pv(fmt, mem, dl->dev, vg, pvl->pv, &dl->pvd, &dl->vgd))
return_0;
pvl->pv->fmt = fmt;
add_pvl_to_vgs(vg, pvl);
}
return 1;
}
static struct logical_volume *_add_lv(struct dm_pool *mem,
struct volume_group *vg,
struct lv_disk *lvd)
{
struct logical_volume *lv;
if (!(lv = alloc_lv(mem)))
return_NULL;
lvid_from_lvnum(&lv->lvid, &vg->id, lvd->lv_number);
if (!import_lv(vg->cmd, mem, lv, lvd))
goto_bad;
if (!link_lv_to_vg(vg, lv))
goto_bad;
return lv;
bad:
dm_pool_free(mem, lv);
return NULL;
}
int import_lvs(struct dm_pool *mem, struct volume_group *vg, struct dm_list *pvds)
{
struct disk_list *dl;
struct lvd_list *ll;
struct lv_disk *lvd;
dm_list_iterate_items(dl, pvds) {
dm_list_iterate_items(ll, &dl->lvds) {
lvd = &ll->lvd;
if (!find_lv(vg, (char *)lvd->lv_name) &&
!_add_lv(mem, vg, lvd))
return_0;
}
}
return 1;
}
/* FIXME: tidy */
int export_lvs(struct disk_list *dl, struct volume_group *vg,
struct physical_volume *pv, const char *dev_dir)
{
int r = 0;
struct lv_list *ll;
struct lvd_list *lvdl;
size_t len;
uint32_t lv_num;
struct dm_hash_table *lvd_hash;
if (!_check_vg_name(vg->name))
return_0;
if (!(lvd_hash = dm_hash_create(32)))
return_0;
/*
* setup the pv's extents array
*/
len = sizeof(struct pe_disk) * dl->pvd.pe_total;
if (!(dl->extents = dm_pool_zalloc(dl->mem, len)))
goto_out;
dm_list_iterate_items(ll, &vg->lvs) {
if (lv_is_snapshot(ll->lv))
continue;
if (!(lvdl = dm_pool_alloc(dl->mem, sizeof(*lvdl))))
goto_out;
_export_lv(&lvdl->lvd, vg, ll->lv, dev_dir);
lv_num = lvnum_from_lvid(&ll->lv->lvid);
lvdl->lvd.lv_number = lv_num;
if (!dm_hash_insert(lvd_hash, ll->lv->name, &lvdl->lvd))
goto_out;
if (!export_extents(dl, lv_num + 1, ll->lv, pv))
goto_out;
if (lv_is_origin(ll->lv))
lvdl->lvd.lv_access |= LV_SNAPSHOT_ORG;
if (lv_is_cow(ll->lv)) {
lvdl->lvd.lv_access |= LV_SNAPSHOT;
lvdl->lvd.lv_chunk_size = ll->lv->snapshot->chunk_size;
lvdl->lvd.lv_snapshot_minor =
lvnum_from_lvid(&ll->lv->snapshot->origin->lvid);
}
dm_list_add(&dl->lvds, &lvdl->list);
dl->pvd.lv_cur++;
}
r = 1;
out:
dm_hash_destroy(lvd_hash);
return r;
}
/*
* FIXME: More inefficient code.
*/
int import_snapshots(struct dm_pool *mem __attribute__((unused)), struct volume_group *vg,
struct dm_list *pvds)
{
struct logical_volume *lvs[MAX_LV] = { 0 };
struct disk_list *dl;
struct lvd_list *ll;
struct lv_disk *lvd;
int lvnum;
struct logical_volume *org, *cow;
/* build an index of lv numbers */
dm_list_iterate_items(dl, pvds) {
dm_list_iterate_items(ll, &dl->lvds) {
lvd = &ll->lvd;
lvnum = lvd->lv_number;
if (lvnum >= MAX_LV) {
log_error("Logical volume number "
"out of bounds.");
return 0;
}
if (!lvs[lvnum] &&
!(lvs[lvnum] = find_lv(vg, (char *)lvd->lv_name))) {
log_error("Couldn't find logical volume '%s'.",
lvd->lv_name);
return 0;
}
}
}
/*
* Now iterate through yet again adding the snapshots.
*/
dm_list_iterate_items(dl, pvds) {
dm_list_iterate_items(ll, &dl->lvds) {
lvd = &ll->lvd;
if (!(lvd->lv_access & LV_SNAPSHOT))
continue;
lvnum = lvd->lv_number;
cow = lvs[lvnum];
if (!(org = lvs[lvd->lv_snapshot_minor])) {
log_error("Couldn't find origin logical volume "
"for snapshot '%s'.", lvd->lv_name);
return 0;
}
/* we may have already added this snapshot */
if (lv_is_cow(cow))
continue;
/* insert the snapshot */
if (!vg_add_snapshot(org, cow, NULL,
org->le_count,
lvd->lv_chunk_size)) {
log_error("Couldn't add snapshot.");
return 0;
}
}
}
return 1;
}
int export_uuids(struct disk_list *dl, struct volume_group *vg)
{
struct uuid_list *ul;
struct pv_list *pvl;
dm_list_iterate_items(pvl, &vg->pvs) {
if (!(ul = dm_pool_alloc(dl->mem, sizeof(*ul))))
return_0;
memset(ul->uuid, 0, sizeof(ul->uuid));
memcpy(ul->uuid, pvl->pv->id.uuid, ID_LEN);
dm_list_add(&dl->uuids, &ul->list);
}
return 1;
}
/*
* This calculates the nasty pv_number field
* used by LVM1.
*/
void export_numbers(struct dm_list *pvds, struct volume_group *vg __attribute__((unused)))
{
struct disk_list *dl;
int pv_num = 1;
dm_list_iterate_items(dl, pvds)
dl->pvd.pv_number = pv_num++;
}
/*
* Calculate vg_disk->pv_act.
*/
void export_pv_act(struct dm_list *pvds)
{
struct disk_list *dl;
int act = 0;
dm_list_iterate_items(dl, pvds)
if (dl->pvd.pv_status & PV_ACTIVE)
act++;
dm_list_iterate_items(dl, pvds)
dl->vgd.pv_act = act;
}
int export_vg_number(struct format_instance *fid, struct dm_list *pvds,
const char *vg_name, struct dev_filter *filter)
{
struct disk_list *dl;
int vg_num;
if (!get_free_vg_number(fid, filter, vg_name, &vg_num))
return_0;
dm_list_iterate_items(dl, pvds)
dl->vgd.vg_number = vg_num;
return 1;
}


@@ -0,0 +1,377 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib.h"
#include "metadata.h"
#include "disk-rep.h"
#include "lv_alloc.h"
#include "display.h"
#include "segtype.h"
/*
* After much thought I have decided it is easier,
* and probably no less efficient, to convert the
* pe->le map to a full le->pe map, and then
* process this to get the segment form that
* we're after. Any code which goes directly from
* the pe->le map to segments would be gladly
* accepted, if it is less complicated than this
* file.
*/
struct pe_specifier {
struct physical_volume *pv;
uint32_t pe;
};
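/*
* An lv_map carries a full le->pe table for one LV: map[le] records the
* PV and physical extent backing logical extent 'le'.  For striped LVs
* the table is grouped per stripe (stripe 'st' occupies
* map[st * le_count / stripes ...]), matching the numbering written by
* export_extents() in import-export.c.
*/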
struct lv_map {
struct logical_volume *lv;
uint32_t stripes;
uint32_t stripe_size;
struct pe_specifier *map;
};
static struct dm_hash_table *_create_lv_maps(struct dm_pool *mem,
struct volume_group *vg)
{
struct dm_hash_table *maps = dm_hash_create(32);
struct lv_list *ll;
struct lv_map *lvm;
if (!maps) {
log_error("Unable to create hash table for holding "
"extent maps.");
return NULL;
}
dm_list_iterate_items(ll, &vg->lvs) {
if (lv_is_snapshot(ll->lv))
continue;
if (!(lvm = dm_pool_alloc(mem, sizeof(*lvm))))
goto_bad;
lvm->lv = ll->lv;
/*
* Alloc 1 extra element, so the loop in _area_length() and
* _check_stripe() finds the last map member as non-contiguous.
*/
if (!(lvm->map = dm_pool_zalloc(mem, sizeof(*lvm->map)
* (ll->lv->le_count + 1))))
goto_bad;
if (!dm_hash_insert(maps, ll->lv->name, lvm))
goto_bad;
}
return maps;
bad:
dm_hash_destroy(maps);
return NULL;
}
static int _fill_lv_array(struct lv_map **lvs,
struct dm_hash_table *maps, struct disk_list *dl)
{
struct lvd_list *ll;
struct lv_map *lvm;
memset(lvs, 0, sizeof(*lvs) * MAX_LV);
dm_list_iterate_items(ll, &dl->lvds) {
if (!(lvm = dm_hash_lookup(maps, strrchr((char *)ll->lvd.lv_name, '/')
+ 1))) {
log_error("Physical volume (%s) contains an "
"unknown logical volume (%s).",
dev_name(dl->dev), ll->lvd.lv_name);
return 0;
}
lvm->stripes = ll->lvd.lv_stripes;
lvm->stripe_size = ll->lvd.lv_stripesize;
lvs[ll->lvd.lv_number] = lvm;
}
return 1;
}
static int _fill_maps(struct dm_hash_table *maps, struct volume_group *vg,
struct dm_list *pvds)
{
struct disk_list *dl;
struct physical_volume *pv;
struct lv_map *lvms[MAX_LV], *lvm;
struct pe_disk *e;
uint32_t i, lv_num, le;
dm_list_iterate_items(dl, pvds) {
if (!(pv = find_pv(vg, dl->dev))) {
log_error("PV %s not found.", dl->dev->pvid);
return 0;
}
e = dl->extents;
/* build an array of lv's for this pv */
if (!_fill_lv_array(lvms, maps, dl))
return_0;
for (i = 0; i < dl->pvd.pe_total; i++) {
lv_num = e[i].lv_num;
if (lv_num == UNMAPPED_EXTENT)
continue;
lv_num--;
lvm = lvms[lv_num];
if (!lvm) {
log_error("Invalid LV in extent map "
"(PV %s, PE %" PRIu32
", LV %" PRIu32
", LE %" PRIu32 ")",
dev_name(pv->dev), i,
lv_num, e[i].le_num);
return 0;
}
le = e[i].le_num;
if (le >= lvm->lv->le_count) {
log_error("logical extent number "
"out of bounds");
return 0;
}
if (lvm->map[le].pv) {
log_error("logical extent (%u) "
"already mapped.", le);
return 0;
}
lvm->map[le].pv = pv;
lvm->map[le].pe = i;
}
}
return 1;
}
static int _check_single_map(struct lv_map *lvm)
{
uint32_t i;
for (i = 0; i < lvm->lv->le_count; i++) {
if (!lvm->map[i].pv) {
log_error("Logical volume (%s) contains an incomplete "
"mapping table.", lvm->lv->name);
return 0;
}
}
return 1;
}
static int _check_maps_are_complete(struct dm_hash_table *maps)
{
struct dm_hash_node *n;
struct lv_map *lvm;
for (n = dm_hash_get_first(maps); n; n = dm_hash_get_next(maps, n)) {
lvm = (struct lv_map *) dm_hash_get_data(maps, n);
if (!_check_single_map(lvm))
return_0;
}
return 1;
}
static uint32_t _area_length(struct lv_map *lvm, uint32_t le)
{
uint32_t len = 0;
do
len++;
while ((lvm->map[le + len].pv == lvm->map[le].pv) &&
(lvm->map[le].pv &&
lvm->map[le + len].pe == lvm->map[le].pe + len));
return len;
}
static int _read_linear(struct cmd_context *cmd, struct lv_map *lvm)
{
uint32_t le = 0, len;
struct lv_segment *seg;
struct segment_type *segtype;
if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED)))
return_0;
while (le < lvm->lv->le_count) {
len = _area_length(lvm, le);
if (!(seg = alloc_lv_segment(segtype, lvm->lv, le, len, 0, 0, 0,
NULL, 1, len, 0, 0, 0, 0, NULL))) {
log_error("Failed to allocate linear segment.");
return 0;
}
if (!set_lv_segment_area_pv(seg, 0, lvm->map[le].pv,
lvm->map[le].pe))
return_0;
dm_list_add(&lvm->lv->segments, &seg->list);
le += seg->len;
}
return 1;
}
static int _check_stripe(struct lv_map *lvm, uint32_t area_count,
uint32_t area_len, uint32_t base_le,
uint32_t total_area_len)
{
uint32_t st;
/*
* Is the next physical extent in every stripe adjacent to the last?
*/
for (st = 0; st < area_count; st++)
if ((lvm->map[base_le + st * total_area_len + area_len].pv !=
lvm->map[base_le + st * total_area_len].pv) ||
(lvm->map[base_le + st * total_area_len].pv &&
lvm->map[base_le + st * total_area_len + area_len].pe !=
lvm->map[base_le + st * total_area_len].pe + area_len))
return 0;
return 1;
}
static int _read_stripes(struct cmd_context *cmd, struct lv_map *lvm)
{
uint32_t st, first_area_le = 0, total_area_len;
uint32_t area_len;
struct lv_segment *seg;
struct segment_type *segtype;
/*
* Work out overall striped length
*/
if (lvm->lv->le_count % lvm->stripes) {
log_error("Number of stripes (%u) incompatible "
"with logical extent count (%u) for %s",
lvm->stripes, lvm->lv->le_count, lvm->lv->name);
}
total_area_len = lvm->lv->le_count / lvm->stripes;
if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED)))
return_0;
while (first_area_le < total_area_len) {
area_len = 1;
/*
* Find how many extents are contiguous in all stripes
* and so can form part of this segment
*/
while (_check_stripe(lvm, lvm->stripes,
area_len, first_area_le, total_area_len))
area_len++;
if (!(seg = alloc_lv_segment(segtype, lvm->lv,
lvm->stripes * first_area_le,
lvm->stripes * area_len, 0,
0, lvm->stripe_size, NULL,
lvm->stripes,
area_len, 0, 0, 0, 0, NULL))) {
log_error("Failed to allocate striped segment.");
return 0;
}
/*
* Set up start positions of each stripe in this segment
*/
for (st = 0; st < seg->area_count; st++)
if (!set_lv_segment_area_pv(seg, st,
lvm->map[first_area_le + st * total_area_len].pv,
lvm->map[first_area_le + st * total_area_len].pe))
return_0;
dm_list_add(&lvm->lv->segments, &seg->list);
first_area_le += area_len;
}
return 1;
}
static int _build_segments(struct cmd_context *cmd, struct lv_map *lvm)
{
return (lvm->stripes > 1 ? _read_stripes(cmd, lvm) :
_read_linear(cmd, lvm));
}
static int _build_all_segments(struct cmd_context *cmd, struct dm_hash_table *maps)
{
struct dm_hash_node *n;
struct lv_map *lvm;
for (n = dm_hash_get_first(maps); n; n = dm_hash_get_next(maps, n)) {
lvm = (struct lv_map *) dm_hash_get_data(maps, n);
if (!_build_segments(cmd, lvm))
return_0;
}
return 1;
}
int import_extents(struct cmd_context *cmd, struct volume_group *vg,
struct dm_list *pvds)
{
int r = 0;
struct dm_pool *scratch = dm_pool_create("lvm1 import_extents", 10 * 1024);
struct dm_hash_table *maps;
if (!scratch)
return_0;
if (!(maps = _create_lv_maps(scratch, vg))) {
log_error("Couldn't allocate logical volume maps.");
goto out;
}
if (!_fill_maps(maps, vg, pvds)) {
log_error("Couldn't fill logical volume maps.");
goto out;
}
if (!_check_maps_are_complete(maps) && !(vg->status & PARTIAL_VG))
goto_out;
if (!_build_all_segments(cmd, maps)) {
log_error("Couldn't build extent segments.");
goto out;
}
r = 1;
out:
if (maps)
dm_hash_destroy(maps);
dm_pool_destroy(scratch);
return r;
}

172
lib/format1/layout.c Normal file

@@ -0,0 +1,172 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib.h"
#include "disk-rep.h"
/*
* Only works with powers of 2.
*/
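/* e.g. _round_up(5000, 4096) == 8192, _round_up(4096, 4096) == 4096 */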
static uint32_t _round_up(uint32_t n, uint32_t size)
{
size--;
return (n + size) & ~size;
}
/* Unused.
static uint32_t _div_up(uint32_t n, uint32_t size)
{
return _round_up(n, size) / size;
}
*/
/*
* Each chunk of metadata should be aligned to
* METADATA_ALIGN.
*/
static uint32_t _next_base(struct data_area *area)
{
return _round_up(area->base + area->size, METADATA_ALIGN);
}
/*
* Quick calculation based on pe_start.
*/
static int _adjust_pe_on_disk(struct pv_disk *pvd)
{
uint32_t pe_start = pvd->pe_start << SECTOR_SHIFT;
if (pe_start < pvd->pe_on_disk.base + pvd->pe_on_disk.size)
return 0;
pvd->pe_on_disk.size = pe_start - pvd->pe_on_disk.base;
return 1;
}
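/*
* Lay the metadata areas out in order: pv_disk at offset 0, then
* vg_disk, the PV UUID list (MAX_PV * NAME_LEN bytes), the lv_disk
* array (MAX_LV entries) and finally the PE map, each area starting on
* a METADATA_ALIGN boundary.  The physical extents themselves begin at
* pe_start, beyond all of this metadata.
*/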
static void _calc_simple_layout(struct pv_disk *pvd)
{
pvd->pv_on_disk.base = METADATA_BASE;
pvd->pv_on_disk.size = PV_SIZE;
pvd->vg_on_disk.base = _next_base(&pvd->pv_on_disk);
pvd->vg_on_disk.size = VG_SIZE;
pvd->pv_uuidlist_on_disk.base = _next_base(&pvd->vg_on_disk);
pvd->pv_uuidlist_on_disk.size = MAX_PV * NAME_LEN;
pvd->lv_on_disk.base = _next_base(&pvd->pv_uuidlist_on_disk);
pvd->lv_on_disk.size = MAX_LV * sizeof(struct lv_disk);
pvd->pe_on_disk.base = _next_base(&pvd->lv_on_disk);
pvd->pe_on_disk.size = pvd->pe_total * sizeof(struct pe_disk);
}
static int _check_vg_limits(struct disk_list *dl)
{
if (dl->vgd.lv_max > MAX_LV) {
log_error("MaxLogicalVolumes of %d exceeds format limit of %d "
"for VG '%s'", dl->vgd.lv_max, MAX_LV - 1,
dl->pvd.vg_name);
return 0;
}
if (dl->vgd.pv_max > MAX_PV) {
log_error("MaxPhysicalVolumes of %d exceeds format limit of %d "
"for VG '%s'", dl->vgd.pv_max, MAX_PV - 1,
dl->pvd.vg_name);
return 0;
}
return 1;
}
/*
* This assumes pe_count and pe_start have already
* been calculated correctly.
*/
int calculate_layout(struct disk_list *dl)
{
struct pv_disk *pvd = &dl->pvd;
_calc_simple_layout(pvd);
if (!_adjust_pe_on_disk(pvd)) {
log_error("Insufficient space for metadata and PE's.");
return 0;
}
if (!_check_vg_limits(dl))
return 0;
return 1;
}
/*
* The number of extents that can fit on a disk is metadata format dependent.
* pe_start is any existing value of pe_start to preserve (0 if none).
*/
int calculate_extent_count(struct physical_volume *pv, uint32_t extent_size,
uint32_t max_extent_count, uint64_t pe_start)
{
struct pv_disk *pvd = dm_malloc(sizeof(*pvd));
uint32_t end;
if (!pvd)
return_0;
/*
* Guess how many extents will fit, bearing in mind that
* one is going to be knocked off at the start of the
* next loop.
*/
if (max_extent_count)
pvd->pe_total = max_extent_count + 1;
else
pvd->pe_total = (pv->size / extent_size);
if (pvd->pe_total < PE_SIZE_PV_SIZE_REL) {
log_error("Too few extents on %s. Try smaller extent size.",
pv_dev_name(pv));
dm_free(pvd);
return 0;
}
do {
pvd->pe_total--;
_calc_simple_layout(pvd);
end = ((pvd->pe_on_disk.base + pvd->pe_on_disk.size +
SECTOR_SIZE - 1) >> SECTOR_SHIFT);
if (pe_start && end < pe_start)
end = pe_start;
pvd->pe_start = _round_up(end, LVM1_PE_ALIGN);
} while ((pvd->pe_start + ((uint64_t)pvd->pe_total * extent_size))
> pv->size);
if (pvd->pe_total > MAX_PE_TOTAL) {
log_error("Metadata extent limit (%u) exceeded for %s - "
"%u required", MAX_PE_TOTAL, pv_dev_name(pv),
pvd->pe_total);
dm_free(pvd);
return 0;
}
pv->pe_count = pvd->pe_total;
pv->pe_start = pvd->pe_start;
/* We can't set pe_size here without breaking LVM1 compatibility */
dm_free(pvd);
return 1;
}

130
lib/format1/lvm1-label.c Normal file

@@ -0,0 +1,130 @@
/*
* Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib.h"
#include "lvm1-label.h"
#include "disk-rep.h"
#include "label.h"
#include "metadata.h"
#include "xlate.h"
#include "format1.h"
#include <sys/stat.h>
#include <fcntl.h>
static void _not_supported(const char *op)
{
log_error("The '%s' operation is not supported for the lvm1 labeller.",
op);
}
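/*
* An LVM1 label is just the pv_disk structure in the first sector of
* the device: bytes 0 and 1 hold the magic "HM" and the version field
* must be 1 or 2.
*/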
static int _lvm1_can_handle(struct labeller *l __attribute__((unused)), void *buf, uint64_t sector)
{
struct pv_disk *pvd = (struct pv_disk *) buf;
uint32_t version;
/* LVM1 label must always be in first sector */
if (sector)
return 0;
version = xlate16(pvd->version);
if (pvd->id[0] == 'H' && pvd->id[1] == 'M' &&
(version == 1 || version == 2))
return 1;
return 0;
}
static int _lvm1_write(struct label *label __attribute__((unused)), void *buf __attribute__((unused)))
{
_not_supported("write");
return 0;
}
static int _lvm1_read(struct labeller *l, struct device *dev, void *buf,
struct label **label)
{
struct pv_disk *pvd = (struct pv_disk *) buf;
struct vg_disk vgd;
struct lvmcache_info *info;
const char *vgid = FMT_LVM1_ORPHAN_VG_NAME;
const char *vgname = FMT_LVM1_ORPHAN_VG_NAME;
unsigned exported = 0;
munge_pvd(dev, pvd);
if (*pvd->vg_name) {
if (!read_vgd(dev, &vgd, pvd))
return_0;
vgid = (char *) vgd.vg_uuid;
vgname = (char *) pvd->vg_name;
exported = pvd->pv_status & VG_EXPORTED;
}
if (!(info = lvmcache_add(l, (char *)pvd->pv_uuid, dev, vgname, vgid,
exported)))
return_0;
*label = lvmcache_get_label(info);
lvmcache_set_device_size(info, ((uint64_t)xlate32(pvd->pv_size)) << SECTOR_SHIFT);
lvmcache_set_ext_version(info, 0);
lvmcache_set_ext_flags(info, 0);
lvmcache_del_mdas(info);
lvmcache_del_bas(info);
lvmcache_make_valid(info);
return 1;
}
static int _lvm1_initialise_label(struct labeller *l __attribute__((unused)), struct label *label)
{
strcpy(label->type, "LVM1");
return 1;
}
static void _lvm1_destroy_label(struct labeller *l __attribute__((unused)), struct label *label __attribute__((unused)))
{
}
static void _lvm1_destroy(struct labeller *l)
{
dm_free(l);
}
struct label_ops _lvm1_ops = {
.can_handle = _lvm1_can_handle,
.write = _lvm1_write,
.read = _lvm1_read,
.initialise_label = _lvm1_initialise_label,
.destroy_label = _lvm1_destroy_label,
.destroy = _lvm1_destroy,
};
struct labeller *lvm1_labeller_create(struct format_type *fmt)
{
struct labeller *l;
if (!(l = dm_malloc(sizeof(*l)))) {
log_error("Couldn't allocate labeller object.");
return NULL;
}
l->ops = &_lvm1_ops;
l->fmt = fmt;
return l;
}

23
lib/format1/lvm1-label.h Normal file

@@ -0,0 +1,23 @@
/*
* Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _LVM_LVM1_LABEL_H
#define _LVM_LVM1_LABEL_H
#include "metadata.h"
struct labeller *lvm1_labeller_create(struct format_type *fmt);
#endif

60
lib/format1/vg_number.c Normal file

@@ -0,0 +1,60 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib.h"
#include "disk-rep.h"
/*
* FIXME: Quick hack. We can use caching to
* prevent a total re-read, even so vg_number
* causes the tools to check *every* pv. Yuck.
* Put in separate file so it wouldn't contaminate
* other code.
*/
int get_free_vg_number(struct format_instance *fid, struct dev_filter *filter,
const char *candidate_vg, int *result)
{
struct dm_list all_pvs;
struct disk_list *dl;
struct dm_pool *mem = dm_pool_create("lvm1 vg_number", 10 * 1024);
int i, r = 0, numbers[MAX_VG] = { 0 };
dm_list_init(&all_pvs);
if (!mem)
return_0;
if (!read_pvs_in_vg(fid->fmt, NULL, filter, mem, &all_pvs))
goto_out;
dm_list_iterate_items(dl, &all_pvs) {
if (!*dl->pvd.vg_name || !strcmp((char *)dl->pvd.vg_name, candidate_vg))
continue;
numbers[dl->vgd.vg_number] = 1;
}
for (i = 0; i < MAX_VG; i++) {
if (!numbers[i]) {
r = 1;
*result = i;
break;
}
}
out:
dm_pool_destroy(mem);
return r;
}


@@ -0,0 +1 @@
init_format


@@ -0,0 +1,30 @@
#
# Copyright (C) 2003-2004 Sistina Software, Inc. All rights reserved.
# Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
SOURCES =\
disk_rep.c \
format_pool.c \
import_export.c \
pool_label.c
LIB_SHARED = liblvm2formatpool.$(LIB_SUFFIX)
LIB_VERSION = $(LIB_VERSION_LVM)
include $(top_builddir)/make.tmpl
install: install_lvm2_plugin

412
lib/format_pool/disk_rep.c Normal file

@@ -0,0 +1,412 @@
/*
* Copyright (C) 1997-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib.h"
#include "label.h"
#include "metadata.h"
#include "lvmcache.h"
#include "xlate.h"
#include "disk_rep.h"
#include "toolcontext.h"
#include <assert.h>
/* FIXME: memcpy might not be portable */
#define CPIN_8(x, y, z) {memcpy((x), (y), (z));}
#define CPOUT_8(x, y, z) {memcpy((y), (x), (z));}
#define CPIN_16(x, y) {(x) = xlate16_be((y));}
#define CPOUT_16(x, y) {(y) = xlate16_be((x));}
#define CPIN_32(x, y) {(x) = xlate32_be((y));}
#define CPOUT_32(x, y) {(y) = xlate32_be((x));}
#define CPIN_64(x, y) {(x) = xlate64_be((y));}
#define CPOUT_64(x, y) {(y) = xlate64_be((x));}
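/*
* The pool format stores multi-byte fields big-endian on disk.  The
* CPIN_* macros convert from disk to host byte order on read and the
* CPOUT_* macros convert back on write (see pool_label_in() and
* pool_label_out() below).
*/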
static int __read_pool_disk(const struct format_type *fmt, struct device *dev,
struct dm_pool *mem __attribute__((unused)), struct pool_list *pl,
const char *vg_name __attribute__((unused)))
{
char buf[512] __attribute__((aligned(8)));
/* FIXME: Need to check the cache here first */
if (!dev_read(dev, UINT64_C(0), 512, DEV_IO_POOL, buf)) {
log_very_verbose("Failed to read PV data from %s",
dev_name(dev));
return 0;
}
if (!read_pool_label(pl, fmt->labeller, dev, buf, NULL))
return_0;
return 1;
}
static void _add_pl_to_list(struct cmd_context *cmd, struct dm_list *head, struct pool_list *data)
{
struct pool_list *pl;
dm_list_iterate_items(pl, head) {
if (id_equal(&data->pv_uuid, &pl->pv_uuid)) {
char uuid[ID_LEN + 7] __attribute__((aligned(8)));
if (!id_write_format(&pl->pv_uuid, uuid, ID_LEN + 7))
stack;
if (!dev_subsystem_part_major(cmd->dev_types, data->dev)) {
log_very_verbose("Ignoring duplicate PV %s on "
"%s", uuid,
dev_name(data->dev));
return;
}
log_very_verbose("Duplicate PV %s - using %s %s",
uuid, dev_subsystem_name(cmd->dev_types, data->dev),
dev_name(data->dev));
dm_list_del(&pl->list);
break;
}
}
dm_list_add(head, &data->list);
}
int read_pool_label(struct pool_list *pl, struct labeller *l,
struct device *dev, char *buf, struct label **label)
{
struct lvmcache_info *info;
struct id pvid;
struct id vgid;
char uuid[ID_LEN + 7] __attribute__((aligned(8)));
struct pool_disk *pd = &pl->pd;
pool_label_in(pd, buf);
get_pool_pv_uuid(&pvid, pd);
if (!id_write_format(&pvid, uuid, ID_LEN + 7))
stack;
log_debug_metadata("Calculated uuid %s for %s", uuid, dev_name(dev));
get_pool_vg_uuid(&vgid, pd);
if (!id_write_format(&vgid, uuid, ID_LEN + 7))
stack;
log_debug_metadata("Calculated uuid %s for %s", uuid, pd->pl_pool_name);
if (!(info = lvmcache_add(l, (char *) &pvid, dev, pd->pl_pool_name,
(char *) &vgid, 0)))
return_0;
if (label)
*label = lvmcache_get_label(info);
lvmcache_set_device_size(info, ((uint64_t)xlate32_be(pd->pl_blocks)) << SECTOR_SHIFT);
lvmcache_set_ext_version(info, 0);
lvmcache_set_ext_flags(info, 0);
lvmcache_del_mdas(info);
lvmcache_del_bas(info);
lvmcache_make_valid(info);
pl->dev = dev;
pl->pv = NULL;
memcpy(&pl->pv_uuid, &pvid, sizeof(pvid));
return 1;
}
/**
* pool_label_out - copies a pool_label_t into a char buffer
* @pl: ptr to a pool_label_t struct
* @buf: ptr to raw space where label info will be copied
*
* This function is important because it takes care of all of
* the endian issues when copying to disk. This way, when
* machines of different architectures are used, they will
* be able to interpret ondisk labels correctly. Always use
* this function before writing to disk.
*/
void pool_label_out(struct pool_disk *pl, void *buf)
{
struct pool_disk *bufpl = (struct pool_disk *) buf;
CPOUT_64(pl->pl_magic, bufpl->pl_magic);
CPOUT_64(pl->pl_pool_id, bufpl->pl_pool_id);
CPOUT_8(pl->pl_pool_name, bufpl->pl_pool_name, POOL_NAME_SIZE);
CPOUT_32(pl->pl_version, bufpl->pl_version);
CPOUT_32(pl->pl_subpools, bufpl->pl_subpools);
CPOUT_32(pl->pl_sp_id, bufpl->pl_sp_id);
CPOUT_32(pl->pl_sp_devs, bufpl->pl_sp_devs);
CPOUT_32(pl->pl_sp_devid, bufpl->pl_sp_devid);
CPOUT_32(pl->pl_sp_type, bufpl->pl_sp_type);
CPOUT_64(pl->pl_blocks, bufpl->pl_blocks);
CPOUT_32(pl->pl_striping, bufpl->pl_striping);
CPOUT_32(pl->pl_sp_dmepdevs, bufpl->pl_sp_dmepdevs);
CPOUT_32(pl->pl_sp_dmepid, bufpl->pl_sp_dmepid);
CPOUT_32(pl->pl_sp_weight, bufpl->pl_sp_weight);
CPOUT_32(pl->pl_minor, bufpl->pl_minor);
CPOUT_32(pl->pl_padding, bufpl->pl_padding);
CPOUT_8(pl->pl_reserve, bufpl->pl_reserve, 184);
}
/**
* pool_label_in - copies a char buffer into a pool_label_t
* @pl: ptr to a pool_label_t struct
* @buf: ptr to raw space where label info is copied from
*
* This function is important because it takes care of all of
* the endian issues when information from disk is about to be
* used. This way, when machines of different architectures
* are used, they will be able to interpret ondisk labels
* correctly. Always use this function before using labels that
* were read from disk.
*/
void pool_label_in(struct pool_disk *pl, void *buf)
{
struct pool_disk *bufpl = (struct pool_disk *) buf;
CPIN_64(pl->pl_magic, bufpl->pl_magic);
CPIN_64(pl->pl_pool_id, bufpl->pl_pool_id);
CPIN_8(pl->pl_pool_name, bufpl->pl_pool_name, POOL_NAME_SIZE);
CPIN_32(pl->pl_version, bufpl->pl_version);
CPIN_32(pl->pl_subpools, bufpl->pl_subpools);
CPIN_32(pl->pl_sp_id, bufpl->pl_sp_id);
CPIN_32(pl->pl_sp_devs, bufpl->pl_sp_devs);
CPIN_32(pl->pl_sp_devid, bufpl->pl_sp_devid);
CPIN_32(pl->pl_sp_type, bufpl->pl_sp_type);
CPIN_64(pl->pl_blocks, bufpl->pl_blocks);
CPIN_32(pl->pl_striping, bufpl->pl_striping);
CPIN_32(pl->pl_sp_dmepdevs, bufpl->pl_sp_dmepdevs);
CPIN_32(pl->pl_sp_dmepid, bufpl->pl_sp_dmepid);
CPIN_32(pl->pl_sp_weight, bufpl->pl_sp_weight);
CPIN_32(pl->pl_minor, bufpl->pl_minor);
CPIN_32(pl->pl_padding, bufpl->pl_padding);
CPIN_8(pl->pl_reserve, bufpl->pl_reserve, 184);
}
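/*
* Map a 6-bit value to a printable character:
* 0-9 -> '0'-'9', 10-35 -> 'A'-'Z', 36-61 -> 'a'-'z', 62 -> '!',
* 63 -> '#'.  '%' is only returned for out-of-range input.
*/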
static char _calc_char(unsigned int id)
{
/*
* [0-9A-Za-z!#] - 64 printable chars (6-bits)
*/
if (id < 10)
return id + 48;
if (id < 36)
return (id - 10) + 65;
if (id < 62)
return (id - 36) + 97;
if (id == 62)
return '!';
if (id == 63)
return '#';
return '%';
}
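/*
* Fabricate a 32-character UUID for a pool device: a fixed "POOL..."
* prefix in chars 0-12, the 64-bit pool id packed 6 bits per character
* into chars 13-23, the subpool id into chars 24-29 and the device id
* into the final two characters.
*/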
void get_pool_uuid(char *uuid, uint64_t poolid, uint32_t spid, uint32_t devid)
{
int i;
unsigned shifter = 0x003F;
assert(ID_LEN == 32);
memset(uuid, 0, ID_LEN);
strcat(uuid, "POOL0000000000");
/* We grab the entire 64 bits (+2 that get shifted in) */
for (i = 13; i < 24; i++) {
uuid[i] = _calc_char(((unsigned) poolid) & shifter);
poolid = poolid >> 6;
}
/* We grab the entire 32 bits (+4 that get shifted in) */
for (i = 24; i < 30; i++) {
uuid[i] = _calc_char((unsigned) (spid & shifter));
spid = spid >> 6;
}
/*
* Since we can only have 128 devices, we only worry about the
* last 12 bits
*/
for (i = 30; i < 32; i++) {
uuid[i] = _calc_char((unsigned) (devid & shifter));
devid = devid >> 6;
}
}
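/*
 * Baton threaded through lvmcache_foreach_pv(): collects the pool_list
 * entries for one VG on 'head' and records, per subpool, how many
 * devices the on-disk labels say to expect.
 */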
struct _read_pool_pv_baton {
const struct format_type *fmt;
struct dm_pool *mem, *tmpmem;
struct pool_list *pl;
struct dm_list *head;
const char *vgname;
uint32_t *sp_devs;
uint32_t sp_count;
int failed;
int empty;
};
static int _read_pool_pv(struct lvmcache_info *info, void *baton)
{
struct _read_pool_pv_baton *b = baton;
b->empty = 0;
if (lvmcache_device(info) &&
!(b->pl = read_pool_disk(b->fmt, lvmcache_device(info), b->mem, b->vgname)))
return 0;
/*
* We need to keep track of the total expected number
* of devices per subpool
*/
if (!b->sp_count) {
/* FIXME pl left uninitialised if !info->dev */
if (!b->pl) {
log_error(INTERNAL_ERROR "device is missing");
dm_pool_destroy(b->tmpmem);
b->failed = 1;
return 0;
}
b->sp_count = b->pl->pd.pl_subpools;
if (!(b->sp_devs =
dm_pool_zalloc(b->tmpmem,
sizeof(uint32_t) * b->sp_count))) {
log_error("Unable to allocate %d 32-bit uints",
b->sp_count);
dm_pool_destroy(b->tmpmem);
b->failed = 1;
return 0;
}
}
	/*
	 * Watch out for a pool label with a different subpool
	 * count than the original - give up if it differs.
*/
if (b->sp_count != b->pl->pd.pl_subpools)
return 0;
_add_pl_to_list(lvmcache_fmt(info)->cmd, b->head, b->pl);
if (b->sp_count > b->pl->pd.pl_sp_id && b->sp_devs[b->pl->pd.pl_sp_id] == 0)
b->sp_devs[b->pl->pd.pl_sp_id] = b->pl->pd.pl_sp_devs;
return 1;
}
static int _read_vg_pds(struct _read_pool_pv_baton *b,
struct lvmcache_vginfo *vginfo,
uint32_t *devcount)
{
uint32_t i;
b->sp_count = 0;
b->sp_devs = NULL;
b->failed = 0;
b->pl = NULL;
/* FIXME: maybe should return a different error in memory
* allocation failure */
if (!(b->tmpmem = dm_pool_create("pool read_vg", 512)))
return_0;
lvmcache_foreach_pv(vginfo, _read_pool_pv, b);
*devcount = 0;
for (i = 0; i < b->sp_count; i++)
*devcount += b->sp_devs[i];
dm_pool_destroy(b->tmpmem);
if (b->pl && *b->pl->pd.pl_pool_name)
return 1;
return 0;
}
int read_pool_pds(const struct format_type *fmt, const char *vg_name,
struct dm_pool *mem, struct dm_list *pdhead)
{
struct lvmcache_vginfo *vginfo;
uint32_t totaldevs;
int full_scan = -1;
struct _read_pool_pv_baton baton;
baton.vgname = vg_name;
baton.mem = mem;
baton.fmt = fmt;
baton.head = pdhead;
baton.empty = 1;
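	/*
	 * Up to three passes: first against the current cache contents,
	 * then after a label scan, then after forcing a full device
	 * rescan.  A partial pool is only accepted after the forced rescan.
	 */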
do {
/*
* If the cache scanning doesn't work, this will never work
*/
if (vg_name && (vginfo = lvmcache_vginfo_from_vgname(vg_name, NULL)) &&
_read_vg_pds(&baton, vginfo, &totaldevs) && !baton.empty)
{
/*
* If we found all the devices we were expecting, return
* success
*/
if (dm_list_size(pdhead) == totaldevs)
return 1;
/*
* accept partial pool if we've done a full rescan of
* the cache
*/
if (full_scan > 0)
return 1;
}
/* Failed */
dm_list_init(pdhead);
full_scan++;
if (full_scan > 1) {
log_debug_metadata("No devices for vg %s found in cache",
vg_name);
return 0;
}
if (full_scan > 0)
lvmcache_force_next_label_scan();
lvmcache_label_scan(fmt->cmd);
} while (1);
}
struct pool_list *read_pool_disk(const struct format_type *fmt,
struct device *dev, struct dm_pool *mem,
const char *vg_name)
{
struct pool_list *pl;
if (!dev_open_readonly(dev))
return_NULL;
if (!(pl = dm_pool_zalloc(mem, sizeof(*pl)))) {
log_error("Unable to allocate pool list structure");
		return NULL;
}
if (!__read_pool_disk(fmt, dev, mem, pl, vg_name))
return_NULL;
if (!dev_close(dev))
stack;
return pl;
}

lib/format_pool/disk_rep.h

@@ -0,0 +1,156 @@
/*
* Copyright (C) 1997-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef DISK_REP_FORMAT_POOL_H
#define DISK_REP_FORMAT_POOL_H
#include "label.h"
#include "metadata.h"
#define MINOR_OFFSET 65536
/* From NSP.cf */
#define NSPMajorVersion 4
#define NSPMinorVersion 1
#define NSPUpdateLevel 3
/* From pool_std.h */
#define POOL_NAME_SIZE (256)
#define POOL_MAGIC 0x011670
#define POOL_MAJOR (121)
#define POOL_MAX_DEVICES 128
/* When checking for version matching, the first two numbers are
 * important for metadata formats, a.k.a. pool labels.  All the
 * numbers are important when checking if the user space tools
 * match up with the kernel module. */
#define POOL_VERSION (NSPMajorVersion << 16 | \
NSPMinorVersion << 8 | \
NSPUpdateLevel)
/* Pool label is at the head of every pool disk partition */
#define SIZEOF_POOL_LABEL (8192)
/* in 512-byte sectors (8192 bytes = 16 sectors) */
#define POOL_PE_SIZE (SIZEOF_POOL_LABEL >> SECTOR_SHIFT)
#define POOL_PE_START (SIZEOF_POOL_LABEL >> SECTOR_SHIFT)
/* Helper macros: build LVM uuids from the on-disk pool identifiers.
 * VG and LV uuids use only the pool id; PV uuids also encode the
 * subpool and device ids. */
#define get_pool_vg_uuid(id, pd) do { get_pool_uuid((char *)(id), \
(pd)->pl_pool_id, 0, 0); \
} while(0)
#define get_pool_pv_uuid(id, pd) do { get_pool_uuid((char *)(id), \
(pd)->pl_pool_id, \
(pd)->pl_sp_id, \
(pd)->pl_sp_devid); \
} while(0)
#define get_pool_lv_uuid(id, pd) do { get_pool_uuid((char *)&(id)[0], \
(pd)->pl_pool_id, 0, 0); \
get_pool_uuid((char*)&(id)[1], \
(pd)->pl_pool_id, 0, 0); \
} while(0)
struct pool_disk;
struct pool_list;
struct user_subpool;
struct user_device;
struct pool_disk {
uint64_t pl_magic; /* Pool magic number */
uint64_t pl_pool_id; /* Unique pool identifier */
char pl_pool_name[POOL_NAME_SIZE]; /* Name of pool */
uint32_t pl_version; /* Pool version */
uint32_t pl_subpools; /* Number of subpools in this pool */
uint32_t pl_sp_id; /* Subpool number within pool */
uint32_t pl_sp_devs; /* Number of data partitions in this subpool */
uint32_t pl_sp_devid; /* Partition number within subpool */
uint32_t pl_sp_type; /* Partition type */
uint64_t pl_blocks; /* Number of blocks in this partition */
uint32_t pl_striping; /* Striping size within subpool */
	/*
	 * If the number of DMEP devices is zero, then the next field
	 * (pl_sp_dmepid) becomes the subpool ID for redirection.  In
	 * other words, if this subpool does not have the capability
	 * to do DMEP, then it must specify which subpool will do it
	 * in its place.
	 */
	/*
	 * While the next 3 fields are no longer used, they must stay
	 * to keep backward compatibility.
	 */
uint32_t pl_sp_dmepdevs;/* Number of dmep devices in this subpool */
uint32_t pl_sp_dmepid; /* Dmep device number within subpool */
uint32_t pl_sp_weight; /* if dmep dev, pref to using it */
uint32_t pl_minor; /* the pool minor number */
uint32_t pl_padding; /* reminder - think about alignment */
	/*
	 * Even though we zero out 8k at the front of the disk before
	 * writing the label, keep this padding so the structure is a
	 * full 512 bytes.
	 */
char pl_reserve[184]; /* bump the structure size out to 512 bytes */
};
struct pool_list {
struct dm_list list;
struct pool_disk pd;
struct physical_volume *pv;
struct id pv_uuid;
struct device *dev;
};
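/*
 * In-memory view built from the per-device pool labels: one user_subpool
 * per subpool, each holding an array of its member user_device entries.
 */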
struct user_subpool {
uint32_t initialized;
uint32_t id;
uint32_t striping;
uint32_t num_devs;
uint32_t type;
uint32_t dummy;
struct user_device *devs;
};
struct user_device {
uint32_t initialized;
uint32_t sp_id;
uint32_t devid;
uint32_t dummy;
uint64_t blocks;
struct physical_volume *pv;
};
int read_pool_label(struct pool_list *pl, struct labeller *l,
struct device *dev, char *buf, struct label **label);
void pool_label_out(struct pool_disk *pl, void *buf);
void pool_label_in(struct pool_disk *pl, void *buf);
void get_pool_uuid(char *uuid, uint64_t poolid, uint32_t spid, uint32_t devid);
int import_pool_vg(struct volume_group *vg, struct dm_pool *mem, struct dm_list *pls);
int import_pool_lvs(struct volume_group *vg, struct dm_pool *mem,
struct dm_list *pls);
int import_pool_pvs(const struct format_type *fmt, struct volume_group *vg,
struct dm_pool *mem, struct dm_list *pls);
int import_pool_pv(const struct format_type *fmt, struct dm_pool *mem,
struct volume_group *vg, struct physical_volume *pv,
struct pool_list *pl);
int import_pool_segments(struct dm_list *lvs, struct dm_pool *mem,
struct user_subpool *usp, int sp_count);
int read_pool_pds(const struct format_type *fmt, const char *vgname,
struct dm_pool *mem, struct dm_list *head);
struct pool_list *read_pool_disk(const struct format_type *fmt,
struct device *dev, struct dm_pool *mem,
const char *vg_name);
#endif /* DISK_REP_FORMAT_POOL_H */


@@ -0,0 +1,338 @@
/*
* Copyright (C) 1997-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib.h"
#include "label.h"
#include "metadata.h"
#include "limits.h"
#include "display.h"
#include "toolcontext.h"
#include "lvmcache.h"
#include "disk_rep.h"
#include "format_pool.h"
#include "pool_label.h"
/* Must be called after pvs are imported */
static struct user_subpool *_build_usp(struct dm_list *pls, struct dm_pool *mem,
int *sps)
{
struct pool_list *pl;
struct user_subpool *usp = NULL, *cur_sp = NULL;
struct user_device *cur_dev = NULL;
/*
* FIXME: Need to do some checks here - I'm tempted to add a
* user_pool structure and build the entire thing to check against.
*/
dm_list_iterate_items(pl, pls) {
*sps = pl->pd.pl_subpools;
if (!usp && (!(usp = dm_pool_zalloc(mem, sizeof(*usp) * (*sps))))) {
log_error("Unable to allocate %d subpool structures",
*sps);
return 0;
}
if (cur_sp != &usp[pl->pd.pl_sp_id]) {
cur_sp = &usp[pl->pd.pl_sp_id];
cur_sp->id = pl->pd.pl_sp_id;
cur_sp->striping = pl->pd.pl_striping;
cur_sp->num_devs = pl->pd.pl_sp_devs;
cur_sp->type = pl->pd.pl_sp_type;
cur_sp->initialized = 1;
}
if (!cur_sp->devs &&
(!(cur_sp->devs =
dm_pool_zalloc(mem,
sizeof(*usp->devs) * pl->pd.pl_sp_devs)))) {
log_error("Unable to allocate %d pool_device "
"structures", pl->pd.pl_sp_devs);
return 0;
}
cur_dev = &cur_sp->devs[pl->pd.pl_sp_devid];
cur_dev->sp_id = cur_sp->id;
cur_dev->devid = pl->pd.pl_sp_id;
cur_dev->blocks = pl->pd.pl_blocks;
cur_dev->pv = pl->pv;
cur_dev->initialized = 1;
}
return usp;
}
static int _check_usp(const char *vgname, struct user_subpool *usp, int sp_count)
{
int i;
unsigned j;
for (i = 0; i < sp_count; i++) {
if (!usp[i].initialized) {
log_error("Missing subpool %d in pool %s", i, vgname);
return 0;
}
for (j = 0; j < usp[i].num_devs; j++) {
if (!usp[i].devs[j].initialized) {
log_error("Missing device %u for subpool %d"
" in pool %s", j, i, vgname);
return 0;
}
}
}
return 1;
}
static struct volume_group *_pool_vg_read(struct format_instance *fid,
const char *vg_name,
struct metadata_area *mda __attribute__((unused)),
struct cached_vg_fmtdata **vg_fmtdata __attribute__((unused)),
unsigned *use_previous_vg __attribute__((unused)),
int single_device __attribute__((unused)))
{
struct volume_group *vg;
struct user_subpool *usp;
int sp_count;
DM_LIST_INIT(pds);
/* We can safely ignore the mda passed in */
/* Strip dev_dir if present */
if (vg_name)
vg_name = strip_dir(vg_name, fid->fmt->cmd->dev_dir);
/* Set vg_name through read_pool_pds() */
if (!(vg = alloc_vg("pool_vg_read", fid->fmt->cmd, NULL)))
return_NULL;
/* Read all the pvs in the vg */
if (!read_pool_pds(fid->fmt, vg_name, vg->vgmem, &pds))
goto_bad;
/* Setting pool seqno to 1 because the code always did this,
* although we don't think it's needed. */
vg->seqno = 1;
if (!import_pool_vg(vg, vg->vgmem, &pds))
goto_bad;
if (!import_pool_pvs(fid->fmt, vg, vg->vgmem, &pds))
goto_bad;
if (!import_pool_lvs(vg, vg->vgmem, &pds))
goto_bad;
/*
* I need an intermediate subpool structure that contains all the
	 * relevant info for this.  Then I can iterate through the subpool
	 * structures for checking, and create the segments.
*/
if (!(usp = _build_usp(&pds, vg->vgmem, &sp_count)))
goto_bad;
/*
* check the subpool structures - we can't handle partial VGs in
* the pool format, so this will error out if we're missing PVs
*/
if (!_check_usp(vg->name, usp, sp_count))
goto_bad;
if (!import_pool_segments(&vg->lvs, vg->vgmem, usp, sp_count))
goto_bad;
vg_set_fid(vg, fid);
return vg;
bad:
release_vg(vg);
return NULL;
}
static int _pool_pv_initialise(const struct format_type *fmt __attribute__((unused)),
struct pv_create_args *pva __attribute__((unused)),
struct physical_volume *pv __attribute__((unused)))
{
return 1;
}
static int _pool_pv_setup(const struct format_type *fmt __attribute__((unused)),
struct physical_volume *pv __attribute__((unused)),
struct volume_group *vg __attribute__((unused)))
{
return 1;
}
static int _pool_pv_read(const struct format_type *fmt, const char *pv_name,
struct physical_volume *pv,
int scan_label_only __attribute__((unused)))
{
struct dm_pool *mem = dm_pool_create("pool pv_read", 1024);
struct pool_list *pl;
struct device *dev;
int r = 0;
log_very_verbose("Reading physical volume data %s from disk", pv_name);
if (!mem)
return_0;
if (!(dev = dev_cache_get(pv_name, fmt->cmd->filter)))
goto_out;
/*
* I need to read the disk and populate a pv structure here
* I'll probably need to abstract some of this later for the
* vg_read code
*/
if (!(pl = read_pool_disk(fmt, dev, mem, NULL)))
goto_out;
if (!import_pool_pv(fmt, fmt->cmd->mem, NULL, pv, pl))
goto_out;
pv->fmt = fmt;
r = 1;
out:
dm_pool_destroy(mem);
return r;
}
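/*
 * Only a vg_read method is provided: the pool format is treated as
 * read-only source metadata (FMT_OBSOLETE below), so no write or
 * commit operations are defined here.
 */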
/* *INDENT-OFF* */
static struct metadata_area_ops _metadata_format_pool_ops = {
.vg_read = _pool_vg_read,
};
/* *INDENT-ON* */
static struct format_instance *_pool_create_instance(const struct format_type *fmt,
const struct format_instance_ctx *fic)
{
struct format_instance *fid;
struct metadata_area *mda;
if (!(fid = alloc_fid(fmt, fic)))
return_NULL;
/* Define a NULL metadata area */
if (!(mda = dm_pool_zalloc(fid->mem, sizeof(*mda)))) {
log_error("Unable to allocate metadata area structure "
"for pool format");
goto bad;
}
mda->ops = &_metadata_format_pool_ops;
mda->metadata_locn = NULL;
mda->status = 0;
dm_list_add(&fid->metadata_areas_in_use, &mda->list);
return fid;
bad:
dm_pool_destroy(fid->mem);
return NULL;
}
static void _pool_destroy_instance(struct format_instance *fid)
{
if (--fid->ref_count <= 1)
dm_pool_destroy(fid->mem);
}
static void _pool_destroy(struct format_type *fmt)
{
if (fmt->orphan_vg)
free_orphan_vg(fmt->orphan_vg);
dm_free(fmt);
}
/* *INDENT-OFF* */
static struct format_handler _format_pool_ops = {
.pv_read = _pool_pv_read,
.pv_initialise = _pool_pv_initialise,
.pv_setup = _pool_pv_setup,
.create_instance = _pool_create_instance,
.destroy_instance = _pool_destroy_instance,
.destroy = _pool_destroy,
};
/* *INDENT-ON* */
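/*
 * Entry point: init_pool_format() when the pool format is compiled into
 * the tools (POOL_INTERNAL), or the generic init_format() when built as
 * a shared format module.
 */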
#ifdef POOL_INTERNAL
struct format_type *init_pool_format(struct cmd_context *cmd)
#else /* Shared */
struct format_type *init_format(struct cmd_context *cmd);
struct format_type *init_format(struct cmd_context *cmd)
#endif
{
struct format_type *fmt = dm_malloc(sizeof(*fmt));
struct format_instance_ctx fic;
struct format_instance *fid;
if (!fmt) {
log_error("Unable to allocate format type structure for pool "
"format");
return NULL;
}
fmt->cmd = cmd;
fmt->ops = &_format_pool_ops;
fmt->name = FMT_POOL_NAME;
fmt->alias = NULL;
fmt->orphan_vg_name = FMT_POOL_ORPHAN_VG_NAME;
fmt->features = FMT_OBSOLETE;
fmt->private = NULL;
dm_list_init(&fmt->mda_ops);
if (!(fmt->labeller = pool_labeller_create(fmt))) {
log_error("Couldn't create pool label handler.");
dm_free(fmt);
return NULL;
}
if (!(label_register_handler(fmt->labeller))) {
log_error("Couldn't register pool label handler.");
fmt->labeller->ops->destroy(fmt->labeller);
dm_free(fmt);
return NULL;
}
if (!(fmt->orphan_vg = alloc_vg("pool_orphan", cmd, fmt->orphan_vg_name))) {
log_error("Couldn't create pool orphan VG.");
dm_free(fmt);
return NULL;
}
fic.type = FMT_INSTANCE_AUX_MDAS;
fic.context.vg_ref.vg_name = fmt->orphan_vg_name;
fic.context.vg_ref.vg_id = NULL;
if (!(fid = _pool_create_instance(fmt, &fic))) {
_pool_destroy(fmt);
return NULL;
}
vg_set_fid(fmt->orphan_vg, fid);
log_very_verbose("Initialised format: %s", fmt->name);
return fmt;
}


@@ -0,0 +1,28 @@
/*
* Copyright (C) 2003-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _LVM_FORMAT_POOL_H
#define _LVM_FORMAT_POOL_H
#include "metadata.h"
#define FMT_POOL_NAME "pool"
#define FMT_POOL_ORPHAN_VG_NAME ORPHAN_VG_NAME(FMT_POOL_NAME)
#ifdef POOL_INTERNAL
struct format_type *init_pool_format(struct cmd_context *cmd);
#endif
#endif

Some files were not shown because too many files have changed in this diff.