mirror of git://sourceware.org/git/lvm2.git

Compare commits (1 commit)
2245725da2 commands: new method for defining commands
. Define a prototype for every lvm command.
. Verify every user command matches one.
. Generate help text and man pages from them.

The new file command-lines.in defines a prototype for every
unique lvm command.  A unique lvm command is a unique
combination of: command name + required option args +
required positional args.  Each of these prototypes also
includes the optional option args and optional positional
args that the command will accept, a description, and a
unique string ID for the definition.  Any valid command
will match one of the prototypes.

Here's an example of the lvresize command definitions from
command-lines.in; there are three unique lvresize commands:

lvresize --size SizeMB LV
OO: --alloc Alloc, --autobackup Bool, --force,
--nofsck, --nosync, --noudevsync, --reportformat String, --resizefs,
--stripes Number, --stripesize SizeKB, --test, --poolmetadatasize SizeMB
OP: PV ...
ID: lvresize_by_size
DESC: Resize an LV by a specified size.

lvresize LV PV ...
OO: --alloc Alloc, --autobackup Bool, --force,
--nofsck, --nosync, --noudevsync,
--reportformat String, --resizefs, --stripes Number, --stripesize SizeKB,
--test
ID: lvresize_by_pv
DESC: Resize an LV by a specified PV.

lvresize --poolmetadatasize SizeMB LV_thinpool
OO: --alloc Alloc, --autobackup Bool, --force,
--nofsck, --nosync, --noudevsync,
--reportformat String, --stripes Number, --stripesize SizeKB,
--test
OP: PV ...
ID: lvresize_pool_metadata_by_size
DESC: Resize the metadata SubLV of a pool LV.

The three commands have separate definitions because they have
different required parameters.  Required parameters are specified
on the first line of the definition.  Optional option args are
listed after OO, and optional positional args are listed after OP.

This data is used to generate corresponding command definition
structures for lvm in command-lines.h.  "usage" text is also
generated, so it is always in sync with the definitions.

Example of the corresponding generated structure in
command-lines.h for the first lvresize prototype
(these structures are never edited directly):

commands[78].name = "lvresize";
commands[78].command_line_id = "lvresize_by_size";
commands[78].command_line_enum = lvresize_by_size_CMD;
commands[78].fn = lvresize;
commands[78].ro_count = 1;
commands[78].rp_count = 1;
commands[78].oo_count = 22;
commands[78].op_count = 1;
commands[78].desc = "DESC: Resize an LV by a specified size.";
commands[78].usage = "lvresize --size Number[m|unit] LV"
" [ --alloc contiguous|cling|normal|anywhere|inherit,
   --autobackup y|n, --nofsck, --nosync, --reportformat String,
   --resizefs, --stripes Number, --stripesize Number[k|unit],
   --poolmetadatasize Number[m|unit] ]"
" [ PV ... ]";
commands[78].usage_common =
" [ --commandprofile String, --config String, --debug,
    --driverloaded y|n, --help, --profile String, --quiet,
    --verbose, --version, --yes, --force, --test, --noudevsync ]";
commands[78].required_opt_args[0].opt = size_ARG;
commands[78].required_opt_args[0].def.val_bits = val_enum_to_bit(sizemb_VAL);
commands[78].required_pos_args[0].pos = 1;
commands[78].required_pos_args[0].def.val_bits = val_enum_to_bit(lv_VAL);
commands[78].optional_opt_args[0].opt = commandprofile_ARG;
commands[78].optional_opt_args[0].def.val_bits = val_enum_to_bit(string_VAL);
commands[78].optional_opt_args[1].opt = config_ARG;
commands[78].optional_opt_args[1].def.val_bits = val_enum_to_bit(string_VAL);
commands[78].optional_opt_args[2].opt = debug_ARG;
commands[78].optional_opt_args[3].opt = driverloaded_ARG;
commands[78].optional_opt_args[3].def.val_bits = val_enum_to_bit(bool_VAL);
commands[78].optional_opt_args[4].opt = help_ARG;
commands[78].optional_opt_args[5].opt = profile_ARG;
commands[78].optional_opt_args[5].def.val_bits = val_enum_to_bit(string_VAL);
commands[78].optional_opt_args[6].opt = quiet_ARG;
commands[78].optional_opt_args[7].opt = verbose_ARG;
commands[78].optional_opt_args[8].opt = version_ARG;
commands[78].optional_opt_args[9].opt = yes_ARG;
commands[78].optional_opt_args[10].opt = alloc_ARG;
commands[78].optional_opt_args[10].def.val_bits = val_enum_to_bit(alloc_VAL);
commands[78].optional_opt_args[11].opt = autobackup_ARG;
commands[78].optional_opt_args[11].def.val_bits = val_enum_to_bit(bool_VAL);
commands[78].optional_opt_args[12].opt = force_ARG;
commands[78].optional_opt_args[13].opt = nofsck_ARG;
commands[78].optional_opt_args[14].opt = nosync_ARG;
commands[78].optional_opt_args[15].opt = noudevsync_ARG;
commands[78].optional_opt_args[16].opt = reportformat_ARG;
commands[78].optional_opt_args[16].def.val_bits = val_enum_to_bit(string_VAL);
commands[78].optional_opt_args[17].opt = resizefs_ARG;
commands[78].optional_opt_args[18].opt = stripes_ARG;
commands[78].optional_opt_args[18].def.val_bits = val_enum_to_bit(number_VAL);
commands[78].optional_opt_args[19].opt = stripesize_ARG;
commands[78].optional_opt_args[19].def.val_bits = val_enum_to_bit(sizekb_VAL);
commands[78].optional_opt_args[20].opt = test_ARG;
commands[78].optional_opt_args[21].opt = poolmetadatasize_ARG;
commands[78].optional_opt_args[21].def.val_bits = val_enum_to_bit(sizemb_VAL);
commands[78].optional_pos_args[0].pos = 2;
commands[78].optional_pos_args[0].def.val_bits = val_enum_to_bit(pv_VAL);
commands[78].optional_pos_args[0].def.flags = ARG_DEF_FLAG_MAY_REPEAT;
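
The assignments above imply per-definition tables roughly shaped
like the sketch below.  This is only an illustration: the type
and constant names (struct opt_arg, CMD_MAX_*, struct cmd_context
used as an opaque type) are inferred from the generated example
and are not necessarily the declarations in the real headers.

#include <stdint.h>

struct cmd_context;                 /* per-command state, opaque here */

#define CMD_MAX_RO_ARGS   8         /* hypothetical array bounds */
#define CMD_MAX_OO_ARGS  64
#define CMD_MAX_RP_ARGS   8
#define CMD_MAX_OP_ARGS   8

struct arg_def {
    uint32_t val_bits;      /* accepted value types, e.g. val_enum_to_bit(sizemb_VAL) */
    uint32_t flags;         /* e.g. ARG_DEF_FLAG_MAY_REPEAT */
};

struct opt_arg {
    int opt;                /* option enum, e.g. size_ARG */
    struct arg_def def;
};

struct pos_arg {
    int pos;                /* 1-based positional index */
    struct arg_def def;
};

struct command {
    const char *name;               /* "lvresize" */
    const char *command_line_id;    /* "lvresize_by_size" */
    int command_line_enum;          /* lvresize_by_size_CMD */
    int (*fn)(struct cmd_context *cmd, int argc, char **argv);
    int ro_count, rp_count, oo_count, op_count;
    const char *desc;
    const char *usage;
    const char *usage_common;
    struct opt_arg required_opt_args[CMD_MAX_RO_ARGS];
    struct opt_arg optional_opt_args[CMD_MAX_OO_ARGS];
    struct pos_arg required_pos_args[CMD_MAX_RP_ARGS];
    struct pos_arg optional_pos_args[CMD_MAX_OP_ARGS];
};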

Every user-entered command is compared against the set of
command structures and matched with one.  If an entered command
does not supply the required parameters of any definition, an
error is reported and the closest match is printed as a
suggestion.  Running lvresize --help displays the usage for
each possible lvresize command, e.g.:

$ lvresize --help
  lvresize - Resize a logical volume

  Resize an LV by a specified size.
  lvresize --size Number[m|unit] LV
  	[ --alloc contiguous|cling|normal|anywhere|inherit,
	  --autobackup y|n,
	  --nofsck,
	  --nosync,
	  --reportformat String,
	  --resizefs,
	  --stripes Number,
	  --stripesize Number[k|unit],
	  --poolmetadatasize Number[m|unit] ]
  	[ PV ... ]

  Resize an LV by a specified PV.
  lvresize LV PV ...
  	[ --alloc contiguous|cling|normal|anywhere|inherit,
	  --autobackup y|n,
	  --nofsck,
	  --nosync,
	  --reportformat String,
	  --resizefs,
	  --stripes Number,
	  --stripesize Number[k|unit] ]

  Resize the metadata SubLV of a pool LV.
  lvresize --poolmetadatasize Number[m|unit] LV_thinpool
  	[ --alloc contiguous|cling|normal|anywhere|inherit,
	  --autobackup y|n,
	  --nofsck,
	  --nosync,
	  --reportformat String,
	  --stripes Number,
	  --stripesize Number[k|unit] ]
  	[ PV ... ]

  Common options:
  	[ --commandprofile String,
	  --config String,
	  --debug,
	  --driverloaded y|n,
	  --help,
	  --profile String,
	  --quiet,
	  --verbose,
	  --version,
	  --yes,
	  --force,
	  --test,
	  --noudevsync ]

  (Use --help --help for usage notes.)

$ lvresize --poolmetadatasize 4
  Failed to find a matching command definition.
  Closest command usage is:
  lvresize --poolmetadatasize Number[m|unit] LV_thinpool
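
Conceptually, the matching above checks each definition's required
parameters against what was entered, and remembers a candidate to
print as the closest usage when nothing matches exactly.  The
sketch below (building on the hypothetical types sketched earlier)
only illustrates that idea; arg_is_set() and the simplistic
closest-match choice are assumptions, not the actual matching code.

#include <string.h>

int arg_is_set(struct cmd_context *cmd, int opt);    /* assumed helper */

static const struct command *match_command(struct cmd_context *cmd,
                        const char *name, int pos_count,
                        const struct command *commands, int ncommands,
                        const struct command **closest)
{
    *closest = NULL;

    for (int i = 0; i < ncommands; i++) {
        const struct command *c = &commands[i];
        int required_ok = 1;

        if (strcmp(c->name, name))
            continue;

        if (!*closest)
            *closest = c;   /* candidate for "Closest command usage is:" */

        /* every required option arg must have been entered */
        for (int r = 0; r < c->ro_count; r++)
            if (!arg_is_set(cmd, c->required_opt_args[r].opt))
                required_ok = 0;

        /* enough positional args must have been entered */
        if (pos_count < c->rp_count)
            required_ok = 0;

        if (required_ok)
            return c;
    }

    return NULL;   /* caller reports the error and prints (*closest)->usage */
}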

Man page prototypes are also generated from the same original
command definitions, and are always in sync with the code
and help text.

Very early in command execution, a matching command definition
is found.  lvm then knows the operation being done, and that
the provided args conform to the definition.  This will allow
much of the ad hoc checking/validation throughout the code to
be removed.

Each command definition can also be routed to a specific
function that implements it.  The function is associated with
an enum value for the command definition (generated from
the ID string).  These per-command-definition implementation
functions have not yet been created, so all commands
currently fall back to the existing implementation.
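
As a rough sketch of that routing (again using the hypothetical
types above): the enum value derived from the ID string can select
a dedicated function, with everything else falling back to the
existing code.  lvresize_by_size() is made up for the example and
the enum value is a placeholder.

enum { lvresize_by_size_CMD = 1 };  /* placeholder for the generated enum value */

int lvresize_by_size(struct cmd_context *cmd, int argc, char **argv);  /* hypothetical */

static int run_matched_command(struct cmd_context *cmd,
                               const struct command *c,
                               int argc, char **argv)
{
    switch (c->command_line_enum) {
    case lvresize_by_size_CMD:
        /* dedicated implementation keyed off the ID-derived enum */
        return lvresize_by_size(cmd, argc, argv);
    default:
        /* current behavior: fall back to the existing implementation */
        return c->fn(cmd, argc, argv);
    }
}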

Using per-command-definition functions will allow removal of
much of the code that tries to figure out what the command is
meant to do, which is currently based on ad hoc and complicated
option analysis.  With the new functions, what the command is
doing is already known from the associated command definition.
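
For example, a per-definition function for lvresize_by_size could
read its arguments directly, because the matched definition already
guarantees they are present.  The helpers and identifiers declared
below (size_ARG placeholder, arg_uint64_value, resize_lv_common)
are assumptions made only to keep the sketch self-contained.

enum { size_ARG = 2 };  /* placeholder; real option enums come from generated headers */

uint64_t arg_uint64_value(struct cmd_context *cmd, int opt, uint64_t def);     /* assumed */
int resize_lv_common(struct cmd_context *cmd, const char *lv, uint64_t size);  /* hypothetical */

int lvresize_by_size(struct cmd_context *cmd, int argc, char **argv)
{
    /* --size and the LV positional arg are guaranteed by the matched
     * definition, so no option analysis is needed to decide what the
     * command is doing. */
    uint64_t new_size = arg_uint64_value(cmd, size_ARG, 0);
    const char *lv_name = argv[0];

    return resize_lv_common(cmd, lv_name, new_size);
}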

So, this first phase validates every user-entered command
against the set of command prototypes, then calls the existing
implementation.  The second phase can associate an implementation
function with each definition, and take further advantage of the
known operation to avoid the complicated option analysis.
2016-10-17 16:45:49 -05:00
1322 changed files with 102238 additions and 175657 deletions

.gitignore

@ -1,7 +1,6 @@
*.5
*.7
*.8
*.8_gen
*.a
*.d
*.o
@ -25,123 +24,9 @@ make.tmpl
/autom4te.cache/
/autoscan.log
/build/
/config.cache
/config.log
/config.status
/configure.scan
/cscope.*
/html/
/python/
/reports/
/cscope.out
/tags
/tmp/
coverity/coverity_model.xml
# gcov files:
*.gcda
*.gcno
tools/man-generator
tools/man-generator.c
test/.lib-dir-stamp
test/.tests-stamp
test/lib/dmsecuretest
test/lib/lvchange
test/lib/lvconvert
test/lib/lvcreate
test/lib/lvdisplay
test/lib/lvextend
test/lib/lvmconfig
test/lib/lvmdiskscan
test/lib/lvmsadc
test/lib/lvmsar
test/lib/lvreduce
test/lib/lvremove
test/lib/lvrename
test/lib/lvresize
test/lib/lvs
test/lib/lvscan
test/lib/pvchange
test/lib/pvck
test/lib/pvcreate
test/lib/pvdisplay
test/lib/pvmove
test/lib/pvremove
test/lib/pvresize
test/lib/pvs
test/lib/pvscan
test/lib/securetest
test/lib/vgcfgbackup
test/lib/vgcfgrestore
test/lib/vgchange
test/lib/vgck
test/lib/vgconvert
test/lib/vgcreate
test/lib/vgdisplay
test/lib/vgexport
test/lib/vgextend
test/lib/vgimport
test/lib/vgimportclone
test/lib/vgmerge
test/lib/vgmknodes
test/lib/vgreduce
test/lib/vgremove
test/lib/vgrename
test/lib/vgs
test/lib/vgscan
test/lib/vgsplit
test/api/lvtest.t
test/api/pe_start.t
test/api/percent.t
test/api/python_lvm_unit.py
test/api/test
test/api/thin_percent.t
test/api/vglist.t
test/api/vgtest.t
test/lib/aux
test/lib/check
test/lib/clvmd
test/lib/dm-version-expected
test/lib/dmeventd
test/lib/dmsetup
test/lib/dmstats
test/lib/fail
test/lib/flavour-ndev-cluster
test/lib/flavour-ndev-cluster-lvmpolld
test/lib/flavour-ndev-lvmetad
test/lib/flavour-ndev-lvmetad-lvmpolld
test/lib/flavour-ndev-lvmpolld
test/lib/flavour-ndev-vanilla
test/lib/flavour-udev-cluster
test/lib/flavour-udev-cluster-lvmpolld
test/lib/flavour-udev-lvmetad
test/lib/flavour-udev-lvmetad-lvmpolld
test/lib/flavour-udev-lvmlockd-dlm
test/lib/flavour-udev-lvmlockd-sanlock
test/lib/flavour-udev-lvmlockd-test
test/lib/flavour-udev-lvmpolld
test/lib/flavour-udev-vanilla
test/lib/fsadm
test/lib/get
test/lib/inittest
test/lib/invalid
test/lib/lvm
test/lib/lvm-wrapper
test/lib/lvmchange
test/lib/lvmdbusd.profile
test/lib/lvmetad
test/lib/lvmpolld
test/lib/not
test/lib/paths
test/lib/paths-common
test/lib/runner
test/lib/should
test/lib/test
test/lib/thin-performance.profile
test/lib/utils
test/lib/version-expected
test/unit/dmraid_t.c
test/unit/unit-test


@ -1,25 +0,0 @@
BSD 2-Clause License
Copyright (c) 2014, Red Hat, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,6 +1,6 @@
#
# Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
# Copyright (C) 2004-2018 Red Hat, Inc. All rights reserved.
# Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
@ -18,7 +18,7 @@ top_builddir = @top_builddir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
SUBDIRS = libdm conf daemons include lib libdaemon man scripts tools
SUBDIRS = conf daemons include lib libdaemon libdm man scripts tools
ifeq ("@UDEV_RULES@", "yes")
SUBDIRS += udev
@ -28,6 +28,14 @@ ifeq ("@INTL@", "yes")
SUBDIRS += po
endif
ifeq ("@APPLIB@", "yes")
SUBDIRS += liblvm
endif
ifeq ("@PYTHON_BINDINGS@", "yes")
SUBDIRS += python
endif
ifeq ($(MAKECMDGOALS),clean)
SUBDIRS += test
endif
@ -35,7 +43,8 @@ endif
ifeq ($(MAKECMDGOALS),distclean)
SUBDIRS = conf include man test scripts \
libdaemon lib tools daemons libdm \
udev po
udev po liblvm python \
unit-tests/datastruct unit-tests/mm unit-tests/regex
tools.distclean: test.distclean
endif
DISTCLEAN_DIRS += lcov_reports*
@ -43,24 +52,22 @@ DISTCLEAN_TARGETS += config.cache config.log config.status make.tmpl
include make.tmpl
include $(top_srcdir)/base/Makefile
include $(top_srcdir)/device_mapper/Makefile
include $(top_srcdir)/test/unit/Makefile
lib: libdaemon $(BASE_TARGET) $(DEVICE_MAPPER_TARGET)
libdm: include
libdaemon: include
lib: libdm libdaemon
liblvm: lib
daemons: lib libdaemon tools
scripts: lib
tools: lib libdaemon
tools: lib libdaemon device-mapper
po: tools daemons
man: tools
all_man: tools
test: tools daemons
unit-test run-unit-test: test
scripts: liblvm libdm
lib.device-mapper: include.device-mapper
libdm.device-mapper: include.device-mapper
liblvm.device-mapper: include.device-mapper
daemons.device-mapper: libdm.device-mapper
tools.device-mapper: libdm.device-mapper
scripts.device-mapper: include.device-mapper
device-mapper: tools.device-mapper daemons.device-mapper man.device-mapper
device_mapper: device-mapper
ifeq ("@INTL@", "yes")
lib.pofile: include.pofile
@ -70,25 +77,28 @@ po.pofile: tools.pofile daemons.pofile
pofile: po.pofile
endif
ifeq ("@PYTHON_BINDINGS@", "yes")
python: liblvm
endif
ifneq ("$(CFLOW_CMD)", "")
tools.cflow: libdm.cflow lib.cflow
daemons.cflow: tools.cflow
cflow: include.cflow
endif
CSCOPE_DIRS = base daemons device_mapper include lib libdaemon scripts tools libdm test
ifneq ("@CSCOPE_CMD@", "")
cscope.out:
@CSCOPE_CMD@ -b -R $(patsubst %,-s%,$(addprefix $(srcdir)/,$(CSCOPE_DIRS)))
@CSCOPE_CMD@ -b -R -s$(top_srcdir)
all: cscope.out
endif
DISTCLEAN_TARGETS += cscope.out
CLEAN_DIRS += autom4te.cache
check check_system check_cluster check_local check_lvmpolld check_lvmlockd_test check_lvmlockd_dlm check_lvmlockd_sanlock: test
check check_system check_cluster check_local check_lvmetad check_lvmpolld check_lvmlockd_test check_lvmlockd_dlm check_lvmlockd_sanlock unit: all
$(MAKE) -C test $(@)
conf.generate man.generate: tools
conf.generate: tools
# how to use parenthesis in makefiles
leftparen:=(
@ -112,15 +122,14 @@ rpm: dist
$(LN_S) -f $(abs_top_srcdir)/spec/packages.inc $(rpmbuilddir)/SOURCES
DM_VER=$$(cut -d- -f1 $(top_srcdir)/VERSION_DM);\
GIT_VER=$$(cd $(top_srcdir); git describe | cut -d- --output-delimiter=. -f2,3 || echo 0);\
$(SED) -e "s,\(device_mapper_version\) [0-9.]*$$,\1 $$DM_VER," \
sed -e "s,\(device_mapper_version\) [0-9.]*$$,\1 $$DM_VER," \
-e "s,^\(Version:[^0-9%]*\)[0-9.]*$$,\1 $(LVM_VER)," \
-e "s,^\(Release:[^0-9%]*\)[0-9.]\+,\1 $$GIT_VER," \
$(top_srcdir)/spec/source.inc >$(rpmbuilddir)/SOURCES/source.inc
V=$(V) rpmbuild -v --define "_topdir $(rpmbuilddir)" -ba $(top_srcdir)/spec/lvm2.spec
rpmbuild -v --define "_topdir $(rpmbuilddir)" -ba $(top_srcdir)/spec/lvm2.spec
generate: conf.generate man.generate
generate: conf.generate
$(MAKE) -C conf generate
$(MAKE) -C man generate
all_man:
$(MAKE) -C man all_man
@ -134,7 +143,7 @@ install_system_dirs:
$(INSTALL_ROOT_DIR) $(DESTDIR)$(DEFAULT_RUN_DIR)
$(INSTALL_ROOT_DATA) /dev/null $(DESTDIR)$(DEFAULT_CACHE_DIR)/.cache
install_initscripts:
install_initscripts:
$(MAKE) -C scripts install_initscripts
install_systemd_generators:
@ -147,36 +156,26 @@ install_systemd_units:
install_all_man:
$(MAKE) -C man install_all_man
ifeq ("@PYTHON_BINDINGS@", "yes")
install_python_bindings:
$(MAKE) -C liblvm/python install_python_bindings
endif
install_tmpfiles_configuration:
$(MAKE) -C scripts install_tmpfiles_configuration
help:
@echo -e "\nAvailable targets:"
@echo " all Default target."
@echo " all_man Build all man pages with generators."
@echo " clean Remove all compile files."
@echo " device-mapper Device mapper part of lvm2."
@echo " dist Generate distributable file."
@echo " distclean Remove all build files."
@echo " generate Generate man pages for sources."
@echo " help Display callable targets."
@echo " install Install all files."
@echo " install_all_man Install all man pages."
@echo " install_cluster Install cmirrord."
@echo " install_device-mapper Install device mapper files."
@echo " install_initscripts Install initialization scripts."
@echo " install_lvm2 Install lvm2 files."
@echo " install_systemd_units Install systemd units."
@echo " lcov Generate lcov output."
@echo " lcov-dated Generate lcov with timedate suffix."
@echo " lcov-reset Reset lcov counters"
@echo " man Build man pages."
@echo " rpm Build rpm."
@echo " run-unit-test Run unit tests."
@echo " tags Generate c/etags."
LCOV_TRACES = libdm.info lib.info liblvm.info tools.info \
libdaemon/client.info libdaemon/server.info \
daemons/clvmd.info \
daemons/dmeventd.info \
daemons/lvmetad.info \
daemons/lvmlockd.info \
daemons/lvmpolld.info
CLEAN_TARGETS += $(LCOV_TRACES)
ifneq ("$(LCOV)", "")
.PHONY: lcov-reset lcov lcov-dated
.PHONY: lcov-reset lcov lcov-dated $(LCOV_TRACES)
ifeq ($(MAKECMDGOALS),lcov-dated)
LCOV_REPORTS_DIR := lcov_reports-$(shell date +%Y%m%d%k%M%S)
@ -186,26 +185,59 @@ LCOV_REPORTS_DIR := lcov_reports
endif
lcov-reset:
$(LCOV) --zerocounters --directory $(top_builddir)
$(LCOV) --zerocounters $(addprefix -d , $(basename $(LCOV_TRACES)))
# maybe use subdirs processing to create tracefiles...
$(LCOV_TRACES):
$(LCOV) -b $(basename $@) -d $(basename $@) \
--ignore-errors source -c -o - | $(SED) \
-e "s/\(dmeventd_lvm.[ch]\)/plugins\/lvm2\/\1/" \
-e "s/dmeventd_\(mirror\|snapshot\|thin\|raid\)\.c/plugins\/\1\/dmeventd_\1\.c/" \
>$@
ifneq ("$(GENHTML)", "")
lcov:
$(RM) -rf $(LCOV_REPORTS_DIR)
lcov: $(LCOV_TRACES)
$(RM) -r $(LCOV_REPORTS_DIR)
$(MKDIR_P) $(LCOV_REPORTS_DIR)
$(LCOV) --capture --directory $(top_builddir) --ignore-errors source \
--output-file $(LCOV_REPORTS_DIR)/out.info
-test ! -s $(LCOV_REPORTS_DIR)/out.info || \
$(GENHTML) -o $(LCOV_REPORTS_DIR) --ignore-errors source \
$(LCOV_REPORTS_DIR)/out.info
for i in $(LCOV_TRACES); do \
test -s $$i -a $$(wc -w <$$i) -ge 100 && lc="$$lc $$i"; \
done; \
test -z "$$lc" || $(GENHTML) -p @abs_top_builddir@ \
-o $(LCOV_REPORTS_DIR) $$lc
endif
endif
ifneq ($(shell which ctags 2>/dev/null),)
ifeq ("$(TESTING)", "yes")
# testing and report generation
RUBY=ruby1.9 -Ireport-generators/lib -Ireport-generators/test
.PHONY: unit-test ruby-test test-programs
# FIXME: put dependencies on libdm and liblvm
# FIXME: Should be handled by Makefiles in subdirs, not here at top level.
test-programs:
cd unit-tests/regex && $(MAKE)
cd unit-tests/datastruct && $(MAKE)
cd unit-tests/mm && $(MAKE)
unit-test: test-programs
$(RUBY) report-generators/unit_test.rb $(shell find . -name TESTS)
$(RUBY) report-generators/title_page.rb
memcheck: test-programs
$(RUBY) report-generators/memcheck.rb $(shell find . -name TESTS)
$(RUBY) report-generators/title_page.rb
ruby-test:
$(RUBY) report-generators/test/ts.rb
endif
ifneq ($(shell which ctags),)
.PHONY: tags
tags:
test -z "$(shell find $(addprefix $(top_srcdir)/,$(CSCOPE_DIRS)) -type f -name '*.[ch]' -newer tags 2>/dev/null | head -1)" || $(RM) tags
test -f tags || find $(addprefix $(top_srcdir)/,$(CSCOPE_DIRS)) -maxdepth 5 -type f -name '*.[ch]' -exec ctags -a '{}' +
test -z "$(shell find $(top_srcdir) -type f -name '*.[ch]' -newer tags 2>/dev/null | head -1)" || $(RM) tags
test -f tags || find $(top_srcdir) -maxdepth 5 -type f -name '*.[ch]' -exec ctags -a '{}' +
CLEAN_TARGETS += tags
endif

README

@ -1,23 +1,16 @@
This tree contains the LVM2 and device-mapper tools and libraries.
This is development branch, for stable 2.02 release see stable-2.02 branch.
For more information about LVM2 read the changelog in the WHATS_NEW file.
Installation instructions are in INSTALL.
There is no warranty - see COPYING and COPYING.LIB.
Tarballs are available from:
ftp://sourceware.org/pub/lvm2/
https://github.com/lvmteam/lvm2/releases
ftp://sources.redhat.com/pub/lvm2/
The source code is stored in git:
https://sourceware.org/git/?p=lvm2.git
git clone git://sourceware.org/git/lvm2.git
mirrored to:
https://github.com/lvmteam/lvm2
git clone https://github.com/lvmteam/lvm2.git
git clone git@github.com:lvmteam/lvm2.git
http://git.fedorahosted.org/git/lvm2.git
git clone git://git.fedorahosted.org/git/lvm2.git
Mailing list for general discussion related to LVM2:
linux-lvm@redhat.com
@ -35,17 +28,6 @@ and multipath-tools:
dm-devel@redhat.com
Subscribe from https://www.redhat.com/mailman/listinfo/dm-devel
Website:
https://sourceware.org/lvm2/
The source code repository used until 7th June 2012 is accessible here:
http://sources.redhat.com/cgi-bin/cvsweb.cgi/LVM2/?cvsroot=lvm2.
Report upstream bugs at:
https://bugzilla.redhat.com/enter_bug.cgi?product=LVM%20and%20device-mapper
or open issues at:
https://github.com/lvmteam/lvm2/issues
The source code repository used until 7th June 2012 is accessible using CVS:
cvs -d :pserver:cvs@sourceware.org:/cvs/lvm2 login cvs
cvs -d :pserver:cvs@sourceware.org:/cvs/lvm2 checkout LVM2
The password is cvs.

TESTING

@ -1,62 +0,0 @@
LVM2 Test Suite
===============
The codebase contains many tests in the test subdirectory.
Before running tests
--------------------
Keep in mind the testsuite MUST run under root user.
It is recommended not to use LVM on the test machine, especially when running
tests with udev (`make check_system`.)
You MUST disable (or mask) any LVM daemons:
- lvmetad
- dmeventd
- lvmpolld
- lvmdbusd
- lvmlockd
- clvmd
- cmirrord
For running cluster tests, we are using singlenode locking. Pass
`--with-clvmd=singlenode` to configure.
NOTE: This is useful only for testing, and should not be used in production
code.
To run D-Bus daemon tests, existing D-Bus session is required.
Running tests
-------------
As root run:
make check
To run only tests matching a string:
make check T=test
To skip tests matching a string:
make check S=test
There are other targets and many environment variables can be used to tweak the
testsuite - for full list and description run `make -C test help`.
Installing testsuite
--------------------
It is possible to install and run a testsuite against installed LVM. Run the
following:
make -C test install
Then lvm2-testsuite binary can be executed to test installed binaries.
See `lvm2-testsuite --help` for options. The same environment variables can be
used as with `make check`.


@ -1 +1 @@
2.03.16(2)-git (2022-02-07)
2.02.167(2)-git (2016-09-26)


@ -1 +1 @@
1.02.185-git (2022-02-07)
1.02.136-git (2016-09-26)

WHATS_NEW

@ -1,674 +1,5 @@
Version 2.03.16 -
====================================
Version 2.03.15 - 07th February 2022
====================================
Remove service based autoactivation. global/event_activation = 0 is NOOP.
Improve support for metadata profiles for --type writecache.
Use cache or active DM device when available with new kernels.
Introduce function to utilize UUIDs from DM_DEVICE_LIST.
Increase some hash table size to better support large device sets.
Version 2.03.14 - 20th October 2021
===================================
Device scanning is skipping directories on different filesystems.
Print info message with too many or too large archived files.
Reduce metadata readings during scanning phase.
Optimize computation of crc32 check sum with multiple PVs.
Enhance recover path on cache creation failure.
Filter out unsupported MQ/SMQ cache policy setting.
Fix memleak in mpath filter.
Support newer location for VDO statistics.
Add support for VDO async-unsafe write policy.
Improve lvm_import_vdo script.
Support VDO LV with lvcreate -ky.
Fix lvconvert for VDO LV bigger then 2T.
Create VDO LVs automatically without zeroing.
Rename vdoimport to lvm_import_vdo.
Version 2.03.13 - 11th August 2021
==================================
Changes in udev support:
- obtain_device_list_from_udev defaults to 0.
- see devices/external_device_info_source,
devices/obtain_device_list_from_udev, and devices/multipath_wwids_file help
in lvm.conf
Fix devices file handling of loop with deleted backing file.
Fix devices file handling of scsi_debug WWIDs.
Fix many static analysis issues.
Support --poolmetadataspare with vgsplit and vgmerge.
Fix detection of active components of external origin volume.
Add vdoimport tool to support conversion of VDO volumes.
Support configurable allocation/vdo_pool_header_size.
Fix handling of lvconvert --type vdo-pool --virtualsize.
Simplified handling of archive() and backup() internal calls.
Add 'idm' locking type for IDM lock manager.
Fix load of kvdo target when it is not present in memory (2.03.12).
Version 2.03.12 - 07th May 2021
===============================
Allow attaching cache to thin data volume.
Fix memleak when generating list of outdated pvs.
Better hyphenation usage in man pages.
Replace use of deprecated security_context_t with char*.
Configure supports AIO_LIBS and AIO_CFLAGS.
Improve build process for static builds.
New --setautoactivation option to modify LV or VG auto activation.
New metadata based autoactivation property for LVs and VGs.
Improve signal handling with lvmpolld.
Signal handler can interrupt command also for SIGTERM.
Lvreduce --yes support.
Add configure option --with/out-symvers for non-glibc builds.
Report error when the filesystem is missing on fsadm resized volume.
Handle better blockdev with --getsize64 support for fsadm.
Do not include editline/history.h when using editline library.
Support error and zero segtype for thin-pool data for testing.
Support mixed extension for striped, error and zero segtypes.
Support resize also for stacked virtual volumes.
Skip dm-zero devices just like with dm-error target.
Reduce ioctl() calls when checking target status.
Merge polling does not fail, when LV is found to be already merged.
Poll volumes with at least 100ms delays.
Do not flush dm cache when cached LV is going to be removed.
New lvmlockctl_kill_command configuration option.
Support interruption while waiting on device close before deactivation.
Flush thin-pool messages before removing more thin volumes.
Improve hash function with less collisions and make it faster.
Reduce ioctl count when deactivating volumes.
Reduce number of metadata parsing.
Enhance performance of lvremove and vgremove commands.
Support interruption when taking archive and backup.
Accelerate large lvremoves.
Speedup search for cached device nodes.
Speedup command initialization.
Add devices file feature, off by default for now.
Support extension of writecached volumes.
Fix problem with unbound variable usage within fsadm.
Fix IMSM MD RAID detection on 4k devices.
Check for presence of VDO target before starting any conversion.
Support metatadata profiles with volume VDO pool conversions.
Support -Zn for conversion of already formated VDO pools.
Avoid removing LVs on error path of lvconvert during creation volumes.
Fix crashing lvdisplay when thin volume was waiting for merge.
Support option --errorwhenfull when converting volume to thin-pool.
Improve thin-performance profile support conversion to thin-pool.
Add workaround to avoid read of internal 'converted' devices.
Prohibit merging snapshot into the read-only thick snapshot origin.
Restore support for flipping rw/r permissions for thin snapshot origin.
Support resize of cached volumes.
Disable autoactivation with global/event_activation=0.
Check if lvcreate passes read_only_volume_list with tags and skips zeroing.
Allocation prints better error when metadata cannot fit on a single PV.
Pvmove can better resolve full thin-pool tree move.
Limit pool metadata spare to 16GiB.
Improves conversion and allocation of pool metadata.
Support thin pool metadata 15.88GiB, adds 64MiB, thin_pool_crop_metadata=0.
Enhance lvdisplay to report raid available/partial.
Support online rename of VDO pools.
Improve removal of pmspare when last pool is removed.
Fix problem with wiping of converted LVs.
Fix memleak in scanning (2.03.11).
Fix corner case allocation for thin-pools.
Version 2.03.11 - 08th January 2021
===================================
Fix pvck handling MDA at offset different from 4096.
Partial or degraded activation of writecache is not allowed.
Enhance error handling for fsadm and handle correct fsck result.
Dmeventd lvm plugin ignores higher reserved_stack lvm.conf values.
Support using BLKZEROOUT for clearing devices.
Support interruption when wipping LVs.
Support interruption for bcache waiting.
Fix bcache when device has too many failing writes.
Fix bcache waiting for IO completion with failing disks.
Configure use own python path name order to prefer using python3.
Add configure --enable-editline support as an alternative to readline.
Enhance reporting and error handling when creating thin volumes.
Enable vgsplit for VDO volumes.
Lvextend of vdo pool volumes ensure at least 1 new VDO slab is added.
Use revert_lv() on reload error path after vg_revert().
Configure --with-integrity enabled.
Restore lost signal blocking while VG lock is held.
Improve estimation of needed extents when creating thin-pool.
Use extra 1% when resizing thin-pool metadata LV with --use-policy.
Enhance --use-policy percentage rounding.
Configure --with-vdo and --with-writecache as internal segments.
Improving VDO man page examples.
Allow pvmove of writecache origin.
Report integrity fields.
Integrity volumes defaults to journal mode.
Switch code base to use flexible array syntax.
Fix 64bit math when calculation cachevol size.
Preserve uint32_t for seqno handling.
Switch from mmap to plain read when loading regular files.
Update lvmvdo man page and better explain DISCARD usage.
Version 2.03.10 - 09th August 2020
==================================
Add writecache and integrity support to lvmdbusd.
Generate unique cachevol name when default required from lvcreate.
Converting RAID1 volume to one with same number of legs now succeeds with a
warning.
Fix conversion to raid from striped lagging type.
Fix conversion to 'mirrored' mirror log with larger regionsize.
Zero pool metadata on allocation (disable with allocation/zero_metadata=0).
Failure in zeroing or wiping will fail command (bypass with -Zn, -Wn).
Add lvcreate of new cache or writecache lv with single command.
Fix running out of free buffers for async writing for larger writes.
Add integrity with raid capability.
Fix support for lvconvert --repair used by foreign apps (i.e. Docker).
Version 2.03.09 - 26th March 2020
=================================
Fix formating of vdopool (vdo_slab_size_mb was smaller by 2 bits).
Fix showing of a dm kernel error when uncaching a volume with cachevol.
Version 2.03.08 - 11th February 2020
====================================
Prevent problematic snapshots of writecache volumes.
Add error handling for failing allocation in _reserve_area().
Fix memleak in syncing of internal cache.
Fix pvck dump_current_text memleak.
Fix lvmlockd result code on error path for _query_lock_lv().
Update pvck man page and help output.
Reject invalid writecache high/low_watermark setting.
Report writecache status.
Accept more output lines from vdo_format.
Prohibit reshaping of stacked raid LVs.
Avoid running cache input arg validation when creating vdo pool.
Prevent raid reshaping of stacked volumes.
Added VDO lvmdbusd methods for enable/disable compression & dedupe.
Added VDO lvmdbusd method for converting LV to VDO pool.
Version 2.03.07 - 30th November 2019
====================================
Subcommand in vgck for repairing headers and metadata.
Ensure minimum required region size on striped RaidLV creation.
Fix resize of thin-pool with data and metadata of different segtype.
Improve mirror type leg splitting.
Improve error path handling in daemons on shutdown.
Fix activation order when removing merged snapshot.
Experimental VDO support for lvmdbusd.
Version 2.03.06 - 23rd October 2019
===================================
Add _cpool suffix to cache-pool LV name when used by caching LV.
No longer store extra UUID for cmeta and cdata cachevol layer.
Enhance activation of cache devices with cachevols.
Add _cvol in list of protected suffixes and start use it with DM UUID.
Rename LV converted to cachevol to use _cvol suffix.
Use normal LVs for wiping of cachevols.
Reload cleanered cache DM only with cleaner policy.
Fix cmd return when zeroing of cachevol fails.
Extend lvs to show all VDO properties.
Preserve VDO write policy with vdopool.
Increase default vdo bio threads to 4.
Continue report when cache_status fails.
Add support for DM_DEVICE_GET_TARGET_VERSION into device_mapper.
Fix cmirrord usage of header files from device_mapper subdir.
Allow standalone activation of VDO pool just like for thin-pools.
Activate thin-pool layered volume as 'read-only' device.
Ignore crypto devices with UUID signature CRYPT-SUBDEV.
Enhance validation for thin and cache pool conversion and swapping.
Improve internal removal of cached devices.
Synchronize with udev when dropping snapshot.
Add missing device synchronization point before removing pvmove node.
Correctly set read_ahead for LVs when pvmove is finished.
Remove unsupported OPTIONS+="event_timeout" udev rule from 11-dm-lvm.rules.
Prevent creating VGs with PVs with different logical block sizes.
Fix metadata writes from corrupting with large physical block size.
Version 2.03.05 - 15th June 2019
================================
Fix command definition for pvchange -a.
Add vgck --updatemetadata command that will repair metadata problems.
Improve VG reading to work if one good copy of metadata is found.
Report/display/scan commands that read VGs will no longer write/repair.
Move metadata repairs from VG reading to VG writing.
Add config setting md_component_checks to control MD component checks.
Add end of device MD component checks when dev has no udev info.
Version 2.03.04 - 10th June 2019
================================
Remove unused_duplicate_devs from cmd causing segfault in dmeventd.
Version 2.03.03 - 07th June 2019
================================
Report no_discard_passdown for cache LVs with lvs -o+kernel_discards.
Add pvck --dump option to extract metadata.
Fix signal delivery checking race in libdaemon (lvmetad).
Add missing Before=shutdown.target to LVM2 services to fix shutdown ordering.
Skip autoactivation for a PV when PV size does not match device size.
Remove first-pvscan-initialization which should no longer be needed.
Add remote refresh through lvmlockd/dlm for shared LVs after lvextend.
Ignore foreign and shared PVs for pvscan online files.
Add config setting to control fields in debug file and verbose output.
Add command[pid] and timestamp to debug file and verbose output.
Fix missing growth of _pmsmare volume when extending _tmeta volume.
Automatically grow thin metadata, when thin data gets too big.
Add synchronization with udev before removing cached devices.
Add support for caching VDO LVs and VDOPOOL LVs.
Add support for vgsplit with cached devices.
Query mpath device only once per command for its state.
Use device INFO instead of STATUS when checking for mpath device uuid.
Change default io_memory_size from 4 to 8 MiB.
Add config setting io_memory_size to set bcache size.
Fix pvscan autoactivation for concurrent pvscans.
Change scan_lvs default to 0 so LVs are not scanned for PVs.
Thin-pool selects power-of-2 chunk size by default.
Cache selects power-of-2 chunk size by default.
Support reszing for VDOPoolLV and VDOLV.
Improve -lXXX%VG modifier which improves cache segment estimation.
Ensure migration_threshold for cache is at least 8 chunks.
Restore missing man info lvcreate --zero for thin-pools.
Drop misleadning comment for metadata minimum_io_size for VDO segment.
Add device hints to reduce scanning.
Introduce LVM_SUPPRESS_SYSLOG to suppress syslog usage by generator.
Fix generator quering lvmconfig unpresent config option.
Fix memleak on bcache error path code.
Fix missing unlock on lvm2 dmeventd plugin error path initialization.
Improve Makefile dependency tracking.
Move VDO support towards V2 target (6.2) support.
Version 2.03.02 - 18th December 2018
====================================
Fix missing proper initialization of pv_list struct when adding pv.
Fix (de)activation of RaidLVs with visible SubLVs.
Prohibit mirrored 'mirror' log via lvcreate and lvconvert.
Use sync io if async io_setup fails, or use_aio=0 is set in config.
Fix more issues reported by coverity scan.
Version 2.03.01 - 31st October 2018
===================================
Version 2.03.00 - 10th October 2018
===================================
Add hot fix to avoiding locking collision when monitoring thin-pools.
Allow raid4 -> linear conversion request.
Fix lvconvert striped/raid0/raid0_meta -> raid6 regression.
Add 'lvm2-activation-generator:' prefix for kmsg messages logged by generator.
Add After=rbdmap.service to {lvm2-activation-net,blk-availability}.service.
Reduce max concurrent aios to avoid EMFILE with many devices.
Fix lvconvert conversion attempts to linear.
Fix lvconvert raid0/raid0_meta -> striped regression.
Fix lvconvert --splitmirror for mirror type (2.02.178).
Do not pair cache policy and cache metadata format.
lvconvert: reject conversions on raid1 LVs with split tracked SubLVs
lvconvert: reject conversions on raid1 split tracked SubLVs
Add basic creation support for VDO target.
Never send any discard ioctl with test mode.
Fix thin-pool alloc which needs same PV for data and metadata.
Extend list of non-memlocked areas with newly linked libs.
Enhance vgcfgrestore to check for active LVs in restored VG.
Configure supports --disable-silent-rules for verbose builds.
Fix unmonitoring of merging snapshots.
Cache can uses metadata format 2 with cleaner policy.
Fix check if resized PV can also fit metadata area.
Avoid showing internal error in lvs output or pvmoved LVs.
Remove clvmd
Remove lvmlib (api)
Remove lvmetad
Use versionsort to fix archive file expiry beyond 100000 files.
Version 2.02.178-rc1 - 24th May 2018
====================================
Add libaio dependency for build.
Remove lvm1 and pool format handling and add filter to ignore them.
Move some filter checks to after disks are read.
Rework disk scanning and when it is used.
Add new io layer and shift code to using it.
Fix lvconvert's return code on degraded -m raid1 conversion.
--enable-testing switch for ./configure has been removed.
--with-snapshots switch for ./configure has been removed.
--with-mirrors switch for ./configure has been removed.
--with-raid switch for ./configure has been removed.
--with-thin switch for ./configure has been removed.
--with-cache switch for ./configure has been removed.
Include new unit-test framework and unit tests.
Extend validation of region_size for mirror segment.
Reload whole device stack when reinitilizing mirror log.
Mirrors without monitoring are WARNING and not blocking on error.
Detect too big region_size with clustered mirrors.
Fix evaluation of maximal region size for mirror log.
Enhance mirror log size estimation and use smaller size when possible.
Fix incorrect mirror log size calculation on 32bit arch.
Enhance preloading tree creating.
Fix regression on acceptance of any LV on lvconvert.
Restore usability of thin LV to be again external origin for another thin.
Keep systemd vars on change event in 69-dm-lvm-metad.rules for systemd reload.
Write systemd and non-systemd rule in 69-dm-lvm-metad.rules, GOTO active one.
Add test for activation/volume_list (Sub)LV remnants.
Disallow usage of cache format 2 with mq cache policy.
Again accept striped LV as COW LV with lvconvert -s (2.02.169).
Fix raid target version testing for supported features.
Allow activation of pools when thin/cache_check tool is missing.
Remove RaidLV on creation failure when rmeta devices can't be activated.
Add prioritized_section() to restore cookie boundaries (2.02.177).
Enhance error messages when read error happens.
Enhance mirror log initialization for old mirror target.
Skip private crypto and stratis devices.
Skip frozen raid devices from scanning.
Activate RAID SubLVs on read_only_volume_list readwrite.
Offer convenience type raid5_n converting to raid10.
Automatically avoid reading invalid snapshots during device scan.
Ensure COW device is writable even for read-only thick snapshots.
Support activation of component LVs in read-only mode.
Extend internal library to recognize and work with component LV.
Skip duplicate check for active LV when prompting for its removal.
Activate correct lock holding LV when it is cached.
Do not modify archived metadata when removing striped raid.
Fix memleak on error path when obtaining lv_raid_data_offset.
Fix compatibility size test of extended external origin.
Add external_origin visiting in for_each_sub_lv().
Ensure cluster commands drop their device cache before locking VG.
Do not report LV as remotely active when it's locally exclusive in cluster.
Add deprecate messages for usage of mirrors with mirrorlog.
Separate reporting of monitoring status and error status.
Improve validation of created strings in vgimportclone.
Add missing initialisation of mem pool in systemd generator.
Do not reopen output streams for multithreaded users of liblvm.
Configure ensures /usr/bin dir is checked for dmpd tools.
Restore pvmove support for wide-clustered active volumes (2.02.177).
Avoid non-exclusive activation of exclusive segment types.
Fix trimming sibling PVs when doing a pvmove of raid subLVs.
Preserve exclusive activation during thin snaphost merge.
Avoid exceeding array bounds in allocation tag processing.
Add --lockopt to common options and add option to skip selected locks.
Version 2.02.177 - 18th December 2017
=====================================
When writing text metadata content, use complete 4096 byte blocks.
Change text format metadata alignment from 512 to 4096 bytes.
When writing metadata, consistently skip mdas marked as failed.
Refactor and adjust text format metadata alignment calculation.
Fix python3 path in lvmdbusd to use value detected by configure.
Reduce checks for active LVs in vgchange before background polling.
Ensure _node_send_message always uses clean status of thin pool.
Fix lvmlockd to use pool lock when accessing _tmeta volume.
Report expected sanlock_convert errors only when retries fail.
Avoid blocking in sanlock_convert on SH to EX lock conversion.
Deactivate missing raid LV legs (_rimage_X-missing_Y_Z) on decativation.
Skip read-modify-write when entire block is replaced.
Categorise I/O with reason annotations in debug messages.
Allow extending of raid LVs created with --nosync after a failed repair.
Command will lock memory only when suspending volumes.
Merge segments when pvmove is finished.
Remove label_verify that has never been used.
Ensure very large numbers used as arguments are not casted to lower values.
Enhance reading and validation of options stripes and stripes_size.
Fix printing of default stripe size when user is not using stripes.
Activation code for pvmove automatically discovers holding LVs for resume.
Make a pvmove LV locking holder.
Do not change critical section counter on resume path without real resume.
Enhance activation code to automatically suspend pvmove participants.
Prevent conversion of thin volumes to snapshot origin when lvmlockd is used.
Correct the steps to change lock type in lvmlockd man page.
Retry lock acquisition on recognized sanlock errors.
Fix lock manager error codes in lvmlockd.
Remove unnecessary single read from lvmdiskscan.
Check raid reshape flags in vg_validate().
Add support for pvmove of cache and snapshot origins.
Avoid using precommitted metadata for suspending pvmove tree.
Ehnance pvmove locking.
Deactivate activated LVs on error path when pvmove activation fails.
Add "io" to log/debug_classes for logging low-level I/O.
Eliminate redundant nested VG metadata in VG struct.
Avoid importing persistent filter in vgscan/pvscan/vgrename.
Fix memleak of string buffer when vgcfgbackup runs in secure mode.
Do not print error when clvmd cannot find running clvmd.
Prevent start of new merge of snapshot if origin is already being merged.
Fix offered type for raid6_n_6 to raid5 conversion (raid5_n).
Deactivate sub LVs when removing unused cache-pool.
Do not take backup with suspended devices.
Avoid RAID4 activation on incompatible kernels under all circumstances.
Reject conversion request to striped/raid0 on 2-legged raid4/5.
Version 2.02.176 - 3rd November 2017
====================================
Keep Install section only in lvm2-{lvmetad,lvmpolld}.socket systemd unit.
Fix segfault in lvm_pv_remove in liblvm. (2.02.173)
Do not allow storing VG metadata with LV without any segment.
Fix printed message when thin snapshot was already merged.
Remove created spare LV when creation of thin-pool failed.
Avoid reading ignored metadata when mda gets used again.
Fix detection of moved PVs in vgsplit. (2.02.175)
Ignore --stripes/--stripesize on RAID takeover
Improve used paths for generated systemd units and init shells.
Disallow creation of snapshot of mirror/raid subLV (was never supported).
Fix regression in more advanced vgname extraction in lvconvert (2.02.169).
Allow lvcreate to be used for caching of _tdata LV.
Avoid internal error when resizing cache type _tdata LV (not yet supported).
Show original converted names when lvconverting LV to pool volume.
Move lib code used only by liblvm into metadata-liblvm.c.
Distinguish between device not found and excluded by filter.
Monitor external origin LVs.
Remove the replicator code, including configure --with-replicators.
Allow lvcreate --type mirror to work with 100%FREE.
Improve selection of resource name for complex volume activation lock.
Avoid cutting first character of resource name for activation lock.
Support for encrypted devices in fsadm.
Improve thin pool overprovisioning and repair warning messages.
Fix incorrect adjustment of region size on striped RaidLVs.
Version 2.02.175 - 6th October 2017
===================================
Use --help with blockdev when checking for --getsize64 support in fsadm.
Dump lvmdbusd debug information with SIGUSR1.
Fix metadata corruption in vgsplit and vgmerge intermediate states.
Add PV_MOVED_VG PV status flag to mark PVs moving between VGs.
Fix lvmdbus hang and recognise unknown VG correctly.
Improve error messages when command rules fail.
Require LV name with pvmove in a shared VG.
Allow shared active mirror LVs with lvmlockd, dlm, and cmirrord.
Support lvconvert --repair with cache and cachepool volumes.
lvconvert --repair respects --poolmetadataspare option.
Mark that we don't plan to develop liblvm2app and python bindings any further.
Fix thin pool creation in shared VG. (2.02.173)
Version 2.02.174 - 13th September 2017
Version 2.02.167 -
======================================
Prevent raid1 split with trackchanges in a shared VG.
Avoid double unlocking of client & lockspace mutexes in lvmlockd.
Fix leaking of file descriptor for non-blocking filebased locking.
Fix check for 2nd mda at end of disk fits if using pvcreate --restorefile.
Use maximum metadataarea size that fits with pvcreate --restorefile.
Always clear cached bootloaderarea when wiping label e.g. in pvcreate.
Disallow --bootloaderareasize with pvcreate --restorefile.
Fix lvmlockd check for running lock managers during lock adoption.
Add --withgeneralpreamble and --withlocalpreamble to lvmconfig.
Improve makefiles' linking.
Fix some paths in generated makefiles to respected configured settings.
Add warning when creating thin-pool with zeroing and chunk size >= 512KiB.
Introduce exit code 4 EINIT_FAILED to replace -1 when initialisation fails.
Add synchronization points with udev during reshape of raid LVs.
Version 2.02.173 - 20th July 2017
=================================
Add synchronization points with udev during conversion of raid LVs.
Improve --size args validation and report more detailed error message.
Initialize debugging mutex before any debug message in clvmd.
Log error instead of warn when noticing connection problem with lvmetad.
Fix memory leak in lvmetad when working with duplicates.
Remove restrictions on reshaping open and clustered raid devices.
Add incompatible data_offset to raid metadata to fix reshape activation.
Accept 'lvm -h' and 'lvm --help' as well as 'lvm help' for help.
Suppress error message from accept() on clean lvmetad shutdown.
Tidy clvmd client list processing and fix segfaults.
Protect clvmd debug log messages with mutex and add client id.
Fix shellcheck reported issues for script files.
Version 2.02.172 - 28th June 2017
=================================
Add missing NULL to argv array when spliting cmdline arguments.
Add display_percent helper function for printing percent values.
lvconvert --repair handles failing raid legs (present but marked 'D'ead).
Do not lvdisplay --maps unset settings of cache pool.
Fix lvdisplay --maps for cache pool without policy settings.
Support aborting of flushing cache LV.
Reenable conversion of data and metadata thin-pool volumes to raid.
Improve raid status reporting with lvs.
No longer necessary to '--force' a repair for RAID1.
Linear to RAID1 upconverts now use "recover" sync action, not "resync".
Improve lvcreate --cachepool arg validation.
Limit maximum size of thin-pool for specific chunk size.
Print a warning about in-use PVs with no VG using them.
Disable automatic clearing of PVs that look like in-use orphans.
Cache format2 flag is now using segment name type field.
Support storing status flags via segtype name field.
Stop using '--yes' mode when fsadm runs without terminal.
Extend validation of filesystems resized by fsadm.
Enhance lvconvert automatic settings of possible (raid) LV types.
Allow lvchange to change properties on a thin pool data sub LV.
Fix lvcreate extent percentage calculation for mirrors.
Don't reinstate still-missing devices when correcting inconsistent metadata.
Properly handle subshell return codes in fsadm.
Disallow cachepool creation with policy cleaner and mode writeback.
Version 2.02.171 - 3rd May 2017
===============================
Fix memory warnings by using mempools for command definition processing.
Fix running commands from a script file.
Add pvcreate prompt when device size doesn't match setphysicalvolumesize.
lvconvert - preserve region size on raid1 image count changes
Adjust pvresize/pvcreate messages and prompt if underlying dev size differs.
raid - sanely handle insufficient space on takeover.
Fix configure --enable-notify-dbus status message.
Change configure option name prefix from --enable-lockd to --enable-lvmlockd.
lvcreate - raise mirror/raid default regionsize to 2MiB
Add missing configurable prefix to configuration file installation directory.
Version 2.02.170 - 13th April 2017
==================================
Introduce global/fsadm_executable to make fsadm path configurable.
Look for limited thin pool metadata size when using 16G metadata.
Add lvconvert pool creation rule disallowing options with poolmetadata.
Fix lvconvert when the same LV is incorrectly reused in options.
Fix lvconvert VG name validation in option values.
Fix missing lvmlockd LV locks in lvchange and lvconvert.
Fix dmeventd setup for lvchange --poll.
Fix use of --poll and --monitor with lvchange and vgchange.
Disallow lvconvert of hidden LV to a pool.
Ignore --partial option when not used for activation.
Allow --activationmode option with lvchange --refresh.
Better message on lvconvert --regionsize
Allow valid lvconvert --regionsize change
Add raid10 alias raid10_near
Handle insufficient PVs on lvconvert takeover
Fix SIGINT blocking to prevent corrupted metadata
Fix systemd unit existence check for lvmconf --services --startstopservices.
Check and use PATH_MAX buffers when creating vgrename device paths.
Version 2.02.169 - 28th March 2017
==================================
Automatically decide whether '-' in a man page is a hyphen or a minus sign.
Add build-time configuration command line to 'lvm version' output.
Handle known table line parameter order change in specific raid target vsns.
Conditionally reject raid convert to striped/raid0* after reshape.
Ensure raid6 upconversion restrictions.
Adjust mirror & raid dmeventd plugins for new lvconvert --repair behaviour.
Disable lvmetad when lvconvert --repair is run.
Remove obsolete lvmchange binary - convert to built-in command.
Show more information for cached volumes in lvdisplay [-m].
Add option for lvcreate/lvconvert --cachemetadataformat auto|1|2.
Support cache segment with configurable metadata format.
Add allocation/cache_metadata_format profilable settings.
Use function cache_set_params() for both lvcreate and lvconvert.
Skip rounding on cache chunk size boudary when create cache LV.
Improve cache_set_params support for chunk_size selection.
Fix metadata profile allocation/cache_[mode|policy] setting.
Fix missing support for using allocation/cache_pool_chunk_size setting.
Upstream git moved to https://sourceware.org/git/?p=lvm2
Support conversion of raid type, stripesize and number of disks
Reject writemostly/writebehind in lvchange during resynchronization.
Deactivate active origin first before removal for improved workflow.
Fix regression of accepting both --type and -m with lvresize. (2.02.158)
Add lvconvert --swapmetadata, new specific way to swap pool metadata LVs.
Add lvconvert --startpoll, new specific way to start polling conversions.
Add lvconvert --mergethin, new specific way to merge thin snapshots.
Add lvconvert --mergemirrors, new specific way to merge split mirrors.
Add lvconvert --mergesnapshot, new specific way to combine cow LVs.
Split up lvconvert code based on command definitions.
Split up lvchange code based on command definitions.
Generate help output and man pages from command definitions.
Verify all command line items against command definition.
Match every command run to one command definition.
Specify every allowed command definition/syntax in command-lines.in.
Add extra memory page when limiting pthread stack size in clvmd.
Support striped/raid0* <-> raid10_near conversions.
Support shrinking of RaidLVs.
Support region size changes on existing RaidLVs.
Avoid parallel usage of cpg_mcast_joined() in clvmd with corosync.
Support raid6_{ls,rs,la,ra}_6 segment types and conversions from/to it.
Support raid6_n_6 segment type and conversions from/to it.
Support raid5_n segment type and conversions from/to it.
Support new internal command _dmeventd_thin_command.
Introduce new dmeventd/thin_command configurable setting.
Use new default units 'r' for displaying sizes.
Also unmount mount point on top of MD device if using blkdeactivate -u.
Restore check preventing resize of cache type volumes (2.02.158).
Add missing udev sync when flushing dirty cache content.
vgchange -p accepts only uint32 numbers.
Report thin LV date for merged LV when the merge is in progress.
Detect if snapshot merge really started before polling for progress.
Checking LV for merging origin requires also it has merged snapshot.
Extend validation of metadata processing.
Enable usage of cached volumes as snapshot origin LV.
Fix displayed lv name when splitting snapshot (2.02.146).
Warn about command not making metadata backup just once per command.
Enable usage of cached volume as thin volume's external origin.
Support cache volume activation with -real layer.
Improve search of lock-holder for external origin and thin-pool.
Support status checking of cache volume used in layer.
Avoid shifting by one number of blocks when clearing dirty cache volume.
Extend metadata validation of external origin LV use count.
Fix dm table when the last user of active external origin is removed.
Improve reported lvs status for active external origin volume.
Fix table load for a split RAID LV and require explicit activation.
Always activate a split RAID LV exclusively and locally.
Do not use LV RAID status bit for segment status.
Check segtype directly instead of checking RAID in segment status.
Reuse existing code for raid image removal.
Fix pvmove leaving -pvmove0 error device in clustered VG.
Avoid adding extra '_' at end of raid extracted images or metadata.
Optimize another piece of the _rmeta clearing code.
Fix deactivation of raid orphan devices for clustered VG.
Fix lvconvert raid1 to mirror table reload order.
Add internal function for separate mirror log preparation.
Fix segfault in lvmetad from missing NULL in daemon_reply_simple.
Simplify internal _info_run() and use _setup_task_run() for mknod.
Better API for internal function _setup_task_run.
Avoid using lv_has_target_type() call within lv_info_with_seg_status.
Simplify internal lv_info_with_seg_status API.
Decide which status is needed in one place for lv_info_with_seg_status.
Fix matching of LV segment when checking for its info status.
Report log_warn when status cannot be parsed.
Test segment type before accessing segment members when checking status.
Implement compatible target function for stripe segment.
Use status info to report merge failed and snapshot invalid lvs fields.
Version 2.02.168 - 30th November 2016
=====================================
Display correct sync_percent on large RaidLVs
lvmdbusd --blackboxsize <n> added, used to override default size of 16
Allow a transiently failed RaidLV to be refreshed
Use lv_update_and_reload() inside mirror code where it applies.
Preserve mirrored status for temporary layered mirrors.
Use transient raid check before repairing raid volume.
Implement transient status check for raid volumes.
Only log msg as debug if lvm2-lvmdbusd unit missing for D-Bus notification.
Avoid duplicated underscore in name of extracted LV image.
Missing stripe filler can now also be 'zero'.
lvconvert --repair accepts the --interval and --background options.
More efficiently prepare _rmeta devices when creating a new raid LV.
Version 2.02.167 - 5th November 2016
====================================
Use log_error in regex and sysfs filter to describe reason of failure.
Fix blkdeactivate to deactivate dev stack if dev on top already unmounted.
Prevent non-synced raid1 repair unless --force
Prevent raid4 creation/conversion on non-supporting kernels
Add direct striped -> raid4 conversion
Fix raid4 parity image pair position on conversions from striped/raid0*
Fix a few unconverted return code values for some lvconvert error paths.
Disable lvconvert of thin pool to raid while active.
Disable systemd service start rate limiting for lvm2-pvscan@.service.
@ -1827,7 +1158,7 @@ Version 2.02.105 - 20th January 2014
Allow lvmetad to reuse stale socket.
Only unlink lvmetad socket on error if created by the same process.
Append missing newline to lvmetad missing socket path error message.
Check for non-zero alignment in _text_pv_add_metadata_area() to not div by 0.
Check for non-zero aligment in _text_pv_add_metadata_area() to not div by 0.
Add allocation/use_blkid_wiping to lvm.conf to enable blkid wiping.
Enable blkid_wiping by default if the blkid library is present.
Add configure --disable-blkid_wiping to disable libblkid signature detection.


@ -1,198 +1,5 @@
Version 1.02.185 -
=====================================
Version 1.02.183 - 07th February 2022
=====================================
Unmangle UUIDs for DM_DEVICE_LIST ioctl.
Version 1.02.181 - 20th October 2021
====================================
Add IMA support with 'dmsetup measure' command.
Add defines DM_NAME_LIST_FLAG_HAS_UUID, DM_NAME_LIST_FLAG_DOESNT_HAVE_UUID.
Enhance tracking of activated devices when preloading dm tree.
Fix bug in construction of cache table line (regression from 1.02.159).
Version 1.02.179 - 11th August 2021
===================================
Version 1.02.177 - 07th May 2021
================================
Configure proceeds without libaio to allow build of device-mapper only.
Fix symbol versioning build with -O2 -flto.
Add dm_tree_node_add_thin_pool_target_v1 with crop_metadata support.
Version 1.02.175 - 08th January 2021
====================================
Version 1.02.173 - 09th August 2020
===================================
Add support for VDO in blkdeactivate script.
Version 1.02.171 - 26th March 2020
==================================
Try to remove all created devices on dm preload tree error path.
Fix dm_list iterators with gcc 10 optimization (-ftree-pta).
Dmeventd handles timer without looping on short intervals.
Version 1.02.169 - 11th February 2020
=====================================
Enhance error messages for device creation.
Version 1.02.167 - 30th November 2019
=====================================
Version 1.02.165 - 23rd October 2019
====================================
Add support for DM_DEVICE_GET_TARGET_VERSION.
Add debug of dmsetup udevcomplete with hex print of DM_COOKIE_COMPLETED.
Fix versioning of dm_stats_create_region and dm_stats_create_region.
Version 1.02.163 - 15th June 2019
=================================
Version 1.02.161 - 10th June 2019
=================================
Version 1.02.159 - 07th June 2019
=================================
Parsing of cache status understands no_discard_passdown.
Ensure migration_threshold for cache is at least 8 chunks.
Version 1.02.155 - 18th December 2018
=====================================
Include correct internal header inside libdm list.c.
Enhance ioctl flattening and add parameters only when needed.
Add DM_DEVICE_ARM_POLL for API completeness matching the kernel.
Do not add parameters for RESUME with DM_DEVICE_CREATE dm task.
Fix dmstats report printing no output.
Version 1.02.153 - 31st October 2018
====================================
Version 1.02.151 - 10th October 2018
====================================
Add hot fix to avoid locking collision when monitoring thin-pools.
Version 1.02.150 - 01 August 2018
=================================
Add vdo plugin for monitoring VDO devices.
Version 1.02.149 - 19th July 2018
=================================
Version 1.02.148 - 18th June 2018
=================================
Version 1.02.147 - 13th June 2018
=================================
Version 1.02.147-rc1 - 24th May 2018
====================================
Reuse uname() result for mirror target.
Recognize also mounted btrfs through dm_device_has_mounted_fs().
Add missing log_error() into dm_stats_populate() returning 0.
Avoid calling dm_stats_populate() for DM devices without any stats regions.
Support DM_DEBUG_WITH_LINE_NUMBERS envvar for debug msg with source:line.
Configured command for thin pool threshold handling gets whole environment.
Fix tests for failing dm_snprintf() in stats code.
Parsing mirror status accepts 'userspace' keyword in status.
Introduce dm_malloc_aligned for page alignment of buffers.
Version 1.02.146 - 18th December 2017
=====================================
Activation tree of thin pool skips duplicated check of pool status.
Remove code supporting replicator target.
Do not ignore failure of _info_by_dev().
Propagate delayed resume for pvmove subvolumes.
Suppress integrity encryption keys in 'table' output unless --showkeys supplied.
Version 1.02.145 - 3rd November 2017
====================================
Keep Install section only in dm-event.socket systemd unit.
Issue a specific error with dmsetup status if device is unknown.
Fix RT_LIBS reference in generated libdevmapper.pc for pkg-config
Version 1.02.144 - 6th October 2017
===================================
Schedule exit when received SIGTERM in dmeventd.
Also try to unmount /boot on blkdeactivate -u if on top of supported device.
Use blkdeactivate -r wait in blk-availability systemd service/initscript.
Add blkdeactivate -r wait option to wait for MD resync/recovery/reshape.
Fix blkdeactivate regression with failing DM/MD devs deactivation (1.02.142).
Fix typo in blkdeactivate's '--{dm,lvm,mpath}options' option name.
Correct return value testing when getting reserved values for reporting.
Take -S with dmsetup suspend/resume/clear/wipe_table/remove/deps/status/table.
Version 1.02.143 - 13th September 2017
Version 1.02.136 -
======================================
Restore umask when creation of node fails.
Add --concise to dmsetup create for many devices with tables in one command.
Accept minor number without major in library when it knows dm major number.
Introduce single-line concise table output format: dmsetup table --concise
Version 1.02.142 - 20th July 2017
=================================
Create /dev/disk/by-part{uuid,label} and gpt-auto-root symlinks with udev.
Version 1.02.141 - 28th June 2017
=================================
Fix reusing of dm_task structure for status reading (used by dmeventd).
Add dm_percent_to_round_float for adjusted percentage rounding.
Reset array with dead rimage devices once raid gets in sync.
Drop unneeded --config option from raid dmeventd plugin.
dm_get_status_raid() better handles some inconsistent md statuses.
Accept truncated files in calls to dm_stats_update_regions_from_fd().
Restore warning in 5% increments when thin-pool is over 80% (1.02.138).
Version 1.02.140 - 3rd May 2017
===============================
Add missing configure --enable-dmfilemapd status message and fix --disable.
Version 1.02.139 - 13th April 2017
==================================
Fix assignment in _target_version() when dm task can't run.
Flush stdout on each iteration when using --count or --interval.
Show detailed error message when execvp fails while starting dmfilemapd.
Fix segmentation fault when dmfilemapd is run with no arguments.
Numerous minor dmfilemapd fixes from coverity.
Version 1.02.138 - 28th March 2017
==================================
Support additional raid5/6 configurations.
Provide dm_tree_node_add_cache_target@base compatible symbol.
Support DM_CACHE_FEATURE_METADATA2, new cache metadata format 2.
Improve code to handle mode mask for cache nodes.
Cache status check for passthrough also requires a trailing space.
Add extra memory page when limiting pthread stack size in dmeventd.
Avoid immediate resume when a preloaded device is smaller.
Do not suppress kernel key description in dmsetup table output for dm-crypt.
Support configurable command executed from dmeventd thin plugin.
Support new R|r human readable units output format.
Thin dmeventd plugin reacts faster on lvextend failure path with umount.
Add dm_stats_bind_from_fd() to bind a stats handle from a file descriptor.
Do not try call callback when reverting activation on error path.
Fix file mapping for extents with physically adjacent extents in dmstats.
Validate vsnprintf result in runtime translation of dm_log (1.02.136).
Separate filemap extent allocation from region table in dmstats.
Fix segmentation fault when filemap region creation fails in dmstats.
Fix performance of region cleanup for failed filemap creation in dmstats.
Fix very slow region deletion with many regions in dmstats.
Version 1.02.137 - 30th November 2016
=====================================
Document raid status values.
Always exit dmsetup with success when asked to display help/version.
Version 1.02.136 - 5th November 2016
====================================
Log failure of raid device with log_error level.
Use dm_log_with_errno and translate runtime to dm_log only when needed.
Make log messages from dm and lvm library different from dmeventd.
Notice and Info messages are again logged from dmeventd and its plugins.
Dmeventd now also respects DM_ABORT_ON_INTERNAL_ERRORS as a libdm-based tool.
Report dm logging as non-default also when logging with errno was changed.
Use log_level() macro to consistently decode message log level in dmeventd.
Still produce output when dmsetup dependency tree building finds dev missing.
Check and report pthread_sigmask() failure in dmeventd.
Check mem alloc fail in _canonicalize_field_ids().
Use unsigned math when checking more than 31 legs of raid.
@ -574,7 +381,7 @@ Version 1.02.86 - 23rd June 2014
Add DM_REPORT_FIELD_TYPE_STRING_LIST: separate string and string list fields.
Add dm_str_list to libdevmapper for string list type definition and its reuse.
Add dmsetup -S/--select to define selection criteria for dmsetup reports.
Add dm_report_init_with_selection to initialize report with selection criteria.
Add dm_report_init_with_selection to intialize report with selection criteria.
Add DM_REPORT_FIELD_TYPE_SIZE: separate number and size reporting fields.
Use RemoveOnStop for dm-event.socket systemd unit.
Document env var 'DM_DEFAULT_NAME_MANGLING_MODE' in dmsetup man page.


@ -61,174 +61,3 @@ AC_DEFUN([AC_TRY_LDFLAGS],
ifelse([$4], [], [:], [$4])
fi
])
# ===========================================================================
# http://www.gnu.org/software/autoconf-archive/ax_gcc_builtin.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_GCC_BUILTIN(BUILTIN)
#
# DESCRIPTION
#
# This macro checks if the compiler supports one of GCC's built-in
# functions; many other compilers also provide those same built-ins.
#
# The BUILTIN parameter is the name of the built-in function.
#
# If BUILTIN is supported define HAVE_<BUILTIN>. Keep in mind that since
# builtins usually start with two underscores they will be copied over
# into the HAVE_<BUILTIN> definition (e.g. HAVE___BUILTIN_EXPECT for
# __builtin_expect()).
#
# The macro caches its result in the ax_cv_have_<BUILTIN> variable (e.g.
# ax_cv_have___builtin_expect).
#
# The macro currently supports the following built-in functions:
#
# __builtin_assume_aligned
# __builtin_bswap16
# __builtin_bswap32
# __builtin_bswap64
# __builtin_choose_expr
# __builtin___clear_cache
# __builtin_clrsb
# __builtin_clrsbl
# __builtin_clrsbll
# __builtin_clz
# __builtin_clzl
# __builtin_clzll
# __builtin_complex
# __builtin_constant_p
# __builtin_ctz
# __builtin_ctzl
# __builtin_ctzll
# __builtin_expect
# __builtin_ffs
# __builtin_ffsl
# __builtin_ffsll
# __builtin_fpclassify
# __builtin_huge_val
# __builtin_huge_valf
# __builtin_huge_vall
# __builtin_inf
# __builtin_infd128
# __builtin_infd32
# __builtin_infd64
# __builtin_inff
# __builtin_infl
# __builtin_isinf_sign
# __builtin_nan
# __builtin_nand128
# __builtin_nand32
# __builtin_nand64
# __builtin_nanf
# __builtin_nanl
# __builtin_nans
# __builtin_nansf
# __builtin_nansl
# __builtin_object_size
# __builtin_parity
# __builtin_parityl
# __builtin_parityll
# __builtin_popcount
# __builtin_popcountl
# __builtin_popcountll
# __builtin_powi
# __builtin_powif
# __builtin_powil
# __builtin_prefetch
# __builtin_trap
# __builtin_types_compatible_p
# __builtin_unreachable
#
# Unsupported built-ins will be tested with an empty parameter set and the
# result of the check might be wrong or meaningless, so use with care.
#
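# EXAMPLE
#
# A minimal usage sketch (the chosen built-in is only an example):
# configure.ac calls
#
#   AX_GCC_BUILTIN([__builtin_clz])
#
# and C code can then guard its use with the generated define:
#
#   #ifdef HAVE___BUILTIN_CLZ
#       zeros = __builtin_clz(mask);
#   #endif
#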
# LICENSE
#
# Copyright (c) 2013 Gabriele Svelto <gabriele.svelto@gmail.com>
#
# Copying and distribution of this file, with or without modification, are
# permitted in any medium without royalty provided the copyright notice
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 3
AC_DEFUN([AX_GCC_BUILTIN], [
AS_VAR_PUSHDEF([ac_var], [ax_cv_have_$1])
AC_CACHE_CHECK([for $1], [ac_var], [
AC_LINK_IFELSE([AC_LANG_PROGRAM([], [
m4_case([$1],
[__builtin_assume_aligned], [$1("", 0)],
[__builtin_bswap16], [$1(0)],
[__builtin_bswap32], [$1(0)],
[__builtin_bswap64], [$1(0)],
[__builtin_choose_expr], [$1(0, 0, 0)],
[__builtin___clear_cache], [$1("", "")],
[__builtin_clrsb], [$1(0)],
[__builtin_clrsbl], [$1(0)],
[__builtin_clrsbll], [$1(0)],
[__builtin_clz], [$1(0)],
[__builtin_clzl], [$1(0)],
[__builtin_clzll], [$1(0)],
[__builtin_complex], [$1(0.0, 0.0)],
[__builtin_constant_p], [$1(0)],
[__builtin_ctz], [$1(0)],
[__builtin_ctzl], [$1(0)],
[__builtin_ctzll], [$1(0)],
[__builtin_expect], [$1(0, 0)],
[__builtin_ffs], [$1(0)],
[__builtin_ffsl], [$1(0)],
[__builtin_ffsll], [$1(0)],
[__builtin_fpclassify], [$1(0, 1, 2, 3, 4, 0.0)],
[__builtin_huge_val], [$1()],
[__builtin_huge_valf], [$1()],
[__builtin_huge_vall], [$1()],
[__builtin_inf], [$1()],
[__builtin_infd128], [$1()],
[__builtin_infd32], [$1()],
[__builtin_infd64], [$1()],
[__builtin_inff], [$1()],
[__builtin_infl], [$1()],
[__builtin_isinf_sign], [$1(0.0)],
[__builtin_nan], [$1("")],
[__builtin_nand128], [$1("")],
[__builtin_nand32], [$1("")],
[__builtin_nand64], [$1("")],
[__builtin_nanf], [$1("")],
[__builtin_nanl], [$1("")],
[__builtin_nans], [$1("")],
[__builtin_nansf], [$1("")],
[__builtin_nansl], [$1("")],
[__builtin_object_size], [$1("", 0)],
[__builtin_parity], [$1(0)],
[__builtin_parityl], [$1(0)],
[__builtin_parityll], [$1(0)],
[__builtin_popcount], [$1(0)],
[__builtin_popcountl], [$1(0)],
[__builtin_popcountll], [$1(0)],
[__builtin_powi], [$1(0, 0)],
[__builtin_powif], [$1(0, 0)],
[__builtin_powil], [$1(0, 0)],
[__builtin_prefetch], [$1("")],
[__builtin_trap], [$1()],
[__builtin_types_compatible_p], [$1(int, int)],
[__builtin_unreachable], [$1()],
[m4_warn([syntax], [Unsupported built-in $1, the test may fail])
$1()]
)
])],
[AS_VAR_SET([ac_var], [yes])],
[AS_VAR_SET([ac_var], [no])])
])
AS_IF([test yes = AS_VAR_GET([ac_var])],
[AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_$1), 1,
[Define to 1 if the system has the `$1' built-in function])], [])
AS_VAR_POPDEF([ac_var])
])

aclocal.m4 vendored

@ -1,6 +1,6 @@
# generated automatically by aclocal 1.16.2 -*- Autoconf -*-
# generated automatically by aclocal 1.15 -*- Autoconf -*-
# Copyright (C) 1996-2020 Free Software Foundation, Inc.
# Copyright (C) 1996-2014 Free Software Foundation, Inc.
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@ -13,7 +13,7 @@
m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])])
# ===========================================================================
# https://www.gnu.org/software/autoconf-archive/ax_python_module.html
# http://www.gnu.org/software/autoconf-archive/ax_python_module.html
# ===========================================================================
#
# SYNOPSIS
@ -37,7 +37,7 @@ m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 9
#serial 8
AU_ALIAS([AC_PYTHON_MODULE], [AX_PYTHON_MODULE])
AC_DEFUN([AX_PYTHON_MODULE],[
@ -69,63 +69,32 @@ AC_DEFUN([AX_PYTHON_MODULE],[
fi
])
# pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*-
# serial 11 (pkg-config-0.29.1)
# pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*-
# serial 1 (pkg-config-0.24)
#
# Copyright © 2004 Scott James Remnant <scott@netsplit.com>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
dnl Copyright © 2004 Scott James Remnant <scott@netsplit.com>.
dnl Copyright © 2012-2015 Dan Nicholson <dbn.lists@gmail.com>
dnl
dnl This program is free software; you can redistribute it and/or modify
dnl it under the terms of the GNU General Public License as published by
dnl the Free Software Foundation; either version 2 of the License, or
dnl (at your option) any later version.
dnl
dnl This program is distributed in the hope that it will be useful, but
dnl WITHOUT ANY WARRANTY; without even the implied warranty of
dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
dnl General Public License for more details.
dnl
dnl You should have received a copy of the GNU General Public License
dnl along with this program; if not, write to the Free Software
dnl Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
dnl 02111-1307, USA.
dnl
dnl As a special exception to the GNU General Public License, if you
dnl distribute this file as part of a program that contains a
dnl configuration script generated by Autoconf, you may include it under
dnl the same distribution terms that you use for the rest of that
dnl program.
dnl PKG_PREREQ(MIN-VERSION)
dnl -----------------------
dnl Since: 0.29
dnl
dnl Verify that the version of the pkg-config macros are at least
dnl MIN-VERSION. Unlike PKG_PROG_PKG_CONFIG, which checks the user's
dnl installed version of pkg-config, this checks the developer's version
dnl of pkg.m4 when generating configure.
dnl
dnl To ensure that this macro is defined, also add:
dnl m4_ifndef([PKG_PREREQ],
dnl [m4_fatal([must install pkg-config 0.29 or later before running autoconf/autogen])])
dnl
dnl See the "Since" comment for each macro you use to see what version
dnl of the macros you require.
m4_defun([PKG_PREREQ],
[m4_define([PKG_MACROS_VERSION], [0.29.1])
m4_if(m4_version_compare(PKG_MACROS_VERSION, [$1]), -1,
[m4_fatal([pkg.m4 version $1 or higher is required but ]PKG_MACROS_VERSION[ found])])
])dnl PKG_PREREQ
dnl PKG_PROG_PKG_CONFIG([MIN-VERSION])
dnl ----------------------------------
dnl Since: 0.16
dnl
dnl Search for the pkg-config tool and set the PKG_CONFIG variable to
dnl first found in the path. Checks that the version of pkg-config found
dnl is at least MIN-VERSION. If MIN-VERSION is not specified, 0.9.0 is
dnl used since that's the first version where most current features of
dnl pkg-config existed.
# PKG_PROG_PKG_CONFIG([MIN-VERSION])
# ----------------------------------
AC_DEFUN([PKG_PROG_PKG_CONFIG],
[m4_pattern_forbid([^_?PKG_[A-Z_]+$])
m4_pattern_allow([^PKG_CONFIG(_(PATH|LIBDIR|SYSROOT_DIR|ALLOW_SYSTEM_(CFLAGS|LIBS)))?$])
@ -147,19 +116,18 @@ if test -n "$PKG_CONFIG"; then
PKG_CONFIG=""
fi
fi[]dnl
])dnl PKG_PROG_PKG_CONFIG
])# PKG_PROG_PKG_CONFIG
dnl PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
dnl -------------------------------------------------------------------
dnl Since: 0.18
dnl
dnl Check to see whether a particular set of modules exists. Similar to
dnl PKG_CHECK_MODULES(), but does not set variables or print errors.
dnl
dnl Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG])
dnl only at the first occurence in configure.ac, so if the first place
dnl it's called might be skipped (such as if it is within an "if", you
dnl have to call PKG_CHECK_EXISTS manually
# PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
#
# Check to see whether a particular set of modules exists. Similar
# to PKG_CHECK_MODULES(), but does not set variables or print errors.
#
# Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG])
# only at the first occurence in configure.ac, so if the first place
# it's called might be skipped (such as if it is within an "if", you
# have to call PKG_CHECK_EXISTS manually
# --------------------------------------------------------------
AC_DEFUN([PKG_CHECK_EXISTS],
[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
if test -n "$PKG_CONFIG" && \
@ -169,10 +137,8 @@ m4_ifvaln([$3], [else
$3])dnl
fi])
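dnl A minimal usage sketch (the module and variable names are only examples):
dnl
dnl   PKG_CHECK_EXISTS([libfoo >= 1.0],
dnl                    [have_libfoo=yes],
dnl                    [have_libfoo=no])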
dnl _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES])
dnl ---------------------------------------------
dnl Internal wrapper calling pkg-config via PKG_CONFIG and setting
dnl pkg_failed based on the result.
# _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES])
# ---------------------------------------------
m4_define([_PKG_CONFIG],
[if test -n "$$1"; then
pkg_cv_[]$1="$$1"
@ -184,11 +150,10 @@ m4_define([_PKG_CONFIG],
else
pkg_failed=untried
fi[]dnl
])dnl _PKG_CONFIG
])# _PKG_CONFIG
dnl _PKG_SHORT_ERRORS_SUPPORTED
dnl ---------------------------
dnl Internal check to see if pkg-config supports short errors.
# _PKG_SHORT_ERRORS_SUPPORTED
# -----------------------------
AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED],
[AC_REQUIRE([PKG_PROG_PKG_CONFIG])
if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
@ -196,17 +161,19 @@ if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
else
_pkg_short_errors_supported=no
fi[]dnl
])dnl _PKG_SHORT_ERRORS_SUPPORTED
])# _PKG_SHORT_ERRORS_SUPPORTED
dnl PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND],
dnl [ACTION-IF-NOT-FOUND])
dnl --------------------------------------------------------------
dnl Since: 0.4.0
dnl
dnl Note that if there is a possibility the first call to
dnl PKG_CHECK_MODULES might not happen, you should be sure to include an
dnl explicit call to PKG_PROG_PKG_CONFIG in your configure.ac
# PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND],
# [ACTION-IF-NOT-FOUND])
#
#
# Note that if there is a possibility the first call to
# PKG_CHECK_MODULES might not happen, you should be sure to include an
# explicit call to PKG_PROG_PKG_CONFIG in your configure.ac
#
#
# --------------------------------------------------------------
AC_DEFUN([PKG_CHECK_MODULES],
[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl
@ -260,40 +227,16 @@ else
AC_MSG_RESULT([yes])
$3
fi[]dnl
])dnl PKG_CHECK_MODULES
])# PKG_CHECK_MODULES
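dnl A minimal usage sketch (the prefix and module names are only examples);
dnl on success FOO_CFLAGS and FOO_LIBS are substituted from pkg-config:
dnl
dnl   PKG_CHECK_MODULES([FOO], [libfoo >= 1.0],
dnl                     [have_libfoo=yes],
dnl                     [have_libfoo=no])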
dnl PKG_CHECK_MODULES_STATIC(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND],
dnl [ACTION-IF-NOT-FOUND])
dnl ---------------------------------------------------------------------
dnl Since: 0.29
dnl
dnl Checks for existence of MODULES and gathers its build flags with
dnl static libraries enabled. Sets VARIABLE-PREFIX_CFLAGS from --cflags
dnl and VARIABLE-PREFIX_LIBS from --libs.
dnl
dnl Note that if there is a possibility the first call to
dnl PKG_CHECK_MODULES_STATIC might not happen, you should be sure to
dnl include an explicit call to PKG_PROG_PKG_CONFIG in your
dnl configure.ac.
AC_DEFUN([PKG_CHECK_MODULES_STATIC],
[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
_save_PKG_CONFIG=$PKG_CONFIG
PKG_CONFIG="$PKG_CONFIG --static"
PKG_CHECK_MODULES($@)
PKG_CONFIG=$_save_PKG_CONFIG[]dnl
])dnl PKG_CHECK_MODULES_STATIC
dnl PKG_INSTALLDIR([DIRECTORY])
dnl -------------------------
dnl Since: 0.27
dnl
dnl Substitutes the variable pkgconfigdir as the location where a module
dnl should install pkg-config .pc files. By default the directory is
dnl $libdir/pkgconfig, but the default can be changed by passing
dnl DIRECTORY. The user can override through the --with-pkgconfigdir
dnl parameter.
# PKG_INSTALLDIR(DIRECTORY)
# -------------------------
# Substitutes the variable pkgconfigdir as the location where a module
# should install pkg-config .pc files. By default the directory is
# $libdir/pkgconfig, but the default can be changed by passing
# DIRECTORY. The user can override through the --with-pkgconfigdir
# parameter.
AC_DEFUN([PKG_INSTALLDIR],
[m4_pushdef([pkg_default], [m4_default([$1], ['${libdir}/pkgconfig'])])
m4_pushdef([pkg_description],
@ -304,18 +247,16 @@ AC_ARG_WITH([pkgconfigdir],
AC_SUBST([pkgconfigdir], [$with_pkgconfigdir])
m4_popdef([pkg_default])
m4_popdef([pkg_description])
])dnl PKG_INSTALLDIR
]) dnl PKG_INSTALLDIR
dnl PKG_NOARCH_INSTALLDIR([DIRECTORY])
dnl --------------------------------
dnl Since: 0.27
dnl
dnl Substitutes the variable noarch_pkgconfigdir as the location where a
dnl module should install arch-independent pkg-config .pc files. By
dnl default the directory is $datadir/pkgconfig, but the default can be
dnl changed by passing DIRECTORY. The user can override through the
dnl --with-noarch-pkgconfigdir parameter.
# PKG_NOARCH_INSTALLDIR(DIRECTORY)
# -------------------------
# Substitutes the variable noarch_pkgconfigdir as the location where a
# module should install arch-independent pkg-config .pc files. By
# default the directory is $datadir/pkgconfig, but the default can be
# changed by passing DIRECTORY. The user can override through the
# --with-noarch-pkgconfigdir parameter.
AC_DEFUN([PKG_NOARCH_INSTALLDIR],
[m4_pushdef([pkg_default], [m4_default([$1], ['${datadir}/pkgconfig'])])
m4_pushdef([pkg_description],
@ -326,15 +267,13 @@ AC_ARG_WITH([noarch-pkgconfigdir],
AC_SUBST([noarch_pkgconfigdir], [$with_noarch_pkgconfigdir])
m4_popdef([pkg_default])
m4_popdef([pkg_description])
])dnl PKG_NOARCH_INSTALLDIR
]) dnl PKG_NOARCH_INSTALLDIR
dnl PKG_CHECK_VAR(VARIABLE, MODULE, CONFIG-VARIABLE,
dnl [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
dnl -------------------------------------------
dnl Since: 0.28
dnl
dnl Retrieves the value of the pkg-config variable for the given module.
# PKG_CHECK_VAR(VARIABLE, MODULE, CONFIG-VARIABLE,
# [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
# -------------------------------------------
# Retrieves the value of the pkg-config variable for the given module.
AC_DEFUN([PKG_CHECK_VAR],
[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
AC_ARG_VAR([$1], [value of $3 for $2, overriding pkg-config])dnl
@ -343,77 +282,9 @@ _PKG_CONFIG([$1], [variable="][$3]["], [$2])
AS_VAR_COPY([$1], [pkg_cv_][$1])
AS_VAR_IF([$1], [""], [$5], [$4])dnl
])dnl PKG_CHECK_VAR
])# PKG_CHECK_VAR
dnl PKG_WITH_MODULES(VARIABLE-PREFIX, MODULES,
dnl [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND],
dnl [DESCRIPTION], [DEFAULT])
dnl ------------------------------------------
dnl
dnl Prepare a "--with-" configure option using the lowercase
dnl [VARIABLE-PREFIX] name, merging the behaviour of AC_ARG_WITH and
dnl PKG_CHECK_MODULES in a single macro.
AC_DEFUN([PKG_WITH_MODULES],
[
m4_pushdef([with_arg], m4_tolower([$1]))
m4_pushdef([description],
[m4_default([$5], [build with ]with_arg[ support])])
m4_pushdef([def_arg], [m4_default([$6], [auto])])
m4_pushdef([def_action_if_found], [AS_TR_SH([with_]with_arg)=yes])
m4_pushdef([def_action_if_not_found], [AS_TR_SH([with_]with_arg)=no])
m4_case(def_arg,
[yes],[m4_pushdef([with_without], [--without-]with_arg)],
[m4_pushdef([with_without],[--with-]with_arg)])
AC_ARG_WITH(with_arg,
AS_HELP_STRING(with_without, description[ @<:@default=]def_arg[@:>@]),,
[AS_TR_SH([with_]with_arg)=def_arg])
AS_CASE([$AS_TR_SH([with_]with_arg)],
[yes],[PKG_CHECK_MODULES([$1],[$2],$3,$4)],
[auto],[PKG_CHECK_MODULES([$1],[$2],
[m4_n([def_action_if_found]) $3],
[m4_n([def_action_if_not_found]) $4])])
m4_popdef([with_arg])
m4_popdef([description])
m4_popdef([def_arg])
])dnl PKG_WITH_MODULES
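dnl A minimal usage sketch (names and the default are only examples); this
dnl prepares a --with-foo/--without-foo option wired to PKG_CHECK_MODULES:
dnl
dnl   PKG_WITH_MODULES([FOO], [libfoo >= 1.0],,,
dnl                    [build with libfoo support], [auto])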
dnl PKG_HAVE_WITH_MODULES(VARIABLE-PREFIX, MODULES,
dnl [DESCRIPTION], [DEFAULT])
dnl -----------------------------------------------
dnl
dnl Convenience macro to trigger AM_CONDITIONAL after PKG_WITH_MODULES
dnl check._[VARIABLE-PREFIX] is exported as make variable.
AC_DEFUN([PKG_HAVE_WITH_MODULES],
[
PKG_WITH_MODULES([$1],[$2],,,[$3],[$4])
AM_CONDITIONAL([HAVE_][$1],
[test "$AS_TR_SH([with_]m4_tolower([$1]))" = "yes"])
])dnl PKG_HAVE_WITH_MODULES
dnl PKG_HAVE_DEFINE_WITH_MODULES(VARIABLE-PREFIX, MODULES,
dnl [DESCRIPTION], [DEFAULT])
dnl ------------------------------------------------------
dnl
dnl Convenience macro to run AM_CONDITIONAL and AC_DEFINE after
dnl PKG_WITH_MODULES check. HAVE_[VARIABLE-PREFIX] is exported as make
dnl and preprocessor variable.
AC_DEFUN([PKG_HAVE_DEFINE_WITH_MODULES],
[
PKG_HAVE_WITH_MODULES([$1],[$2],[$3],[$4])
AS_IF([test "$AS_TR_SH([with_]m4_tolower([$1]))" = "yes"],
[AC_DEFINE([HAVE_][$1], 1, [Enable ]m4_tolower([$1])[ support])])
])dnl PKG_HAVE_DEFINE_WITH_MODULES
# Copyright (C) 1999-2020 Free Software Foundation, Inc.
# Copyright (C) 1999-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@ -447,11 +318,8 @@ AC_DEFUN([AM_PATH_PYTHON],
dnl Find a Python interpreter. Python versions prior to 2.0 are not
dnl supported. (2.0 was released on October 16, 2000).
m4_define_default([_AM_PYTHON_INTERPRETER_LIST],
[python python2 python3 dnl
python3.9 python3.8 python3.7 python3.6 python3.5 python3.4 python3.3 dnl
python3.2 python3.1 python3.0 dnl
python2.7 python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 dnl
python2.0])
[python python2 python3 python3.3 python3.2 python3.1 python3.0 python2.7 dnl
python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0])
AC_ARG_VAR([PYTHON], [the Python interpreter])
@ -496,14 +364,12 @@ AC_DEFUN([AM_PATH_PYTHON],
m4_default([$3], [AC_MSG_ERROR([no suitable Python interpreter found])])
else
dnl Query Python for its version number. Although site.py simply uses
dnl sys.version[:3], printing that failed with Python 3.10, since the
dnl trailing zero was eliminated. So now we output just the major
dnl and minor version numbers, as numbers. Apparently the tertiary
dnl version is not of interest.
dnl Query Python for its version number. Getting [:3] seems to be
dnl the best way to do this; it's what "site.py" does in the standard
dnl library.
AC_CACHE_CHECK([for $am_display_PYTHON version], [am_cv_python_version],
[am_cv_python_version=`$PYTHON -c "import sys; print('%u.%u' % sys.version_info[[:2]])"`])
[am_cv_python_version=`$PYTHON -c "import sys; sys.stdout.write(sys.version[[:3]])"`])
AC_SUBST([PYTHON_VERSION], [$am_cv_python_version])
dnl Use the values of $prefix and $exec_prefix for the corresponding
@ -653,7 +519,7 @@ for i in list(range(0, 4)): minverhex = (minverhex << 8) + minver[[i]]
sys.exit(sys.hexversion < minverhex)"
AS_IF([AM_RUN_LOG([$1 -c "$prog"])], [$3], [$4])])
# Copyright (C) 2001-2020 Free Software Foundation, Inc.
# Copyright (C) 2001-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,


@ -1,40 +0,0 @@
# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
#
# This file is part of the device-mapper userspace tools.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU Lesser General Public License v.2.1.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# Uncomment this to build the simple radix tree. You'll need to make clean too.
# Comment to build the advanced radix tree.
#base/data-struct/radix-tree.o: CFLAGS += -DSIMPLE_RADIX_TREE
# NOTE: this Makefile only works as 'include' for the toplevel Makefile
# which defines all top_* variables
BASE_SOURCE=\
base/data-struct/hash.c \
base/data-struct/list.c \
base/data-struct/radix-tree.c
BASE_TARGET = base/libbase.a
BASE_DEPENDS = $(BASE_SOURCE:%.c=%.d)
BASE_OBJECTS = $(BASE_SOURCE:%.c=%.o)
CLEAN_TARGETS += $(BASE_DEPENDS) $(BASE_OBJECTS) \
$(BASE_SOURCE:%.c=%.gcda) \
$(BASE_SOURCE:%.c=%.gcno) \
$(BASE_TARGET)
$(BASE_TARGET): $(BASE_OBJECTS)
@echo " [AR] $@"
$(Q) $(RM) $@
$(Q) $(AR) rsv $@ $(BASE_OBJECTS) > /dev/null
ifeq ("$(DEPENDS)","yes")
-include $(BASE_DEPENDS)
endif


@ -1,477 +0,0 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
*
* This file is part of the device-mapper userspace tools.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "device_mapper/misc/dmlib.h"
#include "base/memory/zalloc.h"
#include "hash.h"
struct dm_hash_node {
struct dm_hash_node *next;
void *data;
unsigned data_len;
unsigned keylen;
unsigned hash;
char key[0];
};
struct dm_hash_table {
unsigned num_nodes;
unsigned num_hint;
unsigned mask_slots; /* (slots - 1) -> used as hash mask */
unsigned collisions; /* Collisions of hash keys */
unsigned search; /* How many keys were searched */
unsigned found; /* How many nodes were found */
unsigned same_hash; /* Was there a collision with the same masked hash and len? */
struct dm_hash_node **slots;
};
#if 0 /* TO BE REMOVED */
static unsigned _hash(const void *key, unsigned len)
{
/* Permutation of the Integers 0 through 255 */
static unsigned char _nums[] = {
1, 14, 110, 25, 97, 174, 132, 119, 138, 170, 125, 118, 27, 233, 140, 51,
87, 197, 177, 107, 234, 169, 56, 68, 30, 7, 173, 73, 188, 40, 36, 65,
49, 213, 104, 190, 57, 211, 148, 223, 48, 115, 15, 2, 67, 186, 210, 28,
12, 181, 103, 70, 22, 58, 75, 78, 183, 167, 238, 157, 124, 147, 172,
144,
176, 161, 141, 86, 60, 66, 128, 83, 156, 241, 79, 46, 168, 198, 41, 254,
178, 85, 253, 237, 250, 154, 133, 88, 35, 206, 95, 116, 252, 192, 54,
221,
102, 218, 255, 240, 82, 106, 158, 201, 61, 3, 89, 9, 42, 155, 159, 93,
166, 80, 50, 34, 175, 195, 100, 99, 26, 150, 16, 145, 4, 33, 8, 189,
121, 64, 77, 72, 208, 245, 130, 122, 143, 55, 105, 134, 29, 164, 185,
194,
193, 239, 101, 242, 5, 171, 126, 11, 74, 59, 137, 228, 108, 191, 232,
139,
6, 24, 81, 20, 127, 17, 91, 92, 251, 151, 225, 207, 21, 98, 113, 112,
84, 226, 18, 214, 199, 187, 13, 32, 94, 220, 224, 212, 247, 204, 196,
43,
249, 236, 45, 244, 111, 182, 153, 136, 129, 90, 217, 202, 19, 165, 231,
71,
230, 142, 96, 227, 62, 179, 246, 114, 162, 53, 160, 215, 205, 180, 47,
109,
44, 38, 31, 149, 135, 0, 216, 52, 63, 23, 37, 69, 39, 117, 146, 184,
163, 200, 222, 235, 248, 243, 219, 10, 152, 131, 123, 229, 203, 76, 120,
209
};
const uint8_t *str = key;
unsigned h = 0, g;
unsigned i;
for (i = 0; i < len; i++) {
h <<= 4;
h += _nums[*str++];
g = h & ((unsigned) 0xf << 16u);
if (g) {
h ^= g >> 16u;
h ^= g >> 5u;
}
}
return h;
}
/* In-kernel DM hashing, still lots of collisions */
static unsigned _hash_in_kernel(const char *key, unsigned len)
{
const unsigned char *str = (unsigned char *)key;
const unsigned hash_mult = 2654435387U;
unsigned hash = 0, i;
for (i = 0; i < len; ++i)
hash = (hash + str[i]) * hash_mult;
return hash;
}
#endif
#undef get16bits
#if (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
#define get16bits(d) (*((const uint16_t *) (d)))
#endif
#if !defined (get16bits)
#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)\
+(uint32_t)(((const uint8_t *)(d))[0]) )
#endif
/*
* Adapted Bob Jenkins hash to read by 2 bytes if possible.
* https://secure.wikimedia.org/wikipedia/en/wiki/Jenkins_hash_function
*
* Reduces amount of hash collisions
*/
static unsigned _hash(const void *key, unsigned len)
{
const uint8_t *str = (uint8_t*) key;
unsigned hash = 0, i;
unsigned sz = len / 2;
for(i = 0; i < sz; ++i) {
hash += get16bits(str + 2 * i);
hash += (hash << 10);
hash ^= (hash >> 6);
}
if (len & 1) {
hash += str[len - 1];
hash += (hash << 10);
hash ^= (hash >> 6);
}
hash += (hash << 3);
hash ^= (hash >> 11);
hash += (hash << 15);
return hash;
}
static struct dm_hash_node *_create_node(const void *key, unsigned len)
{
struct dm_hash_node *n = malloc(sizeof(*n) + len);
if (n) {
memcpy(n->key, key, len);
n->keylen = len;
}
return n;
}
struct dm_hash_table *dm_hash_create(unsigned size_hint)
{
size_t len;
unsigned new_size = 16u;
struct dm_hash_table *hc = zalloc(sizeof(*hc));
if (!hc) {
log_error("Failed to allocate memory for hash.");
return 0;
}
hc->num_hint = size_hint;
/* round size hint up to a power of two */
while (new_size < size_hint)
new_size = new_size << 1;
hc->mask_slots = new_size - 1;
len = sizeof(*(hc->slots)) * new_size;
if (!(hc->slots = zalloc(len))) {
free(hc);
log_error("Failed to allocate slots for hash.");
return 0;
}
return hc;
}
static void _free_nodes(struct dm_hash_table *t)
{
struct dm_hash_node *c, *n;
unsigned i;
#ifdef DEBUG
log_debug("Free hash hint:%d slots:%d nodes:%d (s:%d f:%d c:%d h:%d)",
t->num_hint, t->mask_slots + 1, t->num_nodes,
t->search, t->found, t->collisions, t->same_hash);
#endif
if (!t->num_nodes)
return;
for (i = 0; i <= t->mask_slots; i++)
for (c = t->slots[i]; c; c = n) {
n = c->next;
free(c);
}
}
void dm_hash_destroy(struct dm_hash_table *t)
{
_free_nodes(t);
free(t->slots);
free(t);
}
static struct dm_hash_node **_findh(struct dm_hash_table *t, const void *key,
uint32_t len, unsigned hash)
{
struct dm_hash_node **c;
++t->search;
for (c = &t->slots[hash & t->mask_slots]; *c; c = &((*c)->next)) {
if ((*c)->keylen == len && (*c)->hash == hash) {
if (!memcmp(key, (*c)->key, len)) {
++t->found;
break;
}
++t->same_hash;
}
++t->collisions;
}
return c;
}
static struct dm_hash_node **_find(struct dm_hash_table *t, const void *key,
uint32_t len)
{
return _findh(t, key, len, _hash(key, len));
}
void *dm_hash_lookup_binary(struct dm_hash_table *t, const void *key,
uint32_t len)
{
struct dm_hash_node **c = _find(t, key, len);
return *c ? (*c)->data : 0;
}
int dm_hash_insert_binary(struct dm_hash_table *t, const void *key,
uint32_t len, void *data)
{
unsigned hash = _hash(key, len);
struct dm_hash_node **c = _findh(t, key, len, hash);
if (*c)
(*c)->data = data;
else {
struct dm_hash_node *n = _create_node(key, len);
if (!n)
return 0;
n->data = data;
n->hash = hash;
n->next = 0;
*c = n;
t->num_nodes++;
}
return 1;
}
void dm_hash_remove_binary(struct dm_hash_table *t, const void *key,
uint32_t len)
{
struct dm_hash_node **c = _find(t, key, len);
if (*c) {
struct dm_hash_node *old = *c;
*c = (*c)->next;
free(old);
t->num_nodes--;
}
}
void *dm_hash_lookup(struct dm_hash_table *t, const char *key)
{
return dm_hash_lookup_binary(t, key, strlen(key) + 1);
}
int dm_hash_insert(struct dm_hash_table *t, const char *key, void *data)
{
return dm_hash_insert_binary(t, key, strlen(key) + 1, data);
}
void dm_hash_remove(struct dm_hash_table *t, const char *key)
{
dm_hash_remove_binary(t, key, strlen(key) + 1);
}
static struct dm_hash_node **_find_str_with_val(struct dm_hash_table *t,
const void *key, const void *val,
uint32_t len, uint32_t val_len)
{
struct dm_hash_node **c;
unsigned h;
h = _hash(key, len) & t->mask_slots;
for (c = &t->slots[h]; *c; c = &((*c)->next)) {
if ((*c)->keylen != len)
continue;
if (!memcmp(key, (*c)->key, len) && (*c)->data) {
if (((*c)->data_len == val_len) &&
!memcmp(val, (*c)->data, val_len))
return c;
}
}
return NULL;
}
int dm_hash_insert_allow_multiple(struct dm_hash_table *t, const char *key,
const void *val, uint32_t val_len)
{
struct dm_hash_node *n;
struct dm_hash_node *first;
int len = strlen(key) + 1;
unsigned h;
n = _create_node(key, len);
if (!n)
return 0;
n->data = (void *)val;
n->data_len = val_len;
h = _hash(key, len) & t->mask_slots;
first = t->slots[h];
if (first)
n->next = first;
else
n->next = 0;
t->slots[h] = n;
t->num_nodes++;
return 1;
}
/*
* Look through multiple entries with the same key for one that has a
* matching val and return that. If none have matching val, return NULL.
*/
void *dm_hash_lookup_with_val(struct dm_hash_table *t, const char *key,
const void *val, uint32_t val_len)
{
struct dm_hash_node **c;
c = _find_str_with_val(t, key, val, strlen(key) + 1, val_len);
return (c && *c) ? (*c)->data : 0;
}
/*
* Look through multiple entries with the same key for one that has a
* matching val and remove that.
*/
void dm_hash_remove_with_val(struct dm_hash_table *t, const char *key,
const void *val, uint32_t val_len)
{
struct dm_hash_node **c;
c = _find_str_with_val(t, key, val, strlen(key) + 1, val_len);
if (c && *c) {
struct dm_hash_node *old = *c;
*c = (*c)->next;
free(old);
t->num_nodes--;
}
}
/*
* Look up the value for a key and count how many
* entries have the same key.
*
* If no entries have key, return NULL and set count to 0.
*
* If one entry has the key, the function returns the val,
* and sets count to 1.
*
* If N entries have the key, the function returns the val
* from the first entry, and sets count to N.
*/
void *dm_hash_lookup_with_count(struct dm_hash_table *t, const char *key, int *count)
{
struct dm_hash_node **c;
struct dm_hash_node **c1 = NULL;
uint32_t len = strlen(key) + 1;
unsigned h;
*count = 0;
h = _hash(key, len) & t->mask_slots;
for (c = &t->slots[h]; *c; c = &((*c)->next)) {
if ((*c)->keylen != len)
continue;
if (!memcmp(key, (*c)->key, len)) {
(*count)++;
if (!c1)
c1 = c;
}
}
if (!c1)
return NULL;
else
return *c1 ? (*c1)->data : 0;
}
unsigned dm_hash_get_num_entries(struct dm_hash_table *t)
{
return t->num_nodes;
}
void dm_hash_iter(struct dm_hash_table *t, dm_hash_iterate_fn f)
{
struct dm_hash_node *c, *n;
unsigned i;
for (i = 0; i <= t->mask_slots; i++)
for (c = t->slots[i]; c; c = n) {
n = c->next;
f(c->data);
}
}
void dm_hash_wipe(struct dm_hash_table *t)
{
_free_nodes(t);
memset(t->slots, 0, sizeof(struct dm_hash_node *) * (t->mask_slots + 1));
t->num_nodes = t->collisions = t->search = t->same_hash = 0u;
}
char *dm_hash_get_key(struct dm_hash_table *t __attribute__((unused)),
struct dm_hash_node *n)
{
return n->key;
}
void *dm_hash_get_data(struct dm_hash_table *t __attribute__((unused)),
struct dm_hash_node *n)
{
return n->data;
}
static struct dm_hash_node *_next_slot(struct dm_hash_table *t, unsigned s)
{
struct dm_hash_node *c = NULL;
unsigned i;
for (i = s; i <= t->mask_slots && !c; i++)
c = t->slots[i];
return c;
}
struct dm_hash_node *dm_hash_get_first(struct dm_hash_table *t)
{
return _next_slot(t, 0);
}
struct dm_hash_node *dm_hash_get_next(struct dm_hash_table *t, struct dm_hash_node *n)
{
return n->next ? n->next : _next_slot(t, (n->hash & t->mask_slots) + 1);
}


@ -1,94 +0,0 @@
#ifndef BASE_DATA_STRUCT_HASH_H
#define BASE_DATA_STRUCT_HASH_H
#include <stdint.h>
//----------------------------------------------------------------
struct dm_hash_table;
struct dm_hash_node;
typedef void (*dm_hash_iterate_fn) (void *data);
struct dm_hash_table *dm_hash_create(unsigned size_hint)
__attribute__((__warn_unused_result__));
void dm_hash_destroy(struct dm_hash_table *t);
void dm_hash_wipe(struct dm_hash_table *t);
void *dm_hash_lookup(struct dm_hash_table *t, const char *key);
int dm_hash_insert(struct dm_hash_table *t, const char *key, void *data);
void dm_hash_remove(struct dm_hash_table *t, const char *key);
void *dm_hash_lookup_binary(struct dm_hash_table *t, const void *key, uint32_t len);
int dm_hash_insert_binary(struct dm_hash_table *t, const void *key, uint32_t len,
void *data);
void dm_hash_remove_binary(struct dm_hash_table *t, const void *key, uint32_t len);
unsigned dm_hash_get_num_entries(struct dm_hash_table *t);
void dm_hash_iter(struct dm_hash_table *t, dm_hash_iterate_fn f);
char *dm_hash_get_key(struct dm_hash_table *t, struct dm_hash_node *n);
void *dm_hash_get_data(struct dm_hash_table *t, struct dm_hash_node *n);
struct dm_hash_node *dm_hash_get_first(struct dm_hash_table *t);
struct dm_hash_node *dm_hash_get_next(struct dm_hash_table *t, struct dm_hash_node *n);
/*
* dm_hash_insert() replaces the value of an existing
* entry with a matching key if one exists. Otherwise
* it adds a new entry.
*
* dm_hash_insert_allow_multiple() inserts a new entry even if
* another entry with the same key already exists.
* val_len is the size of the data being inserted.
*
* If two entries with the same key exist,
* (added using dm_hash_insert_allow_multiple), then:
* . dm_hash_lookup() returns the first one it finds, and
* dm_hash_lookup_with_val() returns the one with a matching
* val_len/val.
* . dm_hash_remove() removes the first one it finds, and
* dm_hash_remove_with_val() removes the one with a matching
* val_len/val.
*
* If a single entry with a given key exists, and it has
* zero val_len, then:
* . dm_hash_lookup() returns it
* . dm_hash_lookup_with_val(val_len=0) returns it
* . dm_hash_remove() removes it
* . dm_hash_remove_with_val(val_len=0) removes it
*
* dm_hash_lookup_with_count() is a single call that will
* both lookup a key's value and check if there is more
* than one entry with the given key.
*
* (It is not meant to retrieve all the entries with the
* given key. In the common case where a single entry exists
* for the key, it is useful to have a single call that will
* both look up the value and indicate if multiple values
* exist for the key.)
*
* dm_hash_lookup_with_count:
* . If no entries exist, the function returns NULL, and
* the count is set to 0.
* . If only one entry exists, the value of that entry is
* returned and count is set to 1.
* . If N entries exists, the value of the first entry is
* returned and count is set to N.
*/
void *dm_hash_lookup_with_val(struct dm_hash_table *t, const char *key,
const void *val, uint32_t val_len);
void dm_hash_remove_with_val(struct dm_hash_table *t, const char *key,
const void *val, uint32_t val_len);
int dm_hash_insert_allow_multiple(struct dm_hash_table *t, const char *key,
const void *val, uint32_t val_len);
void *dm_hash_lookup_with_count(struct dm_hash_table *t, const char *key, int *count);
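/*
 * A minimal usage sketch of the multiple-entry calls above (the table,
 * key and value names are only examples):
 *
 *     dm_hash_insert_allow_multiple(t, "name", val_a, len_a);
 *     dm_hash_insert_allow_multiple(t, "name", val_b, len_b);
 *
 *     v = dm_hash_lookup_with_val(t, "name", val_b, len_b);
 *     v1 = dm_hash_lookup_with_count(t, "name", &count);
 *
 * after which count is set to 2, v is val_b, and
 * dm_hash_remove_with_val(t, "name", val_a, len_a) removes only the
 * entry holding val_a.
 */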
#define dm_hash_iterate(v, h) \
for (v = dm_hash_get_first((h)); v; \
v = dm_hash_get_next((h), v))
//----------------------------------------------------------------
#endif


@ -1,170 +0,0 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "list.h"
#include <assert.h>
#include <stdlib.h>
/*
* Initialise a list before use.
* The list head's next and previous pointers point back to itself.
*/
void dm_list_init(struct dm_list *head)
{
head->n = head->p = head;
}
/*
* Insert an element before 'head'.
* If 'head' is the list head, this adds an element to the end of the list.
*/
void dm_list_add(struct dm_list *head, struct dm_list *elem)
{
assert(head->n);
elem->n = head;
elem->p = head->p;
head->p->n = elem;
head->p = elem;
}
/*
* Insert an element after 'head'.
* If 'head' is the list head, this adds an element to the front of the list.
*/
void dm_list_add_h(struct dm_list *head, struct dm_list *elem)
{
assert(head->n);
elem->n = head->n;
elem->p = head;
head->n->p = elem;
head->n = elem;
}
/*
* Delete an element from its list.
* Note that this doesn't change the element itself - it may still be safe
* to follow its pointers.
*/
void dm_list_del(struct dm_list *elem)
{
elem->n->p = elem->p;
elem->p->n = elem->n;
}
/*
* Remove an element from existing list and insert before 'head'.
*/
void dm_list_move(struct dm_list *head, struct dm_list *elem)
{
dm_list_del(elem);
dm_list_add(head, elem);
}
/*
* Is the list empty?
*/
int dm_list_empty(const struct dm_list *head)
{
return head->n == head;
}
/*
* Is this the first element of the list?
*/
int dm_list_start(const struct dm_list *head, const struct dm_list *elem)
{
return elem->p == head;
}
/*
* Is this the last element of the list?
*/
int dm_list_end(const struct dm_list *head, const struct dm_list *elem)
{
return elem->n == head;
}
/*
* Return first element of the list or NULL if empty
*/
struct dm_list *dm_list_first(const struct dm_list *head)
{
return (dm_list_empty(head) ? NULL : head->n);
}
/*
* Return last element of the list or NULL if empty
*/
struct dm_list *dm_list_last(const struct dm_list *head)
{
return (dm_list_empty(head) ? NULL : head->p);
}
/*
* Return the previous element of the list, or NULL if we've reached the start.
*/
struct dm_list *dm_list_prev(const struct dm_list *head, const struct dm_list *elem)
{
return (dm_list_start(head, elem) ? NULL : elem->p);
}
/*
* Return the next element of the list, or NULL if we've reached the end.
*/
struct dm_list *dm_list_next(const struct dm_list *head, const struct dm_list *elem)
{
return (dm_list_end(head, elem) ? NULL : elem->n);
}
/*
* Return the number of elements in a list by walking it.
*/
unsigned int dm_list_size(const struct dm_list *head)
{
unsigned int s = 0;
const struct dm_list *v;
dm_list_iterate(v, head)
s++;
return s;
}
/*
* Join two lists together.
* This moves all the elements of the list 'head1' to the end of the list
* 'head', leaving 'head1' empty.
*/
void dm_list_splice(struct dm_list *head, struct dm_list *head1)
{
assert(head->n);
assert(head1->n);
if (dm_list_empty(head1))
return;
head1->p->n = head;
head1->n->p = head->p;
head->p->n = head1->n;
head->p = head1->p;
dm_list_init(head1);
}


@ -1,211 +0,0 @@
#ifndef BASE_DATA_STRUCT_LIST_H
#define BASE_DATA_STRUCT_LIST_H
#include "base/memory/container_of.h"
//----------------------------------------------------------------
/*
* A list consists of a list head plus elements.
* Each element has 'next' and 'previous' pointers.
* The list head's pointers point to the first and the last element.
*/
struct dm_list {
struct dm_list *n, *p;
};
/*
* String list.
*/
struct dm_str_list {
struct dm_list list;
const char *str;
};
/*
* Initialise a list before use.
* The list head's next and previous pointers point back to itself.
*/
#define DM_LIST_HEAD_INIT(name) { &(name), &(name) }
#define DM_LIST_INIT(name) struct dm_list name = DM_LIST_HEAD_INIT(name)
void dm_list_init(struct dm_list *head);
/*
* Insert an element before 'head'.
* If 'head' is the list head, this adds an element to the end of the list.
*/
void dm_list_add(struct dm_list *head, struct dm_list *elem);
/*
* Insert an element after 'head'.
* If 'head' is the list head, this adds an element to the front of the list.
*/
void dm_list_add_h(struct dm_list *head, struct dm_list *elem);
/*
* Delete an element from its list.
* Note that this doesn't change the element itself - it may still be safe
* to follow its pointers.
*/
void dm_list_del(struct dm_list *elem);
/*
* Remove an element from existing list and insert before 'head'.
*/
void dm_list_move(struct dm_list *head, struct dm_list *elem);
/*
* Join 'head1' to the end of 'head'.
*/
void dm_list_splice(struct dm_list *head, struct dm_list *head1);
/*
* Is the list empty?
*/
int dm_list_empty(const struct dm_list *head);
/*
* Is this the first element of the list?
*/
int dm_list_start(const struct dm_list *head, const struct dm_list *elem);
/*
* Is this the last element of the list?
*/
int dm_list_end(const struct dm_list *head, const struct dm_list *elem);
/*
* Return first element of the list or NULL if empty
*/
struct dm_list *dm_list_first(const struct dm_list *head);
/*
* Return last element of the list or NULL if empty
*/
struct dm_list *dm_list_last(const struct dm_list *head);
/*
* Return the previous element of the list, or NULL if we've reached the start.
*/
struct dm_list *dm_list_prev(const struct dm_list *head, const struct dm_list *elem);
/*
* Return the next element of the list, or NULL if we've reached the end.
*/
struct dm_list *dm_list_next(const struct dm_list *head, const struct dm_list *elem);
/*
* Given the address v of an instance of 'struct dm_list' called 'head'
* contained in a structure of type t, return the containing structure.
*/
#define dm_list_struct_base(v, t, head) \
container_of(v, t, head)
/*
* Given the address v of an instance of 'struct dm_list list' contained in
* a structure of type t, return the containing structure.
*/
#define dm_list_item(v, t) dm_list_struct_base((v), t, list)
/*
* Given the address v of one known element e in a known structure of type t,
* return another element f.
*/
#define dm_struct_field(v, t, e, f) \
(((t *)((uintptr_t)(v) - offsetof(t, e)))->f)
/*
* Given the address v of a known element e in a known structure of type t,
* return the list head 'list'
*/
#define dm_list_head(v, t, e) dm_struct_field(v, t, e, list)
/*
* Set v to each element of a list in turn.
*/
#define dm_list_iterate(v, head) \
for (v = (head)->n; v != head; v = v->n)
/*
* Set v to each element in a list in turn, starting from the element
* in front of 'start'.
* You can use this to 'unwind' a list_iterate and back out actions on
* already-processed elements.
* If 'start' is 'head' it walks the list backwards.
*/
#define dm_list_uniterate(v, head, start) \
for (v = (start)->p; v != head; v = v->p)
/*
* A safe way to walk a list and delete and free some elements along
* the way.
* t must be defined as a temporary variable of the same type as v.
*/
#define dm_list_iterate_safe(v, t, head) \
for (v = (head)->n, t = v->n; v != head; v = t, t = v->n)
/*
* Walk a list, setting 'v' in turn to the containing structure of each item.
* The containing structure should be the same type as 'v'.
* The 'struct dm_list' variable within the containing structure is 'field'.
*/
#define dm_list_iterate_items_gen(v, head, field) \
for (v = dm_list_struct_base((head)->n, __typeof__(*v), field); \
&v->field != (head); \
v = dm_list_struct_base(v->field.n, __typeof__(*v), field))
/*
* Walk a list, setting 'v' in turn to the containing structure of each item.
* The containing structure should be the same type as 'v'.
* The list should be 'struct dm_list list' within the containing structure.
*/
#define dm_list_iterate_items(v, head) dm_list_iterate_items_gen(v, (head), list)
/*
* Walk a list, setting 'v' in turn to the containing structure of each item.
* The containing structure should be the same type as 'v'.
* The 'struct dm_list' variable within the containing structure is 'field'.
* t must be defined as a temporary variable of the same type as v.
*/
#define dm_list_iterate_items_gen_safe(v, t, head, field) \
for (v = dm_list_struct_base((head)->n, __typeof__(*v), field), \
t = dm_list_struct_base(v->field.n, __typeof__(*v), field); \
&v->field != (head); \
v = t, t = dm_list_struct_base(v->field.n, __typeof__(*v), field))
/*
* Walk a list, setting 'v' in turn to the containing structure of each item.
* The containing structure should be the same type as 'v'.
* The list should be 'struct dm_list list' within the containing structure.
* t must be defined as a temporary variable of the same type as v.
*/
#define dm_list_iterate_items_safe(v, t, head) \
dm_list_iterate_items_gen_safe(v, t, (head), list)
/*
* Walk a list backwards, setting 'v' in turn to the containing structure
* of each item.
* The containing structure should be the same type as 'v'.
* The 'struct dm_list' variable within the containing structure is 'field'.
*/
#define dm_list_iterate_back_items_gen(v, head, field) \
for (v = dm_list_struct_base((head)->p, __typeof__(*v), field); \
&v->field != (head); \
v = dm_list_struct_base(v->field.p, __typeof__(*v), field))
/*
* Walk a list backwards, setting 'v' in turn to the containing structure
* of each item.
* The containing structure should be the same type as 'v'.
* The list should be 'struct dm_list list' within the containing structure.
*/
#define dm_list_iterate_back_items(v, head) dm_list_iterate_back_items_gen(v, (head), list)
/*
* Return the number of elements in a list by walking it.
*/
unsigned int dm_list_size(const struct dm_list *head);
//----------------------------------------------------------------
#endif
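As a quick orientation to the list API above, here is a hypothetical usage sketch (not part of the header); the struct name my_item and the values are invented, and the dm_list declarations above are assumed to be in scope:

#include <stdio.h>

struct my_item {
	struct dm_list list;	/* embedded node; named 'list' so dm_list_iterate_items() works */
	int value;
};

static void list_example(void)
{
	DM_LIST_INIT(head);			/* declares and initialises 'struct dm_list head' */
	struct my_item a = { .value = 1 }, b = { .value = 2 };
	struct my_item *item, *tmp;

	dm_list_add(&head, &a.list);		/* append to the end */
	dm_list_add_h(&head, &b.list);		/* insert at the front */

	dm_list_iterate_items(item, &head)	/* walks b, then a */
		printf("%d\n", item->value);

	/* The _safe variant keeps a temporary so elements can be deleted mid-walk. */
	dm_list_iterate_items_safe(item, tmp, &head)
		dm_list_del(&item->list);
}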

File diff suppressed because it is too large.

@ -1,256 +0,0 @@
// Copyright (C) 2018 Red Hat, Inc. All rights reserved.
//
// This file is part of LVM2.
//
// This copyrighted material is made available to anyone wishing to use,
// modify, copy, or redistribute it subject to the terms and conditions
// of the GNU Lesser General Public License v.2.1.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#include "radix-tree.h"
#include "base/memory/container_of.h"
#include "base/memory/zalloc.h"
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
//----------------------------------------------------------------
// This implementation is based around nested binary trees. Very
// simple (and hopefully correct).
struct node {
struct node *left;
struct node *right;
uint8_t key;
struct node *center;
bool has_value;
union radix_value value;
};
struct radix_tree {
radix_value_dtr dtr;
void *dtr_context;
struct node *root;
};
struct radix_tree *
radix_tree_create(radix_value_dtr dtr, void *dtr_context)
{
struct radix_tree *rt = zalloc(sizeof(*rt));
if (rt) {
rt->dtr = dtr;
rt->dtr_context = dtr_context;
}
return rt;
}
// Returns the number of entries in the tree
static unsigned _destroy_tree(struct node *n, radix_value_dtr dtr, void *context)
{
unsigned r;
if (!n)
return 0;
r = _destroy_tree(n->left, dtr, context);
r += _destroy_tree(n->right, dtr, context);
r += _destroy_tree(n->center, dtr, context);
if (n->has_value) {
if (dtr)
dtr(context, n->value);
r++;
}
free(n);
return r;
}
void radix_tree_destroy(struct radix_tree *rt)
{
_destroy_tree(rt->root, rt->dtr, rt->dtr_context);
free(rt);
}
static unsigned _count(struct node *n)
{
unsigned r;
if (!n)
return 0;
r = _count(n->left);
r += _count(n->right);
r += _count(n->center);
if (n->has_value)
r++;
return r;
}
unsigned radix_tree_size(struct radix_tree *rt)
{
return _count(rt->root);
}
static struct node **_lookup(struct node **pn, uint8_t *kb, uint8_t *ke)
{
struct node *n = *pn;
if (!n || (kb == ke))
return pn;
if (*kb < n->key)
return _lookup(&n->left, kb, ke);
else if (*kb > n->key)
return _lookup(&n->right, kb, ke);
else
return _lookup(&n->center, kb + 1, ke);
}
static bool _insert(struct node **pn, uint8_t *kb, uint8_t *ke, union radix_value v)
{
struct node *n = *pn;
if (!n) {
n = zalloc(sizeof(*n));
if (!n)
return false;
n->key = *kb;
*pn = n;
}
if (kb == ke) {
n->has_value = true;
n->value = v;
return true;
}
if (*kb < n->key)
return _insert(&n->left, kb, ke, v);
else if (*kb > n->key)
return _insert(&n->right, kb, ke, v);
else
return _insert(&n->center, kb + 1, ke, v);
}
bool radix_tree_insert(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, union radix_value v)
{
return _insert(&rt->root, kb, ke, v);
}
bool radix_tree_remove(struct radix_tree *rt, uint8_t *kb, uint8_t *ke)
{
struct node **pn = _lookup(&rt->root, kb, ke);
struct node *n = *pn;
if (!n || !n->has_value)
return false;
else {
if (rt->dtr)
rt->dtr(rt->dtr_context, n->value);
if (n->left || n->center || n->right) {
n->has_value = false;
return true;
} else {
// FIXME: delete parent if this was the last entry
free(n);
*pn = NULL;
}
return true;
}
}
unsigned radix_tree_remove_prefix(struct radix_tree *rt, uint8_t *kb, uint8_t *ke)
{
struct node **pn;
unsigned count = 0;
pn = _lookup(&rt->root, kb, ke);
if (*pn) {
count = _destroy_tree(*pn, rt->dtr, rt->dtr_context);
*pn = NULL;
}
return count;
}
bool
radix_tree_lookup(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, union radix_value *result)
{
struct node **pn = _lookup(&rt->root, kb, ke);
struct node *n = *pn;
if (n && n->has_value) {
*result = n->value;
return true;
} else
return false;
}
static void _iterate(struct node *n, struct radix_tree_iterator *it)
{
if (!n)
return;
_iterate(n->left, it);
if (n->has_value)
// FIXME: fill out the key
it->visit(it, NULL, NULL, n->value);
_iterate(n->center, it);
_iterate(n->right, it);
}
void radix_tree_iterate(struct radix_tree *rt, uint8_t *kb, uint8_t *ke,
struct radix_tree_iterator *it)
{
if (kb == ke)
_iterate(rt->root, it);
else {
struct node **pn = _lookup(&rt->root, kb, ke);
struct node *n = *pn;
if (n) {
if (n->has_value)
it->visit(it, NULL, NULL, n->value);
_iterate(n->center, it);
}
}
}
bool radix_tree_is_well_formed(struct radix_tree *rt)
{
return true;
}
void radix_tree_dump(struct radix_tree *rt, FILE *out)
{
}
//----------------------------------------------------------------


@ -1,21 +0,0 @@
// Copyright (C) 2018 Red Hat, Inc. All rights reserved.
//
// This file is part of LVM2.
//
// This copyrighted material is made available to anyone wishing to use,
// modify, copy, or redistribute it subject to the terms and conditions
// of the GNU Lesser General Public License v.2.1.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//----------------------------------------------------------------
#ifdef SIMPLE_RADIX_TREE
#include "base/data-struct/radix-tree-simple.c"
#else
#include "base/data-struct/radix-tree-adaptive.c"
#endif
//----------------------------------------------------------------


@ -1,64 +0,0 @@
// Copyright (C) 2018 Red Hat, Inc. All rights reserved.
//
// This file is part of LVM2.
//
// This copyrighted material is made available to anyone wishing to use,
// modify, copy, or redistribute it subject to the terms and conditions
// of the GNU Lesser General Public License v.2.1.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#ifndef BASE_DATA_STRUCT_RADIX_TREE_H
#define BASE_DATA_STRUCT_RADIX_TREE_H
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
//----------------------------------------------------------------
struct radix_tree;
union radix_value {
void *ptr;
uint64_t n;
};
typedef void (*radix_value_dtr)(void *context, union radix_value v);
// dtr will be called on any deleted entries. dtr may be NULL.
struct radix_tree *radix_tree_create(radix_value_dtr dtr, void *dtr_context);
void radix_tree_destroy(struct radix_tree *rt);
unsigned radix_tree_size(struct radix_tree *rt);
bool radix_tree_insert(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, union radix_value v);
bool radix_tree_remove(struct radix_tree *rt, uint8_t *kb, uint8_t *ke);
// Returns the number of values removed
unsigned radix_tree_remove_prefix(struct radix_tree *rt, uint8_t *prefix_b, uint8_t *prefix_e);
bool radix_tree_lookup(struct radix_tree *rt,
uint8_t *kb, uint8_t *ke, union radix_value *result);
// The radix tree stores entries in lexicographical order, which means
// we can iterate entries in order, or iterate entries with a particular
// prefix.
struct radix_tree_iterator {
// Returns false if the iteration should end.
bool (*visit)(struct radix_tree_iterator *it,
uint8_t *kb, uint8_t *ke, union radix_value v);
};
void radix_tree_iterate(struct radix_tree *rt, uint8_t *kb, uint8_t *ke,
struct radix_tree_iterator *it);
// Checks that some constraints on the shape of the tree are
// being held. For debug only.
bool radix_tree_is_well_formed(struct radix_tree *rt);
void radix_tree_dump(struct radix_tree *rt, FILE *out);
//----------------------------------------------------------------
#endif
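A hypothetical usage sketch of the interface above (helper names and key bytes invented); keys are half-open byte ranges [kb, ke), and the header already pulls in the stdio/stdint declarations used here:

static bool _visit(struct radix_tree_iterator *it,
		   uint8_t *kb, uint8_t *ke, union radix_value v)
{
	printf("value %llu\n", (unsigned long long) v.n);
	return true;			/* false would stop the iteration */
}

static void radix_example(void)
{
	struct radix_tree *rt = radix_tree_create(NULL, NULL);	/* no destructor */
	uint8_t key[] = "vg0/lv0";
	uint8_t *ke = key + sizeof(key) - 1;			/* exclude the NUL */
	union radix_value v = { .n = 42 }, out;
	struct radix_tree_iterator it = { .visit = _visit };

	if (!rt)
		return;

	radix_tree_insert(rt, key, ke, v);

	if (radix_tree_lookup(rt, key, ke, &out))
		printf("found %llu\n", (unsigned long long) out.n);

	radix_tree_iterate(rt, key, key, &it);	/* kb == ke: visit every entry */

	radix_tree_destroy(rt);
}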


@ -1,25 +0,0 @@
// Copyright (C) 2018 - 2020 Red Hat, Inc. All rights reserved.
//
// This file is part of LVM2.
//
// This copyrighted material is made available to anyone wishing to use,
// modify, copy, or redistribute it subject to the terms and conditions
// of the GNU Lesser General Public License v.2.1.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#ifndef BASE_MEMORY_CONTAINER_OF_H
#define BASE_MEMORY_CONTAINER_OF_H
#include <stddef.h> // offsetof
//----------------------------------------------------------------
#define container_of(v, t, head) \
((t *)((char *)(v) - offsetof(t, head)))
//----------------------------------------------------------------
#endif
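container_of() simply subtracts the member offset from the member's address; a small invented example using the dm_list type from earlier in this diff:

struct lv_item {
	int refcount;
	struct dm_list list;	/* embedded member */
};

/* Recover the enclosing lv_item from a pointer to its embedded list member. */
static struct lv_item *lv_item_from_list(struct dm_list *l)
{
	return container_of(l, struct lv_item, list);
}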


@ -1,27 +0,0 @@
// Copyright (C) 2018 Red Hat, Inc. All rights reserved.
//
// This file is part of LVM2.
//
// This copyrighted material is made available to anyone wishing to use,
// modify, copy, or redistribute it subject to the terms and conditions
// of the GNU Lesser General Public License v.2.1.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#ifndef BASE_MEMORY_ZALLOC_H
#define BASE_MEMORY_ZALLOC_H
#include <stdlib.h>
//----------------------------------------------------------------
static inline void *zalloc(size_t len)
{
return calloc(1, len);
}
//----------------------------------------------------------------
#endif
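zalloc() is just calloc(1, len), so every allocation comes back zero-filled; a sketch of the pattern used by radix-tree-simple.c above (the helper name is invented, struct node is the one defined in that file):

static struct node *new_node(uint8_t key)
{
	struct node *n = zalloc(sizeof(*n));	/* zero-filled: child pointers start as NULL */

	if (n)
		n->key = key;

	return n;
}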


@ -1,5 +1,5 @@
#
# Copyright (C) 2004-2018 Red Hat, Inc. All rights reserved.
# Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
@ -25,7 +25,6 @@ PROFILES=$(PROFILE_TEMPLATES) \
$(srcdir)/cache-smq.profile \
$(srcdir)/thin-generic.profile \
$(srcdir)/thin-performance.profile \
$(srcdir)/vdo-small.profile \
$(srcdir)/lvmdbusd.profile
include $(top_builddir)/make.tmpl
@ -33,8 +32,8 @@ include $(top_builddir)/make.tmpl
.PHONY: install_conf install_localconf install_profiles
generate:
$(top_builddir)/tools/lvm dumpconfig --type default --unconfigured --withgeneralpreamble --withcomments --ignorelocal --withspaces > example.conf.in
$(top_builddir)/tools/lvm dumpconfig --type default --unconfigured --withlocalpreamble --withcomments --withspaces local > lvmlocal.conf.in
(cat $(top_srcdir)/conf/example.conf.base && LD_LIBRARY_PATH=$(top_builddir)/libdm:$(LD_LIBRARY_PATH) $(top_builddir)/tools/lvm dumpconfig --type default --unconfigured --withcomments --ignorelocal --withspaces) > example.conf.in
(cat $(top_srcdir)/conf/lvmlocal.conf.base && LD_LIBRARY_PATH=$(top_builddir)/libdm:$(LD_LIBRARY_PATH) $(top_builddir)/tools/lvm dumpconfig --type default --unconfigured --withcomments --withspaces local) > lvmlocal.conf.in
install_conf: $(CONFSRC)
@if [ ! -e $(confdir)/$(CONFDEST) ]; then \
@ -49,9 +48,8 @@ install_localconf: $(CONFLOCAL)
fi
install_profiles: $(PROFILES)
@echo " [INSTALL] $<"
$(Q) $(INSTALL_DIR) $(profiledir)
$(Q) $(INSTALL_DATA) $(PROFILES) $(profiledir)/
$(INSTALL_DIR) $(DESTDIR)$(DEFAULT_PROFILE_DIR)
$(INSTALL_DATA) $(PROFILES) $(DESTDIR)$(DEFAULT_PROFILE_DIR)/
install_lvm2: install_conf install_localconf install_profiles


@ -9,6 +9,6 @@ allocation {
cache_mode = "writethrough"
cache_policy = "smq"
cache_settings {
# currently no settings for "smq" policy
# currently no settins for "smq" policy
}
}

conf/example.conf.base Normal file

@ -0,0 +1,23 @@
# This is an example configuration file for the LVM2 system.
# It contains the default settings that would be used if there was no
# @DEFAULT_SYS_DIR@/lvm.conf file.
#
# Refer to 'man lvm.conf' for further information including the file layout.
#
# Refer to 'man lvm.conf' for information about how settings configured in
# this file are combined with built-in values and command line options to
# arrive at the final values used by LVM.
#
# Refer to 'man lvmconfig' for information about displaying the built-in
# and configured values used by LVM.
#
# If a default value is set in this file (not commented out), then a
# new version of LVM using this file will continue using that value,
# even if the new version of LVM changes the built-in default value.
#
# To put this file in a different directory and override @DEFAULT_SYS_DIR@ set
# the environment variable LVM_SYSTEM_DIR before running the tools.
#
# N.B. Take care that each setting only appears once if uncommenting
# example settings in this file.

File diff suppressed because it is too large.

conf/lvmlocal.conf.base Normal file

@ -0,0 +1,19 @@
# This is a local configuration file template for the LVM2 system
# which should be installed as @DEFAULT_SYS_DIR@/lvmlocal.conf .
#
# Refer to 'man lvm.conf' for information about the file layout.
#
# To put this file in a different directory and override
# @DEFAULT_SYS_DIR@ set the environment variable LVM_SYSTEM_DIR before
# running the tools.
#
# The lvmlocal.conf file is normally expected to contain only the
# "local" section which contains settings that should not be shared or
# repeated among different hosts. (But if other sections are present,
# they *will* get processed. Settings in this file override equivalent
# ones in lvm.conf and are in turn overridden by ones in any enabled
# lvm_<tag>.conf files.)
#
# Please take care that each setting only appears once if uncommenting
# example settings in this file and never copy this file between hosts.


@ -28,13 +28,13 @@ local {
# main configuration file, e.g. lvm.conf. When used, it must be set to
# a unique value among all hosts sharing access to the storage,
# e.g. a host name.
#
#
# Example
# Set no system ID:
# system_id = ""
# Set the system_id to a specific name:
# system_id = "host1"
#
#
# This configuration option has an automatic default value.
# system_id = ""


@ -1,24 +0,0 @@
# Demo configuration for 'VDO' using less memory.
# ~lvmconfig --type full | grep vdo
allocation {
vdo_use_compression=1
vdo_use_deduplication=1
vdo_use_metadata_hints=1
vdo_minimum_io_size=4096
vdo_block_map_cache_size_mb=128
vdo_block_map_period=16380
vdo_check_point_frequency=0
vdo_use_sparse_index=0
vdo_index_memory_size_mb=256
vdo_slab_size_mb=2048
vdo_ack_threads=1
vdo_bio_threads=1
vdo_bio_rotation=64
vdo_cpu_threads=2
vdo_hash_zone_threads=1
vdo_logical_threads=1
vdo_physical_threads=1
vdo_write_policy="auto"
vdo_max_discard=1
}

configure vendored

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -41,21 +41,6 @@ struct lv_segment *last_seg(const struct logical_volume *lv)
return ((struct lv_segment **)lv)[0];
}
const char *find_config_tree_str(struct cmd_context *cmd, int id, struct profile *profile)
{
return "STRING";
}
/*
struct logical_volume *origin_from_cow(const struct logical_volume *lv)
{
if (lv)
return lv;
__coverity_panic__();
}
*/
/* simple_memccpy() from glibc */
void *memccpy(void *dest, const void *src, int c, size_t n)
{
@ -86,17 +71,6 @@ void model_FD_ZERO(void *fdset)
((long*)fdset)[i] = 0;
}
/* Recent Coverity reports quite weird errors... */
int *__errno_location(void)
{
}
const unsigned short **__ctype_b_loc (void)
{
}
/*
* Added extra pointer check to not need these models,
* for now just keep them in file


@ -15,7 +15,11 @@ srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
.PHONY: dmeventd cmirrord lvmpolld lvmlockd
.PHONY: dmeventd clvmd cmirrord lvmetad lvmpolld lvmlockd
ifneq ("@CLVMD@", "none")
SUBDIRS += clvmd
endif
ifeq ("@BUILD_CMIRRORD@", "yes")
SUBDIRS += cmirrord
@ -28,20 +32,24 @@ daemons.cflow: dmeventd.cflow
endif
endif
ifeq ("@BUILD_LVMETAD@", "yes")
SUBDIRS += lvmetad
endif
ifeq ("@BUILD_LVMPOLLD@", "yes")
SUBDIRS += lvmpolld
endif
ifeq ("@BUILD_LVMLOCKD@", "yes")
SUBDIRS += lvmlockd
SUBDIRS += lvmlockd
endif
ifeq ("@BUILD_LVMDBUSD@", "yes")
SUBDIRS += lvmdbusd
SUBDIRS += lvmdbusd
endif
ifeq ($(MAKECMDGOALS),distclean)
SUBDIRS = cmirrord dmeventd lvmpolld lvmlockd lvmdbusd
SUBDIRS = clvmd cmirrord dmeventd lvmetad lvmpolld lvmlockd lvmdbusd
endif
include $(top_builddir)/make.tmpl

daemons/clvmd/.gitignore vendored Normal file

@ -0,0 +1 @@
clvmd

daemons/clvmd/Makefile.in Normal file

@ -0,0 +1,103 @@
#
# Copyright (C) 2004 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
CMAN_LIBS = @CMAN_LIBS@
CMAN_CFLAGS = @CMAN_CFLAGS@
CMAP_LIBS = @CMAP_LIBS@
CMAP_CFLAGS = @CMAP_CFLAGS@
CONFDB_LIBS = @CONFDB_LIBS@
CONFDB_CFLAGS = @CONFDB_CFLAGS@
CPG_LIBS = @CPG_LIBS@
CPG_CFLAGS = @CPG_CFLAGS@
DLM_LIBS = @DLM_LIBS@
DLM_CFLAGS = @DLM_CFLAGS@
QUORUM_LIBS = @QUORUM_LIBS@
QUORUM_CFLAGS = @QUORUM_CFLAGS@
SALCK_LIBS = @SALCK_LIBS@
SALCK_CFLAGS = @SALCK_CFLAGS@
SOURCES = \
clvmd-command.c \
clvmd.c \
lvm-functions.c \
refresh_clvmd.c
ifneq (,$(findstring cman,, "@CLVMD@,"))
SOURCES += clvmd-cman.c
LMLIBS += $(CMAN_LIBS) $(CONFDB_LIBS) $(DLM_LIBS)
CFLAGS += $(CMAN_CFLAGS) $(CONFDB_CFLAGS) $(DLM_CFLAGS)
DEFS += -DUSE_CMAN
endif
ifneq (,$(findstring openais,, "@CLVMD@,"))
SOURCES += clvmd-openais.c
LMLIBS += $(CONFDB_LIBS) $(CPG_LIBS) $(SALCK_LIBS)
CFLAGS += $(CONFDB_CFLAGS) $(CPG_CFLAGS) $(SALCK_CFLAGS)
DEFS += -DUSE_OPENAIS
endif
ifneq (,$(findstring corosync,, "@CLVMD@,"))
SOURCES += clvmd-corosync.c
LMLIBS += $(CMAP_LIBS) $(CONFDB_LIBS) $(CPG_LIBS) $(DLM_LIBS) $(QUORUM_LIBS)
CFLAGS += $(CMAP_CFLAGS) $(CONFDB_CFLAGS) $(CPG_CFLAGS) $(DLM_CFLAGS) $(QUORUM_CFLAGS)
DEFS += -DUSE_COROSYNC
endif
ifneq (,$(findstring singlenode,, "@CLVMD@,"))
SOURCES += clvmd-singlenode.c
DEFS += -DUSE_SINGLENODE
endif
ifeq ($(MAKECMDGOALS),distclean)
SOURCES += clvmd-cman.c
SOURCES += clvmd-openais.c
SOURCES += clvmd-corosync.c
SOURCES += clvmd-singlenode.c
endif
TARGETS = \
clvmd
LVMLIBS = $(LVMINTERNAL_LIBS)
ifeq ("@DMEVENTD@", "yes")
LVMLIBS += -ldevmapper-event
endif
include $(top_builddir)/make.tmpl
LVMLIBS += -ldevmapper
LIBS += $(PTHREAD_LIBS)
CFLAGS += -fno-strict-aliasing $(EXTRA_EXEC_CFLAGS)
LDFLAGS += $(EXTRA_EXEC_LDFLAGS)
INSTALL_TARGETS = \
install_clvmd
clvmd: $(OBJECTS) $(top_builddir)/lib/liblvm-internal.a
$(CC) $(CFLAGS) $(LDFLAGS) -o clvmd $(OBJECTS) \
$(LVMLIBS) $(LMLIBS) $(LIBS)
.PHONY: install_clvmd
install_clvmd: $(TARGETS)
$(INSTALL_PROGRAM) -D clvmd $(usrsbindir)/clvmd
install: $(INSTALL_TARGETS)
install_cluster: $(INSTALL_TARGETS)

daemons/clvmd/clvm.h Normal file

@ -0,0 +1,85 @@
/*
* Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License v.2.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* Definitions for CLVMD server and clients */
/*
* The protocol spoken over the cluster and across the local socket.
*/
#ifndef _CLVM_H
#define _CLVM_H
#include "configure.h"
#include <inttypes.h>
struct clvm_header {
uint8_t cmd; /* See below */
uint8_t flags; /* See below */
uint16_t xid; /* Transaction ID */
uint32_t clientid; /* Only used in Daemon->Daemon comms */
int32_t status; /* For replies, whether request succeeded */
uint32_t arglen; /* Length of argument below.
If >1500 then it will be passed
around the cluster in the system LV */
char node[1]; /* Actually a NUL-terminated string, node name.
If this is empty then the command is
forwarded to all cluster nodes unless
FLAG_LOCAL or FLAG_REMOTE is also set. */
char args[1]; /* Arguments for the command follow the
node name, This member is only
valid if the node name is empty */
} __attribute__ ((packed));
/* Flags */
#define CLVMD_FLAG_LOCAL 1 /* Only do this on the local node */
#define CLVMD_FLAG_SYSTEMLV 2 /* Data in system LV under my node name */
#define CLVMD_FLAG_NODEERRS 4 /* Reply has errors in node-specific portion */
#define CLVMD_FLAG_REMOTE 8 /* Do this on all nodes except for the local node */
/* Name of the local socket to communicate between lvm and clvmd */
#define CLVMD_SOCKNAME DEFAULT_RUN_DIR "/clvmd.sock"
/* Internal commands & replies */
#define CLVMD_CMD_REPLY 1
#define CLVMD_CMD_VERSION 2 /* Send version around cluster when we start */
#define CLVMD_CMD_GOAWAY 3 /* Die if received this - we are running
an incompatible version */
#define CLVMD_CMD_TEST 4 /* Just for mucking about */
#define CLVMD_CMD_LOCK 30
#define CLVMD_CMD_UNLOCK 31
/* Lock/Unlock commands */
#define CLVMD_CMD_LOCK_LV 50
#define CLVMD_CMD_LOCK_VG 51
#define CLVMD_CMD_LOCK_QUERY 52
/* Misc functions */
#define CLVMD_CMD_REFRESH 40
#define CLVMD_CMD_GET_CLUSTERNAME 41
#define CLVMD_CMD_SET_DEBUG 42
#define CLVMD_CMD_VG_BACKUP 43
#define CLVMD_CMD_RESTART 44
#define CLVMD_CMD_SYNC_NAMES 45
/* Used internally by some callers, but not part of the protocol.*/
#ifndef NODE_ALL
# define NODE_ALL "*"
# define NODE_LOCAL "."
# define NODE_REMOTE "^"
#endif
#endif
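For illustration only, a client might lay out a request on the local socket roughly like this; the helper name is invented, error handling is minimal, and <stdlib.h>/<string.h> plus this header are assumed to be included:

/* Build a CLVMD_CMD_TEST request with an empty node name (broadcast). */
static struct clvm_header *build_test_msg(const char *args, size_t *msg_len)
{
	uint32_t arglen = strlen(args) + 1;
	size_t len = sizeof(struct clvm_header) + arglen;
	struct clvm_header *h = calloc(1, len);

	if (!h)
		return NULL;

	h->cmd = CLVMD_CMD_TEST;
	h->flags = 0;			/* empty node name + no flags: forward to all nodes */
	h->arglen = arglen;
	h->node[0] = '\0';		/* empty node name */
	memcpy(h->node + 1, args, arglen);	/* args follow the node name */

	*msg_len = len;
	return h;
}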

daemons/clvmd/clvmd-cman.c Normal file

@ -0,0 +1,505 @@
/*
* Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License v.2.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* CMAN communication layer for clvmd.
*/
#include "clvmd-common.h"
#include <pthread.h>
#include "clvmd-comms.h"
#include "clvm.h"
#include "clvmd.h"
#include "lvm-functions.h"
#include <libdlm.h>
#include <syslog.h>
#define LOCKSPACE_NAME "clvmd"
struct clvmd_node
{
struct cman_node *node;
int clvmd_up;
};
static int num_nodes;
static struct cman_node *nodes = NULL;
static struct cman_node this_node;
static int count_nodes; /* size of allocated nodes array */
static struct dm_hash_table *node_updown_hash;
static dlm_lshandle_t *lockspace;
static cman_handle_t c_handle;
static void count_clvmds_running(void);
static void get_members(void);
static int nodeid_from_csid(const char *csid);
static int name_from_nodeid(int nodeid, char *name);
static void event_callback(cman_handle_t handle, void *private, int reason, int arg);
static void data_callback(cman_handle_t handle, void *private,
char *buf, int len, uint8_t port, int nodeid);
struct lock_wait {
pthread_cond_t cond;
pthread_mutex_t mutex;
struct dlm_lksb lksb;
};
static int _init_cluster(void)
{
node_updown_hash = dm_hash_create(100);
/* Open the cluster communication socket */
c_handle = cman_init(NULL);
if (!c_handle) {
syslog(LOG_ERR, "Can't open cluster manager socket: %m");
return -1;
}
DEBUGLOG("Connected to CMAN\n");
if (cman_start_recv_data(c_handle, data_callback, CLUSTER_PORT_CLVMD)) {
syslog(LOG_ERR, "Can't bind cluster socket: %m");
return -1;
}
if (cman_start_notification(c_handle, event_callback)) {
syslog(LOG_ERR, "Can't start cluster event listening");
return -1;
}
/* Get the cluster members list */
get_members();
count_clvmds_running();
DEBUGLOG("CMAN initialisation complete\n");
/* Create a lockspace for LV & VG locks to live in */
lockspace = dlm_open_lockspace(LOCKSPACE_NAME);
if (!lockspace) {
lockspace = dlm_create_lockspace(LOCKSPACE_NAME, 0600);
if (!lockspace) {
syslog(LOG_ERR, "Unable to create DLM lockspace for CLVM: %m");
return -1;
}
DEBUGLOG("Created DLM lockspace for CLVMD.\n");
} else
DEBUGLOG("Opened existing DLM lockspace for CLVMD.\n");
dlm_ls_pthread_init(lockspace);
DEBUGLOG("DLM initialisation complete\n");
return 0;
}
static void _cluster_init_completed(void)
{
clvmd_cluster_init_completed();
}
static int _get_main_cluster_fd(void)
{
return cman_get_fd(c_handle);
}
static int _get_num_nodes(void)
{
int i;
int nnodes = 0;
/* return number of ACTIVE nodes */
for (i=0; i<num_nodes; i++) {
if (nodes[i].cn_member && nodes[i].cn_nodeid)
nnodes++;
}
return nnodes;
}
/* send_message with the fd check removed */
static int _cluster_send_message(const void *buf, int msglen, const char *csid,
const char *errtext)
{
int nodeid = 0;
if (csid)
memcpy(&nodeid, csid, CMAN_MAX_CSID_LEN);
if (cman_send_data(c_handle, buf, msglen, 0, CLUSTER_PORT_CLVMD, nodeid) <= 0)
{
log_error("%s", errtext);
}
return msglen;
}
static void _get_our_csid(char *csid)
{
if (this_node.cn_nodeid == 0) {
cman_get_node(c_handle, 0, &this_node);
}
memcpy(csid, &this_node.cn_nodeid, CMAN_MAX_CSID_LEN);
}
/* Call a callback routine for each node that is known (down means not running a clvmd) */
static int _cluster_do_node_callback(struct local_client *client,
void (*callback) (struct local_client *,
const char *,
int))
{
int i;
int somedown = 0;
for (i = 0; i < _get_num_nodes(); i++) {
if (nodes[i].cn_member && nodes[i].cn_nodeid) {
int up = (int)(long)dm_hash_lookup_binary(node_updown_hash, (char *)&nodes[i].cn_nodeid, sizeof(int));
callback(client, (char *)&nodes[i].cn_nodeid, up);
if (!up)
somedown = -1;
}
}
return somedown;
}
/* Process OOB messages from the cluster socket */
static void event_callback(cman_handle_t handle, void *private, int reason, int arg)
{
char namebuf[MAX_CLUSTER_MEMBER_NAME_LEN];
switch (reason) {
case CMAN_REASON_PORTCLOSED:
name_from_nodeid(arg, namebuf);
log_notice("clvmd on node %s has died\n", namebuf);
DEBUGLOG("Got port closed message, removing node %s\n", namebuf);
dm_hash_insert_binary(node_updown_hash, (char *)&arg, sizeof(int), (void *)0);
break;
case CMAN_REASON_STATECHANGE:
DEBUGLOG("Got state change message, re-reading members list\n");
get_members();
break;
#if defined(LIBCMAN_VERSION) && LIBCMAN_VERSION >= 2
case CMAN_REASON_PORTOPENED:
/* Ignore this, wait for startup message from clvmd itself */
break;
case CMAN_REASON_TRY_SHUTDOWN:
DEBUGLOG("Got try shutdown, sending OK\n");
cman_replyto_shutdown(c_handle, 1);
break;
#endif
default:
/* ERROR */
DEBUGLOG("Got unknown event callback message: %d\n", reason);
break;
}
}
static struct local_client *cman_client;
static int _cluster_fd_callback(struct local_client *fd, char *buf, int len,
const char *csid,
struct local_client **new_client)
{
/* Save this for data_callback */
cman_client = fd;
/* We never return a new client */
*new_client = NULL;
return cman_dispatch(c_handle, 0);
}
static void data_callback(cman_handle_t handle, void *private,
char *buf, int len, uint8_t port, int nodeid)
{
/* Ignore looped back messages */
if (nodeid == this_node.cn_nodeid)
return;
process_message(cman_client, buf, len, (char *)&nodeid);
}
static void _add_up_node(const char *csid)
{
/* It's up ! */
int nodeid = nodeid_from_csid(csid);
dm_hash_insert_binary(node_updown_hash, (char *)&nodeid, sizeof(int), (void *)1);
DEBUGLOG("Added new node %d to updown list\n", nodeid);
}
static void _cluster_closedown(void)
{
dlm_release_lockspace(LOCKSPACE_NAME, lockspace, 1);
cman_finish(c_handle);
}
static int is_listening(int nodeid)
{
int status;
do {
status = cman_is_listening(c_handle, nodeid, CLUSTER_PORT_CLVMD);
if (status < 0 && errno == EBUSY) { /* Don't busywait */
sleep(1);
errno = EBUSY; /* In case sleep trashes it */
}
}
while (status < 0 && errno == EBUSY);
return status;
}
/* Populate the list of CLVMDs running.
called only at startup time */
static void count_clvmds_running(void)
{
int i;
for (i = 0; i < num_nodes; i++) {
int nodeid = nodes[i].cn_nodeid;
if (is_listening(nodeid) == 1)
dm_hash_insert_binary(node_updown_hash, (void *)&nodeid, sizeof(int), (void*)1);
else
dm_hash_insert_binary(node_updown_hash, (void *)&nodeid, sizeof(int), (void*)0);
}
}
/* Get a list of active cluster members */
static void get_members(void)
{
int retnodes;
int status;
int i;
int high_nodeid = 0;
num_nodes = cman_get_node_count(c_handle);
if (num_nodes == -1) {
log_error("Unable to get node count");
return;
}
/* Not enough room for new nodes list ? */
if (num_nodes > count_nodes && nodes) {
free(nodes);
nodes = NULL;
}
if (nodes == NULL) {
count_nodes = num_nodes + 10; /* Overallocate a little */
nodes = malloc(count_nodes * sizeof(struct cman_node));
if (!nodes) {
log_error("Unable to allocate nodes array\n");
exit(5);
}
}
status = cman_get_nodes(c_handle, count_nodes, &retnodes, nodes);
if (status < 0) {
log_error("Unable to get node details");
exit(6);
}
/* Get the highest nodeid */
for (i=0; i<retnodes; i++) {
if (nodes[i].cn_nodeid > high_nodeid)
high_nodeid = nodes[i].cn_nodeid;
}
}
/* Convert a node name to a CSID */
static int _csid_from_name(char *csid, const char *name)
{
int i;
for (i = 0; i < num_nodes; i++) {
if (strcmp(name, nodes[i].cn_name) == 0) {
memcpy(csid, &nodes[i].cn_nodeid, CMAN_MAX_CSID_LEN);
return 0;
}
}
return -1;
}
/* Convert a CSID to a node name */
static int _name_from_csid(const char *csid, char *name)
{
int i;
for (i = 0; i < num_nodes; i++) {
if (memcmp(csid, &nodes[i].cn_nodeid, CMAN_MAX_CSID_LEN) == 0) {
strcpy(name, nodes[i].cn_name);
return 0;
}
}
/* Who?? */
strcpy(name, "Unknown");
return -1;
}
/* Convert a node ID to a node name */
static int name_from_nodeid(int nodeid, char *name)
{
int i;
for (i = 0; i < num_nodes; i++) {
if (nodeid == nodes[i].cn_nodeid) {
strcpy(name, nodes[i].cn_name);
return 0;
}
}
/* Who?? */
strcpy(name, "Unknown");
return -1;
}
/* Convert a CSID to a node ID */
static int nodeid_from_csid(const char *csid)
{
int nodeid;
memcpy(&nodeid, csid, CMAN_MAX_CSID_LEN);
return nodeid;
}
static int _is_quorate(void)
{
return cman_is_quorate(c_handle);
}
static void sync_ast_routine(void *arg)
{
struct lock_wait *lwait = arg;
pthread_mutex_lock(&lwait->mutex);
pthread_cond_signal(&lwait->cond);
pthread_mutex_unlock(&lwait->mutex);
}
static int _sync_lock(const char *resource, int mode, int flags, int *lockid)
{
int status;
struct lock_wait lwait;
if (!lockid) {
errno = EINVAL;
return -1;
}
DEBUGLOG("sync_lock: '%s' mode:%d flags=%d\n", resource,mode,flags);
/* Conversions need the lockid in the LKSB */
if (flags & LKF_CONVERT)
lwait.lksb.sb_lkid = *lockid;
pthread_cond_init(&lwait.cond, NULL);
pthread_mutex_init(&lwait.mutex, NULL);
pthread_mutex_lock(&lwait.mutex);
status = dlm_ls_lock(lockspace,
mode,
&lwait.lksb,
flags,
resource,
strlen(resource),
0, sync_ast_routine, &lwait, NULL, NULL);
if (status)
return status;
/* Wait for it to complete */
pthread_cond_wait(&lwait.cond, &lwait.mutex);
pthread_mutex_unlock(&lwait.mutex);
*lockid = lwait.lksb.sb_lkid;
errno = lwait.lksb.sb_status;
DEBUGLOG("sync_lock: returning lkid %x\n", *lockid);
if (lwait.lksb.sb_status)
return -1;
else
return 0;
}
static int _sync_unlock(const char *resource /* UNUSED */, int lockid)
{
int status;
struct lock_wait lwait;
DEBUGLOG("sync_unlock: '%s' lkid:%x\n", resource, lockid);
pthread_cond_init(&lwait.cond, NULL);
pthread_mutex_init(&lwait.mutex, NULL);
pthread_mutex_lock(&lwait.mutex);
status = dlm_ls_unlock(lockspace, lockid, 0, &lwait.lksb, &lwait);
if (status)
return status;
/* Wait for it to complete */
pthread_cond_wait(&lwait.cond, &lwait.mutex);
pthread_mutex_unlock(&lwait.mutex);
errno = lwait.lksb.sb_status;
if (lwait.lksb.sb_status != EUNLOCK)
return -1;
else
return 0;
}
static int _get_cluster_name(char *buf, int buflen)
{
cman_cluster_t cluster_info;
int status;
status = cman_get_cluster(c_handle, &cluster_info);
if (!status) {
strncpy(buf, cluster_info.ci_name, buflen);
}
return status;
}
static struct cluster_ops _cluster_cman_ops = {
.name = "cman",
.cluster_init_completed = _cluster_init_completed,
.cluster_send_message = _cluster_send_message,
.name_from_csid = _name_from_csid,
.csid_from_name = _csid_from_name,
.get_num_nodes = _get_num_nodes,
.cluster_fd_callback = _cluster_fd_callback,
.get_main_cluster_fd = _get_main_cluster_fd,
.cluster_do_node_callback = _cluster_do_node_callback,
.is_quorate = _is_quorate,
.get_our_csid = _get_our_csid,
.add_up_node = _add_up_node,
.cluster_closedown = _cluster_closedown,
.get_cluster_name = _get_cluster_name,
.sync_lock = _sync_lock,
.sync_unlock = _sync_unlock,
};
struct cluster_ops *init_cman_cluster(void)
{
if (!_init_cluster())
return &_cluster_cman_ops;
else
return NULL;
}


@ -0,0 +1,414 @@
/*
* Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License v.2.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
CLVMD Cluster LVM daemon command processor.
To add commands to the daemon simply add a processor in do_command and return
any messages back in buf and the length in *retlen. The initial value of
buflen is the maximum size of the buffer. If buf is not large enough then it
may be reallocated by the functions in here to a suitable size bearing in
mind that anything larger than the passed-in size will have to be returned
using the system LV and so performance will suffer.
The status return will be negated and passed back to the originating node.
pre- and post- command routines are called only on the local node. The
purpose is primarily to get and release locks, though the pre- routine should
also do any other local setups required by the command (if any) and can
return a failure code that prevents the command from being distributed around
the cluster
The pre- and post- routines are run in their own thread so can block as long
they like, do_command is run in the main clvmd thread so should not block for
too long. If the pre-command returns an error code (!=0) then the command
will not be propagated around the cluster but the post-command WILL be called.
Also note that the pre and post routines are *always* called on the local
node, even if the command to be executed was only requested to run on a
remote node. It may peek inside the client structure to check the status of
the command.
The clients of the daemon must, naturally, understand the return messages and
codes.
Routines in here may only READ the values in the client structure passed in
apart from client->private which they are free to do what they like with.
*/
#include "clvmd-common.h"
#include "clvmd-comms.h"
#include "clvm.h"
#include "clvmd.h"
#include "lvm-globals.h"
#include "lvm-functions.h"
#include "locking.h"
#include <sys/utsname.h>
extern struct cluster_ops *clops;
static int restart_clvmd(void);
/* This is where all the real work happens:
NOTE: client will be NULL when this is executed on a remote node */
int do_command(struct local_client *client, struct clvm_header *msg, int msglen,
char **buf, int buflen, int *retlen)
{
char *args = msg->node + strlen(msg->node) + 1;
int arglen = msglen - sizeof(struct clvm_header) - strlen(msg->node);
int status = 0;
char *lockname;
const char *locktype;
struct utsname nodeinfo;
unsigned char lock_cmd;
unsigned char lock_flags;
/* Do the command */
switch (msg->cmd) {
/* Just a test message */
case CLVMD_CMD_TEST:
if (arglen > buflen) {
char *new_buf;
buflen = arglen + 200;
new_buf = realloc(*buf, buflen);
if (new_buf == NULL) {
status = errno;
free (*buf);
}
*buf = new_buf;
}
if (*buf) {
if (uname(&nodeinfo))
memset(&nodeinfo, 0, sizeof(nodeinfo));
*retlen = 1 + dm_snprintf(*buf, buflen,
"TEST from %s: %s v%s",
nodeinfo.nodename, args,
nodeinfo.release);
}
break;
case CLVMD_CMD_LOCK_VG:
lock_cmd = args[0];
lock_flags = args[1];
lockname = &args[2];
/* Check to see if the VG is in use by LVM1 */
status = do_check_lvm1(lockname);
do_lock_vg(lock_cmd, lock_flags, lockname);
break;
case CLVMD_CMD_LOCK_LV:
/* This is the biggie */
lock_cmd = args[0];
lock_flags = args[1];
lockname = &args[2];
status = do_lock_lv(lock_cmd, lock_flags, lockname);
/* Replace EIO with something less scary */
if (status == EIO) {
*retlen = 1 + dm_snprintf(*buf, buflen, "%s",
get_last_lvm_error());
return EIO;
}
break;
case CLVMD_CMD_LOCK_QUERY:
lockname = &args[2];
if (buflen < 3)
return EIO;
if ((locktype = do_lock_query(lockname)))
*retlen = 1 + dm_snprintf(*buf, buflen, "%s", locktype);
break;
case CLVMD_CMD_REFRESH:
do_refresh_cache();
break;
case CLVMD_CMD_SYNC_NAMES:
lvm_do_fs_unlock();
break;
case CLVMD_CMD_SET_DEBUG:
clvmd_set_debug((debug_t) args[0]);
break;
case CLVMD_CMD_RESTART:
status = restart_clvmd();
break;
case CLVMD_CMD_GET_CLUSTERNAME:
status = clops->get_cluster_name(*buf, buflen);
if (!status)
*retlen = strlen(*buf)+1;
break;
case CLVMD_CMD_VG_BACKUP:
/*
* Do not run backup on local node, caller should do that.
*/
if (!client)
lvm_do_backup(&args[2]);
break;
default:
/* Won't get here because command is validated in pre_command */
break;
}
/* Check the status of the command and return the error text */
if (status) {
*retlen = 1 + ((*buf) ? dm_snprintf(*buf, buflen, "%s",
strerror(status)) : -1);
}
return status;
}
static int lock_vg(struct local_client *client)
{
struct dm_hash_table *lock_hash;
struct clvm_header *header =
(struct clvm_header *) client->bits.localsock.cmd;
unsigned char lock_cmd;
int lock_mode;
char *args = header->node + strlen(header->node) + 1;
int lkid;
int status;
char *lockname;
/*
* Keep a track of VG locks in our own hash table. In current
* practice there should only ever be more than two VGs locked
* if a user tries to merge lots of them at once
*/
if (!client->bits.localsock.private) {
if (!(lock_hash = dm_hash_create(3)))
return ENOMEM;
client->bits.localsock.private = (void *) lock_hash;
} else
lock_hash = (struct dm_hash_table *) client->bits.localsock.private;
lock_cmd = args[0] & (LCK_NONBLOCK | LCK_HOLD | LCK_SCOPE_MASK | LCK_TYPE_MASK);
lock_mode = ((int) lock_cmd & LCK_TYPE_MASK);
/* lock_flags = args[1]; */
lockname = &args[2];
DEBUGLOG("doing PRE command LOCK_VG '%s' at %x (client=%p)\n", lockname, lock_cmd, client);
if (lock_mode == LCK_UNLOCK) {
if (!(lkid = (int) (long) dm_hash_lookup(lock_hash, lockname)))
return EINVAL;
if ((status = sync_unlock(lockname, lkid)))
status = errno;
else
dm_hash_remove(lock_hash, lockname);
} else {
/* Read locks need to be PR; other modes get passed through */
if (lock_mode == LCK_READ)
lock_mode = LCK_PREAD;
if ((status = sync_lock(lockname, lock_mode, (lock_cmd & LCK_NONBLOCK) ? LCKF_NOQUEUE : 0, &lkid)))
status = errno;
else if (!dm_hash_insert(lock_hash, lockname, (void *) (long) lkid))
return ENOMEM;
}
return status;
}
/* Pre-command is a good place to get locks that are needed only for the duration
of the commands around the cluster (don't forget to free them in post-command),
and to sanity check the command arguments */
int do_pre_command(struct local_client *client)
{
struct clvm_header *header =
(struct clvm_header *) client->bits.localsock.cmd;
unsigned char lock_cmd;
unsigned char lock_flags;
char *args = header->node + strlen(header->node) + 1;
int lockid = 0;
int status = 0;
char *lockname;
switch (header->cmd) {
case CLVMD_CMD_TEST:
status = sync_lock("CLVMD_TEST", LCK_EXCL, 0, &lockid);
client->bits.localsock.private = (void *)(long)lockid;
break;
case CLVMD_CMD_LOCK_VG:
lockname = &args[2];
/* We take out a real lock unless LCK_CACHE was set */
if (!strncmp(lockname, "V_", 2) ||
!strncmp(lockname, "P_#", 3))
status = lock_vg(client);
break;
case CLVMD_CMD_LOCK_LV:
lock_cmd = args[0];
lock_flags = args[1];
lockname = &args[2];
status = pre_lock_lv(lock_cmd, lock_flags, lockname);
break;
case CLVMD_CMD_REFRESH:
case CLVMD_CMD_GET_CLUSTERNAME:
case CLVMD_CMD_SET_DEBUG:
case CLVMD_CMD_VG_BACKUP:
case CLVMD_CMD_SYNC_NAMES:
case CLVMD_CMD_LOCK_QUERY:
case CLVMD_CMD_RESTART:
break;
default:
log_error("Unknown command %d received\n", header->cmd);
status = EINVAL;
}
return status;
}
/* Note that the post-command routine is called even if the pre-command or the real command
failed */
int do_post_command(struct local_client *client)
{
struct clvm_header *header =
(struct clvm_header *) client->bits.localsock.cmd;
int status = 0;
unsigned char lock_cmd;
unsigned char lock_flags;
char *args = header->node + strlen(header->node) + 1;
char *lockname;
switch (header->cmd) {
case CLVMD_CMD_TEST:
status = sync_unlock("CLVMD_TEST", (int) (long) client->bits.localsock.private);
client->bits.localsock.private = NULL;
break;
case CLVMD_CMD_LOCK_LV:
lock_cmd = args[0];
lock_flags = args[1];
lockname = &args[2];
status = post_lock_lv(lock_cmd, lock_flags, lockname);
break;
default:
/* Nothing to do here */
break;
}
return status;
}
/* Called when the client is about to be deleted */
void cmd_client_cleanup(struct local_client *client)
{
struct dm_hash_node *v;
struct dm_hash_table *lock_hash;
int lkid;
char *lockname;
DEBUGLOG("Client thread cleanup (%p)\n", client);
if (!client->bits.localsock.private)
return;
lock_hash = (struct dm_hash_table *)client->bits.localsock.private;
dm_hash_iterate(v, lock_hash) {
lkid = (int)(long)dm_hash_get_data(lock_hash, v);
lockname = dm_hash_get_key(lock_hash, v);
DEBUGLOG("Cleanup (%p): Unlocking lock %s %x\n", client, lockname, lkid);
(void) sync_unlock(lockname, lkid);
}
dm_hash_destroy(lock_hash);
client->bits.localsock.private = NULL;
}
static int restart_clvmd(void)
{
const char **argv;
char *lv_name;
int argc = 0, max_locks = 0;
struct dm_hash_node *hn = NULL;
char debug_arg[16];
const char *clvmd = getenv("LVM_CLVMD_BINARY") ? : CLVMD_PATH;
DEBUGLOG("clvmd restart requested\n");
/* Count exclusively-open LVs */
do {
hn = get_next_excl_lock(hn, &lv_name);
if (lv_name) {
max_locks++;
if (!*lv_name)
break; /* FIXME: Is this error ? */
}
} while (hn);
/* clvmd + locks (-E uuid) + debug (-d X) + NULL */
if (!(argv = malloc((max_locks * 2 + 6) * sizeof(*argv))))
goto_out;
/*
* Build the command-line
*/
argv[argc++] = "clvmd";
/* Propagate debug options */
if (clvmd_get_debug()) {
if (dm_snprintf(debug_arg, sizeof(debug_arg), "-d%u", clvmd_get_debug()) < 0)
goto_out;
argv[argc++] = debug_arg;
}
/* Propagate foreground options */
if (clvmd_get_foreground())
argv[argc++] = "-f";
argv[argc++] = "-I";
argv[argc++] = clops->name;
/* Now add the exclusively-open LVs */
hn = NULL;
do {
hn = get_next_excl_lock(hn, &lv_name);
if (lv_name) {
if (!*lv_name)
break; /* FIXME: Is this error ? */
argv[argc++] = "-E";
argv[argc++] = lv_name;
DEBUGLOG("excl lock: %s\n", lv_name);
}
} while (hn);
argv[argc] = NULL;
/* Exec new clvmd */
DEBUGLOG("--- Restarting %s ---\n", clvmd);
for (argc = 1; argv[argc]; argc++) DEBUGLOG("--- %d: %s\n", argc, argv[argc]);
/* NOTE: This will fail when downgrading! */
execvp(clvmd, (char **)argv);
out:
/* We failed */
DEBUGLOG("Restart of clvmd failed.\n");
free(argv);
return EIO;
}
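Tying the comment at the top of this file to code: a new command would get a number in clvm.h, a case in do_command() that formats its reply into *buf and sets *retlen, and an entry in the do_pre_command()/do_post_command() switches. A hypothetical helper for such a case (command name and reply text invented; dm_snprintf comes from libdevmapper) could look like:

/* What the body of a hypothetical "case CLVMD_CMD_PING:" in do_command()
 * might do: format a reply and report its length. A zero return is success;
 * a non-zero status is negated and sent back to the originating node. */
static int do_ping(char **buf, int buflen, int *retlen)
{
	*retlen = 1 + dm_snprintf(*buf, buflen, "pong");
	return 0;
}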


@ -1,6 +1,5 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2010 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@ -12,3 +11,17 @@
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* This file must be included first by every clvmd source file.
*/
#ifndef _LVM_CLVMD_COMMON_H
#define _LVM_CLVMD_COMMON_H
#define _REENTRANT
#include "tool.h"
#include "lvm-logging.h"
#endif

daemons/clvmd/clvmd-comms.h Normal file

@ -0,0 +1,119 @@
/*
* Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License v.2.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* Abstraction layer for clvmd cluster communications
*/
#ifndef _CLVMD_COMMS_H
#define _CLVMD_COMMS_H
struct local_client;
struct cluster_ops {
const char *name;
void (*cluster_init_completed) (void);
int (*cluster_send_message) (const void *buf, int msglen,
const char *csid,
const char *errtext);
int (*name_from_csid) (const char *csid, char *name);
int (*csid_from_name) (char *csid, const char *name);
int (*get_num_nodes) (void);
int (*cluster_fd_callback) (struct local_client *fd, char *buf, int len,
const char *csid,
struct local_client **new_client);
int (*get_main_cluster_fd) (void); /* gets accept FD or cman cluster socket */
int (*cluster_do_node_callback) (struct local_client *client,
void (*callback) (struct local_client *,
const char *csid,
int node_up));
int (*is_quorate) (void);
void (*get_our_csid) (char *csid);
void (*add_up_node) (const char *csid);
void (*reread_config) (void);
void (*cluster_closedown) (void);
int (*get_cluster_name)(char *buf, int buflen);
int (*sync_lock) (const char *resource, int mode,
int flags, int *lockid);
int (*sync_unlock) (const char *resource, int lockid);
};
#ifdef USE_CMAN
# include <netinet/in.h>
# include "libcman.h"
# define CMAN_MAX_CSID_LEN 4
# ifndef MAX_CSID_LEN
# define MAX_CSID_LEN CMAN_MAX_CSID_LEN
# endif
# undef MAX_CLUSTER_MEMBER_NAME_LEN
# define MAX_CLUSTER_MEMBER_NAME_LEN CMAN_MAX_NODENAME_LEN
# define CMAN_MAX_CLUSTER_MESSAGE 1500
# define CLUSTER_PORT_CLVMD 11
struct cluster_ops *init_cman_cluster(void);
#endif
#ifdef USE_OPENAIS
# include <openais/saAis.h>
# include <corosync/totem/totem.h>
# define OPENAIS_CSID_LEN (sizeof(int))
# define OPENAIS_MAX_CLUSTER_MESSAGE MESSAGE_SIZE_MAX
# define OPENAIS_MAX_CLUSTER_MEMBER_NAME_LEN SA_MAX_NAME_LENGTH
# ifndef MAX_CLUSTER_MEMBER_NAME_LEN
# define MAX_CLUSTER_MEMBER_NAME_LEN SA_MAX_NAME_LENGTH
# endif
# ifndef CMAN_MAX_CLUSTER_MESSAGE
# define CMAN_MAX_CLUSTER_MESSAGE MESSAGE_SIZE_MAX
# endif
# ifndef MAX_CSID_LEN
# define MAX_CSID_LEN sizeof(int)
# endif
struct cluster_ops *init_openais_cluster(void);
#endif
#ifdef USE_COROSYNC
# include <corosync/corotypes.h>
# define COROSYNC_CSID_LEN (sizeof(int))
# define COROSYNC_MAX_CLUSTER_MESSAGE 65535
# define COROSYNC_MAX_CLUSTER_MEMBER_NAME_LEN CS_MAX_NAME_LENGTH
# ifndef MAX_CLUSTER_MEMBER_NAME_LEN
# define MAX_CLUSTER_MEMBER_NAME_LEN CS_MAX_NAME_LENGTH
# endif
# ifndef CMAN_MAX_CLUSTER_MESSAGE
# define CMAN_MAX_CLUSTER_MESSAGE 65535
# endif
# ifndef MAX_CSID_LEN
# define MAX_CSID_LEN sizeof(int)
# endif
struct cluster_ops *init_corosync_cluster(void);
#endif
#ifdef USE_SINGLENODE
# define SINGLENODE_CSID_LEN (sizeof(int))
# ifndef MAX_CLUSTER_MEMBER_NAME_LEN
# define MAX_CLUSTER_MEMBER_NAME_LEN 64
# endif
# define SINGLENODE_MAX_CLUSTER_MESSAGE 65535
# ifndef MAX_CSID_LEN
# define MAX_CSID_LEN sizeof(int)
# endif
struct cluster_ops *init_singlenode_cluster(void);
#endif
#endif
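Every backend in this diff (cman, openais, corosync, singlenode) boils down to filling in struct cluster_ops and exposing an init function; a stripped-down hypothetical skeleton (names invented, callbacks reduced to stubs) looks like:

static int _example_init(void)		{ return 0; /* connect to the cluster stack */ }
static void _example_closedown(void)	{ /* leave the cluster, release resources */ }
static int _example_get_num_nodes(void)	{ return 1; }

static struct cluster_ops _example_ops = {
	.name = "example",
	.cluster_closedown = _example_closedown,
	.get_num_nodes = _example_get_num_nodes,
	/* ... the remaining callbacks are wired up the same way ... */
};

struct cluster_ops *init_example_cluster(void)
{
	return _example_init() ? NULL : &_example_ops;
}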


@ -0,0 +1,658 @@
/*
* Copyright (C) 2009-2012 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* This provides the interface between clvmd and corosync/DLM as the cluster
* and lock manager.
*/
#include "clvmd-common.h"
#include <pthread.h>
#include "clvm.h"
#include "clvmd-comms.h"
#include "clvmd.h"
#include "lvm-functions.h"
#include "locking.h"
#include <corosync/cpg.h>
#include <corosync/quorum.h>
#ifdef HAVE_COROSYNC_CONFDB_H
# include <corosync/confdb.h>
#elif defined HAVE_COROSYNC_CMAP_H
# include <corosync/cmap.h>
#else
# error "Either HAVE_COROSYNC_CONFDB_H or HAVE_COROSYNC_CMAP_H must be defined."
#endif
#include <libdlm.h>
#include <syslog.h>
/* Timeout value for several corosync calls */
#define LOCKSPACE_NAME "clvmd"
static void corosync_cpg_deliver_callback (cpg_handle_t handle,
const struct cpg_name *groupName,
uint32_t nodeid,
uint32_t pid,
void *msg,
size_t msg_len);
static void corosync_cpg_confchg_callback(cpg_handle_t handle,
const struct cpg_name *groupName,
const struct cpg_address *member_list, size_t member_list_entries,
const struct cpg_address *left_list, size_t left_list_entries,
const struct cpg_address *joined_list, size_t joined_list_entries);
static void _cluster_closedown(void);
/* Hash list of nodes in the cluster */
static struct dm_hash_table *node_hash;
/* Number of active nodes */
static int num_nodes;
static unsigned int our_nodeid;
static struct local_client *cluster_client;
/* Corosync handles */
static cpg_handle_t cpg_handle;
static quorum_handle_t quorum_handle;
/* DLM Handle */
static dlm_lshandle_t *lockspace;
static struct cpg_name cpg_group_name;
/* Corosync callback structs */
cpg_callbacks_t corosync_cpg_callbacks = {
.cpg_deliver_fn = corosync_cpg_deliver_callback,
.cpg_confchg_fn = corosync_cpg_confchg_callback,
};
quorum_callbacks_t quorum_callbacks = {
.quorum_notify_fn = NULL,
};
struct node_info
{
enum {NODE_DOWN, NODE_CLVMD} state;
int nodeid;
};
/* Set errno to something approximating the right value and return 0 or -1 */
static int cs_to_errno(cs_error_t err)
{
switch(err)
{
case CS_OK:
return 0;
case CS_ERR_LIBRARY:
errno = EINVAL;
break;
case CS_ERR_VERSION:
errno = EINVAL;
break;
case CS_ERR_INIT:
errno = EINVAL;
break;
case CS_ERR_TIMEOUT:
errno = ETIME;
break;
case CS_ERR_TRY_AGAIN:
errno = EAGAIN;
break;
case CS_ERR_INVALID_PARAM:
errno = EINVAL;
break;
case CS_ERR_NO_MEMORY:
errno = ENOMEM;
break;
case CS_ERR_BAD_HANDLE:
errno = EINVAL;
break;
case CS_ERR_BUSY:
errno = EBUSY;
break;
case CS_ERR_ACCESS:
errno = EPERM;
break;
case CS_ERR_NOT_EXIST:
errno = ENOENT;
break;
case CS_ERR_NAME_TOO_LONG:
errno = ENAMETOOLONG;
break;
case CS_ERR_EXIST:
errno = EEXIST;
break;
case CS_ERR_NO_SPACE:
errno = ENOSPC;
break;
case CS_ERR_INTERRUPT:
errno = EINTR;
break;
case CS_ERR_NAME_NOT_FOUND:
errno = ENOENT;
break;
case CS_ERR_NO_RESOURCES:
errno = ENOMEM;
break;
case CS_ERR_NOT_SUPPORTED:
errno = EOPNOTSUPP;
break;
case CS_ERR_BAD_OPERATION:
errno = EINVAL;
break;
case CS_ERR_FAILED_OPERATION:
errno = EIO;
break;
case CS_ERR_MESSAGE_ERROR:
errno = EIO;
break;
case CS_ERR_QUEUE_FULL:
errno = EXFULL;
break;
case CS_ERR_QUEUE_NOT_AVAILABLE:
errno = EINVAL;
break;
case CS_ERR_BAD_FLAGS:
errno = EINVAL;
break;
case CS_ERR_TOO_BIG:
errno = E2BIG;
break;
case CS_ERR_NO_SECTIONS:
errno = ENOMEM;
break;
default:
errno = EINVAL;
break;
}
return -1;
}
static char *print_corosync_csid(const char *csid)
{
static char buf[128];
int id;
memcpy(&id, csid, sizeof(int));
sprintf(buf, "%d", id);
return buf;
}
static void corosync_cpg_deliver_callback (cpg_handle_t handle,
const struct cpg_name *groupName,
uint32_t nodeid,
uint32_t pid,
void *msg,
size_t msg_len)
{
int target_nodeid;
memcpy(&target_nodeid, msg, COROSYNC_CSID_LEN);
DEBUGLOG("%u got message from nodeid %d for %d. len %zd\n",
our_nodeid, nodeid, target_nodeid, msg_len-4);
if (nodeid != our_nodeid)
if (target_nodeid == our_nodeid || target_nodeid == 0)
process_message(cluster_client, (char *)msg+COROSYNC_CSID_LEN,
msg_len-COROSYNC_CSID_LEN, (char*)&nodeid);
}
static void corosync_cpg_confchg_callback(cpg_handle_t handle,
const struct cpg_name *groupName,
const struct cpg_address *member_list, size_t member_list_entries,
const struct cpg_address *left_list, size_t left_list_entries,
const struct cpg_address *joined_list, size_t joined_list_entries)
{
int i;
struct node_info *ninfo;
DEBUGLOG("confchg callback. %zd joined, %zd left, %zd members\n",
joined_list_entries, left_list_entries, member_list_entries);
for (i=0; i<joined_list_entries; i++) {
ninfo = dm_hash_lookup_binary(node_hash,
(char *)&joined_list[i].nodeid,
COROSYNC_CSID_LEN);
if (!ninfo) {
ninfo = malloc(sizeof(struct node_info));
if (!ninfo) {
break;
}
else {
ninfo->nodeid = joined_list[i].nodeid;
dm_hash_insert_binary(node_hash,
(char *)&ninfo->nodeid,
COROSYNC_CSID_LEN, ninfo);
}
}
ninfo->state = NODE_CLVMD;
}
for (i=0; i<left_list_entries; i++) {
ninfo = dm_hash_lookup_binary(node_hash,
(char *)&left_list[i].nodeid,
COROSYNC_CSID_LEN);
if (ninfo)
ninfo->state = NODE_DOWN;
}
num_nodes = member_list_entries;
}
static int _init_cluster(void)
{
cs_error_t err;
#ifdef QUORUM_SET /* corosync/quorum.h */
uint32_t quorum_type;
#endif
node_hash = dm_hash_create(100);
err = cpg_initialize(&cpg_handle,
&corosync_cpg_callbacks);
if (err != CS_OK) {
syslog(LOG_ERR, "Cannot initialise Corosync CPG service: %d",
err);
DEBUGLOG("Cannot initialise Corosync CPG service: %d", err);
return cs_to_errno(err);
}
#ifdef QUORUM_SET
err = quorum_initialize(&quorum_handle,
&quorum_callbacks,
&quorum_type);
if (quorum_type != QUORUM_SET) {
syslog(LOG_ERR, "Corosync quorum service is not configured");
DEBUGLOG("Corosync quorum service is not configured");
return EINVAL;
}
#else
err = quorum_initialize(&quorum_handle,
&quorum_callbacks);
#endif
if (err != CS_OK) {
syslog(LOG_ERR, "Cannot initialise Corosync quorum service: %d",
err);
DEBUGLOG("Cannot initialise Corosync quorum service: %d", err);
return cs_to_errno(err);
}
/* Create a lockspace for LV & VG locks to live in */
lockspace = dlm_open_lockspace(LOCKSPACE_NAME);
if (!lockspace) {
lockspace = dlm_create_lockspace(LOCKSPACE_NAME, 0600);
if (!lockspace) {
syslog(LOG_ERR, "Unable to create DLM lockspace for CLVM: %m");
return -1;
}
DEBUGLOG("Created DLM lockspace for CLVMD.\n");
} else
DEBUGLOG("Opened existing DLM lockspace for CLVMD.\n");
dlm_ls_pthread_init(lockspace);
DEBUGLOG("DLM initialisation complete\n");
/* Connect to the clvmd group */
strcpy((char *)cpg_group_name.value, "clvmd");
cpg_group_name.length = strlen((char *)cpg_group_name.value);
err = cpg_join(cpg_handle, &cpg_group_name);
if (err != CS_OK) {
cpg_finalize(cpg_handle);
quorum_finalize(quorum_handle);
dlm_release_lockspace(LOCKSPACE_NAME, lockspace, 1);
syslog(LOG_ERR, "Cannot join clvmd process group");
DEBUGLOG("Cannot join clvmd process group: %d\n", err);
return cs_to_errno(err);
}
err = cpg_local_get(cpg_handle,
&our_nodeid);
if (err != CS_OK) {
cpg_finalize(cpg_handle);
quorum_finalize(quorum_handle);
dlm_release_lockspace(LOCKSPACE_NAME, lockspace, 1);
syslog(LOG_ERR, "Cannot get local node id\n");
return cs_to_errno(err);
}
DEBUGLOG("Our local node id is %d\n", our_nodeid);
DEBUGLOG("Connected to Corosync\n");
return 0;
}
static void _cluster_closedown(void)
{
dlm_release_lockspace(LOCKSPACE_NAME, lockspace, 1);
cpg_finalize(cpg_handle);
quorum_finalize(quorum_handle);
}
static void _get_our_csid(char *csid)
{
memcpy(csid, &our_nodeid, sizeof(int));
}
/* Corosync doesn't really have node names so we
just use the node ID in hex instead */
static int _csid_from_name(char *csid, const char *name)
{
int nodeid;
struct node_info *ninfo;
if (sscanf(name, "%x", &nodeid) == 1) {
ninfo = dm_hash_lookup_binary(node_hash, csid, COROSYNC_CSID_LEN);
if (ninfo)
return nodeid;
}
return -1;
}
static int _name_from_csid(const char *csid, char *name)
{
struct node_info *ninfo;
ninfo = dm_hash_lookup_binary(node_hash, csid, COROSYNC_CSID_LEN);
if (!ninfo)
{
sprintf(name, "UNKNOWN %s", print_corosync_csid(csid));
return -1;
}
sprintf(name, "%x", ninfo->nodeid);
return 0;
}
static int _get_num_nodes(void)
{
DEBUGLOG("num_nodes = %d\n", num_nodes);
return num_nodes;
}
/* Node is now known to be running a clvmd */
static void _add_up_node(const char *csid)
{
struct node_info *ninfo;
ninfo = dm_hash_lookup_binary(node_hash, csid, COROSYNC_CSID_LEN);
if (!ninfo) {
DEBUGLOG("corosync_add_up_node no node_hash entry for csid %s\n",
print_corosync_csid(csid));
return;
}
DEBUGLOG("corosync_add_up_node %d\n", ninfo->nodeid);
ninfo->state = NODE_CLVMD;
return;
}
/* Call a callback for each node, so the caller knows whether it's up or down */
static int _cluster_do_node_callback(struct local_client *master_client,
void (*callback)(struct local_client *,
const char *csid, int node_up))
{
struct dm_hash_node *hn;
struct node_info *ninfo;
dm_hash_iterate(hn, node_hash)
{
char csid[COROSYNC_CSID_LEN];
ninfo = dm_hash_get_data(node_hash, hn);
memcpy(csid, dm_hash_get_key(node_hash, hn), COROSYNC_CSID_LEN);
DEBUGLOG("down_callback. node %d, state = %d\n", ninfo->nodeid,
ninfo->state);
if (ninfo->state == NODE_CLVMD)
callback(master_client, csid, 1);
}
return 0;
}
/* Real locking */
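/*
* Take (or convert) a DLM lock on 'resource' in the clvmd lockspace and
* return the lock id in *lockid. For LKF_CONVERT requests the existing
* lock id is placed in the lksb first so the DLM converts the lock in
* place rather than granting a new one.
*/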
static int _lock_resource(const char *resource, int mode, int flags, int *lockid)
{
struct dlm_lksb lksb;
int err;
DEBUGLOG("lock_resource '%s', flags=%d, mode=%d\n", resource, flags, mode);
if (flags & LKF_CONVERT)
lksb.sb_lkid = *lockid;
err = dlm_ls_lock_wait(lockspace,
mode,
&lksb,
flags,
resource,
strlen(resource),
0,
NULL, NULL, NULL);
if (err != 0)
{
DEBUGLOG("dlm_ls_lock returned %d\n", errno);
return err;
}
if (lksb.sb_status != 0)
{
DEBUGLOG("dlm_ls_lock returns lksb.sb_status %d\n", lksb.sb_status);
errno = lksb.sb_status;
return -1;
}
DEBUGLOG("lock_resource returning %d, lock_id=%x\n", err, lksb.sb_lkid);
*lockid = lksb.sb_lkid;
return 0;
}
static int _unlock_resource(const char *resource, int lockid)
{
struct dlm_lksb lksb;
int err;
DEBUGLOG("unlock_resource: %s lockid: %x\n", resource, lockid);
lksb.sb_lkid = lockid;
err = dlm_ls_unlock_wait(lockspace,
lockid,
0,
&lksb);
if (err != 0)
{
DEBUGLOG("Unlock returned %d\n", err);
return err;
}
if (lksb.sb_status != EUNLOCK)
{
DEBUGLOG("dlm_ls_unlock_wait returns lksb.sb_status: %d\n", lksb.sb_status);
errno = lksb.sb_status;
return -1;
}
return 0;
}
static int _is_quorate(void)
{
int quorate;
if (quorum_getquorate(quorum_handle, &quorate) == CS_OK)
return quorate;
else
return 0;
}
static int _get_main_cluster_fd(void)
{
int select_fd;
cpg_fd_get(cpg_handle, &select_fd);
return select_fd;
}
static int _cluster_fd_callback(struct local_client *fd, char *buf, int len,
const char *csid,
struct local_client **new_client)
{
cluster_client = fd;
*new_client = NULL;
cpg_dispatch(cpg_handle, CS_DISPATCH_ONE);
return 1;
}
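/*
* Send a message to the cluster. The target nodeid (or 0 for a broadcast)
* is sent as the first iovec so the deliver callback on each node can
* decide whether the payload is meant for it.
*/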
static int _cluster_send_message(const void *buf, int msglen, const char *csid,
const char *errtext)
{
struct iovec iov[2];
cs_error_t err;
int target_node;
if (csid)
memcpy(&target_node, csid, COROSYNC_CSID_LEN);
else
target_node = 0;
iov[0].iov_base = &target_node;
iov[0].iov_len = sizeof(int);
iov[1].iov_base = (char *)buf;
iov[1].iov_len = msglen;
err = cpg_mcast_joined(cpg_handle, CPG_TYPE_AGREED, iov, 2);
return cs_to_errno(err);
}
#ifdef HAVE_COROSYNC_CONFDB_H
/*
* We are not necessarily connected to a Red Hat Cluster system,
* but if we are, this returns the cluster name from cluster.conf.
* I've used confdb rather than ccs to reduce the inter-package
dependencies as well as to allow people to set a cluster name
* for themselves even if they are not running on RH cluster.
*/
static int _get_cluster_name(char *buf, int buflen)
{
confdb_handle_t handle;
int result;
size_t namelen = buflen;
hdb_handle_t cluster_handle;
confdb_callbacks_t callbacks = {
.confdb_key_change_notify_fn = NULL,
.confdb_object_create_change_notify_fn = NULL,
.confdb_object_delete_change_notify_fn = NULL
};
/* This is a default in case everything else fails */
strncpy(buf, "Corosync", buflen);
/* Look for a cluster name in confdb */
result = confdb_initialize (&handle, &callbacks);
if (result != CS_OK)
return 0;
result = confdb_object_find_start(handle, OBJECT_PARENT_HANDLE);
if (result != CS_OK)
goto out;
result = confdb_object_find(handle, OBJECT_PARENT_HANDLE, (void *)"cluster", strlen("cluster"), &cluster_handle);
if (result != CS_OK)
goto out;
result = confdb_key_get(handle, cluster_handle, (void *)"name", strlen("name"), buf, &namelen);
if (result != CS_OK)
goto out;
buf[namelen] = '\0';
out:
confdb_finalize(handle);
return 0;
}
#elif defined HAVE_COROSYNC_CMAP_H
static int _get_cluster_name(char *buf, int buflen)
{
cmap_handle_t cmap_handle = 0;
int result;
char *name = NULL;
/* This is a default in case everything else fails */
strncpy(buf, "Corosync", buflen);
/* Look for a cluster name in cmap */
result = cmap_initialize(&cmap_handle);
if (result != CS_OK)
return 0;
result = cmap_get_string(cmap_handle, "totem.cluster_name", &name);
if (result != CS_OK)
goto out;
memset(buf, 0, buflen);
strncpy(buf, name, buflen - 1);
out:
if (name)
free(name);
cmap_finalize(cmap_handle);
return 0;
}
#endif
static struct cluster_ops _cluster_corosync_ops = {
.name = "corosync",
.cluster_init_completed = NULL,
.cluster_send_message = _cluster_send_message,
.name_from_csid = _name_from_csid,
.csid_from_name = _csid_from_name,
.get_num_nodes = _get_num_nodes,
.cluster_fd_callback = _cluster_fd_callback,
.get_main_cluster_fd = _get_main_cluster_fd,
.cluster_do_node_callback = _cluster_do_node_callback,
.is_quorate = _is_quorate,
.get_our_csid = _get_our_csid,
.add_up_node = _add_up_node,
.reread_config = NULL,
.cluster_closedown = _cluster_closedown,
.get_cluster_name = _get_cluster_name,
.sync_lock = _lock_resource,
.sync_unlock = _unlock_resource,
};
struct cluster_ops *init_corosync_cluster(void)
{
if (!_init_cluster())
return &_cluster_corosync_ops;
else
return NULL;
}

@@ -0,0 +1,689 @@
/*
* Copyright (C) 2007-2009 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* This provides the interface between clvmd and OpenAIS as the cluster
* and lock manager.
*/
#include "clvmd-common.h"
#include <pthread.h>
#include <fcntl.h>
#include <syslog.h>
#include <openais/saAis.h>
#include <openais/saLck.h>
#include <corosync/corotypes.h>
#include <corosync/cpg.h>
#include "locking.h"
#include "clvm.h"
#include "clvmd-comms.h"
#include "lvm-functions.h"
#include "clvmd.h"
/* Timeout value for several openais calls */
#define TIMEOUT 10
static void openais_cpg_deliver_callback (cpg_handle_t handle,
const struct cpg_name *groupName,
uint32_t nodeid,
uint32_t pid,
void *msg,
size_t msg_len);
static void openais_cpg_confchg_callback(cpg_handle_t handle,
const struct cpg_name *groupName,
const struct cpg_address *member_list, size_t member_list_entries,
const struct cpg_address *left_list, size_t left_list_entries,
const struct cpg_address *joined_list, size_t joined_list_entries);
static void _cluster_closedown(void);
/* Hash list of nodes in the cluster */
static struct dm_hash_table *node_hash;
/* For associating lock IDs & resource handles */
static struct dm_hash_table *lock_hash;
/* Number of active nodes */
static int num_nodes;
static unsigned int our_nodeid;
static struct local_client *cluster_client;
/* OpenAIS handles */
static cpg_handle_t cpg_handle;
static SaLckHandleT lck_handle;
static struct cpg_name cpg_group_name;
/* Openais callback structs */
cpg_callbacks_t openais_cpg_callbacks = {
.cpg_deliver_fn = openais_cpg_deliver_callback,
.cpg_confchg_fn = openais_cpg_confchg_callback,
};
struct node_info
{
enum {NODE_UNKNOWN, NODE_DOWN, NODE_UP, NODE_CLVMD} state;
int nodeid;
};
struct lock_info
{
SaLckResourceHandleT res_handle;
SaLckLockIdT lock_id;
SaNameT lock_name;
};
/* Set errno to something approximating the right value and return 0 or -1 */
static int ais_to_errno(SaAisErrorT err)
{
switch(err)
{
case SA_AIS_OK:
return 0;
case SA_AIS_ERR_LIBRARY:
errno = EINVAL;
break;
case SA_AIS_ERR_VERSION:
errno = EINVAL;
break;
case SA_AIS_ERR_INIT:
errno = EINVAL;
break;
case SA_AIS_ERR_TIMEOUT:
errno = ETIME;
break;
case SA_AIS_ERR_TRY_AGAIN:
errno = EAGAIN;
break;
case SA_AIS_ERR_INVALID_PARAM:
errno = EINVAL;
break;
case SA_AIS_ERR_NO_MEMORY:
errno = ENOMEM;
break;
case SA_AIS_ERR_BAD_HANDLE:
errno = EINVAL;
break;
case SA_AIS_ERR_BUSY:
errno = EBUSY;
break;
case SA_AIS_ERR_ACCESS:
errno = EPERM;
break;
case SA_AIS_ERR_NOT_EXIST:
errno = ENOENT;
break;
case SA_AIS_ERR_NAME_TOO_LONG:
errno = ENAMETOOLONG;
break;
case SA_AIS_ERR_EXIST:
errno = EEXIST;
break;
case SA_AIS_ERR_NO_SPACE:
errno = ENOSPC;
break;
case SA_AIS_ERR_INTERRUPT:
errno = EINTR;
break;
case SA_AIS_ERR_NAME_NOT_FOUND:
errno = ENOENT;
break;
case SA_AIS_ERR_NO_RESOURCES:
errno = ENOMEM;
break;
case SA_AIS_ERR_NOT_SUPPORTED:
errno = EOPNOTSUPP;
break;
case SA_AIS_ERR_BAD_OPERATION:
errno = EINVAL;
break;
case SA_AIS_ERR_FAILED_OPERATION:
errno = EIO;
break;
case SA_AIS_ERR_MESSAGE_ERROR:
errno = EIO;
break;
case SA_AIS_ERR_QUEUE_FULL:
errno = EXFULL;
break;
case SA_AIS_ERR_QUEUE_NOT_AVAILABLE:
errno = EINVAL;
break;
case SA_AIS_ERR_BAD_FLAGS:
errno = EINVAL;
break;
case SA_AIS_ERR_TOO_BIG:
errno = E2BIG;
break;
case SA_AIS_ERR_NO_SECTIONS:
errno = ENOMEM;
break;
default:
errno = EINVAL;
break;
}
return -1;
}
static char *print_openais_csid(const char *csid)
{
static char buf[128];
int id;
memcpy(&id, csid, sizeof(int));
sprintf(buf, "%d", id);
return buf;
}
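/*
* Register an extra fd with clvmd's main poll loop. Used below to have
* the OpenAIS LCK selection fd dispatched via lck_dispatch().
*/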
static int add_internal_client(int fd, fd_callback_t callback)
{
struct local_client *client;
DEBUGLOG("Add_internal_client, fd = %d\n", fd);
if (!(client = dm_zalloc(sizeof(*client)))) {
DEBUGLOG("malloc failed\n");
return -1;
}
client->fd = fd;
client->type = CLUSTER_INTERNAL;
client->callback = callback;
add_client(client);
/* Set Close-on-exec */
fcntl(fd, F_SETFD, 1);
return 0;
}
static void openais_cpg_deliver_callback (cpg_handle_t handle,
const struct cpg_name *groupName,
uint32_t nodeid,
uint32_t pid,
void *msg,
size_t msg_len)
{
int target_nodeid;
memcpy(&target_nodeid, msg, OPENAIS_CSID_LEN);
DEBUGLOG("%u got message from nodeid %d for %d. len %" PRIsize_t "\n",
our_nodeid, nodeid, target_nodeid, msg_len-4);
if (nodeid != our_nodeid)
if (target_nodeid == our_nodeid || target_nodeid == 0)
process_message(cluster_client, (char *)msg+OPENAIS_CSID_LEN,
msg_len-OPENAIS_CSID_LEN, (char*)&nodeid);
}
static void openais_cpg_confchg_callback(cpg_handle_t handle,
const struct cpg_name *groupName,
const struct cpg_address *member_list, size_t member_list_entries,
const struct cpg_address *left_list, size_t left_list_entries,
const struct cpg_address *joined_list, size_t joined_list_entries)
{
int i;
struct node_info *ninfo;
DEBUGLOG("confchg callback. %" PRIsize_t " joined, "
FMTsize_t " left, %" PRIsize_t " members\n",
joined_list_entries, left_list_entries, member_list_entries);
for (i=0; i<joined_list_entries; i++) {
ninfo = dm_hash_lookup_binary(node_hash,
(char *)&joined_list[i].nodeid,
OPENAIS_CSID_LEN);
if (!ninfo) {
ninfo = malloc(sizeof(struct node_info));
if (!ninfo) {
break;
}
else {
ninfo->nodeid = joined_list[i].nodeid;
dm_hash_insert_binary(node_hash,
(char *)&ninfo->nodeid,
OPENAIS_CSID_LEN, ninfo);
}
}
ninfo->state = NODE_CLVMD;
}
for (i=0; i<left_list_entries; i++) {
ninfo = dm_hash_lookup_binary(node_hash,
(char *)&left_list[i].nodeid,
OPENAIS_CSID_LEN);
if (ninfo)
ninfo->state = NODE_DOWN;
}
for (i=0; i<member_list_entries; i++) {
if (member_list[i].nodeid == 0) continue;
ninfo = dm_hash_lookup_binary(node_hash,
(char *)&member_list[i].nodeid,
OPENAIS_CSID_LEN);
if (!ninfo) {
ninfo = malloc(sizeof(struct node_info));
if (!ninfo) {
break;
}
else {
ninfo->nodeid = member_list[i].nodeid;
dm_hash_insert_binary(node_hash,
(char *)&ninfo->nodeid,
OPENAIS_CSID_LEN, ninfo);
}
}
ninfo->state = NODE_CLVMD;
}
num_nodes = member_list_entries;
}
static int lck_dispatch(struct local_client *client, char *buf, int len,
const char *csid, struct local_client **new_client)
{
*new_client = NULL;
saLckDispatch(lck_handle, SA_DISPATCH_ONE);
return 1;
}
static int _init_cluster(void)
{
SaAisErrorT err;
SaVersionT ver = { 'B', 1, 1 };
int select_fd;
node_hash = dm_hash_create(100);
lock_hash = dm_hash_create(10);
err = cpg_initialize(&cpg_handle,
&openais_cpg_callbacks);
if (err != SA_AIS_OK) {
syslog(LOG_ERR, "Cannot initialise OpenAIS CPG service: %d",
err);
DEBUGLOG("Cannot initialise OpenAIS CPG service: %d", err);
return ais_to_errno(err);
}
err = saLckInitialize(&lck_handle,
NULL,
&ver);
if (err != SA_AIS_OK) {
cpg_finalize(cpg_handle); /* undo the cpg_initialize() above */
syslog(LOG_ERR, "Cannot initialise OpenAIS lock service: %d",
err);
DEBUGLOG("Cannot initialise OpenAIS lock service: %d\n\n", err);
return ais_to_errno(err);
}
/* Connect to the clvmd group */
strcpy((char *)cpg_group_name.value, "clvmd");
cpg_group_name.length = strlen((char *)cpg_group_name.value);
err = cpg_join(cpg_handle, &cpg_group_name);
if (err != SA_AIS_OK) {
cpg_finalize(cpg_handle);
saLckFinalize(lck_handle);
syslog(LOG_ERR, "Cannot join clvmd process group");
DEBUGLOG("Cannot join clvmd process group: %d\n", err);
return ais_to_errno(err);
}
err = cpg_local_get(cpg_handle,
&our_nodeid);
if (err != SA_AIS_OK) {
cpg_finalize(cpg_handle);
saLckFinalize(lck_handle);
syslog(LOG_ERR, "Cannot get local node id\n");
return ais_to_errno(err);
}
DEBUGLOG("Our local node id is %d\n", our_nodeid);
saLckSelectionObjectGet(lck_handle, (SaSelectionObjectT *)&select_fd);
add_internal_client(select_fd, lck_dispatch);
DEBUGLOG("Connected to OpenAIS\n");
return 0;
}
static void _cluster_closedown(void)
{
saLckFinalize(lck_handle);
cpg_finalize(cpg_handle);
}
static void _get_our_csid(char *csid)
{
memcpy(csid, &our_nodeid, sizeof(int));
}
/* OpenAIS doesn't really have node names so we
just use the node ID in hex instead */
static int _csid_from_name(char *csid, const char *name)
{
int nodeid;
struct node_info *ninfo;
if (sscanf(name, "%x", &nodeid) == 1) {
ninfo = dm_hash_lookup_binary(node_hash, csid, OPENAIS_CSID_LEN);
if (ninfo)
return nodeid;
}
return -1;
}
static int _name_from_csid(const char *csid, char *name)
{
struct node_info *ninfo;
ninfo = dm_hash_lookup_binary(node_hash, csid, OPENAIS_CSID_LEN);
if (!ninfo)
{
sprintf(name, "UNKNOWN %s", print_openais_csid(csid));
return -1;
}
sprintf(name, "%x", ninfo->nodeid);
return 0;
}
static int _get_num_nodes()
{
DEBUGLOG("num_nodes = %d\n", num_nodes);
return num_nodes;
}
/* Node is now known to be running a clvmd */
static void _add_up_node(const char *csid)
{
struct node_info *ninfo;
ninfo = dm_hash_lookup_binary(node_hash, csid, OPENAIS_CSID_LEN);
if (!ninfo) {
DEBUGLOG("openais_add_up_node no node_hash entry for csid %s\n",
print_openais_csid(csid));
return;
}
DEBUGLOG("openais_add_up_node %d\n", ninfo->nodeid);
ninfo->state = NODE_CLVMD;
return;
}
/* Call a callback for each node, so the caller knows whether it's up or down */
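/* Returns -1 if any node in the hash is not running clvmd, 0 otherwise */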
static int _cluster_do_node_callback(struct local_client *master_client,
void (*callback)(struct local_client *,
const char *csid, int node_up))
{
struct dm_hash_node *hn;
struct node_info *ninfo;
int somedown = 0;
dm_hash_iterate(hn, node_hash)
{
char csid[OPENAIS_CSID_LEN];
ninfo = dm_hash_get_data(node_hash, hn);
memcpy(csid, dm_hash_get_key(node_hash, hn), OPENAIS_CSID_LEN);
DEBUGLOG("down_callback. node %d, state = %d\n", ninfo->nodeid,
ninfo->state);
if (ninfo->state != NODE_DOWN)
callback(master_client, csid, ninfo->state == NODE_CLVMD);
if (ninfo->state != NODE_CLVMD)
somedown = -1;
}
return somedown;
}
/* Real locking */
static int _lock_resource(char *resource, int mode, int flags, int *lockid)
{
struct lock_info *linfo;
SaLckResourceHandleT res_handle;
SaAisErrorT err;
SaLckLockIdT lock_id;
SaLckLockStatusT lockStatus;
/* This needs to be converted from the DLM/LVM2 value to the OpenAIS LCK one */
if (flags & LCK_NONBLOCK) flags = SA_LCK_LOCK_NO_QUEUE;
linfo = malloc(sizeof(struct lock_info));
if (!linfo)
return -1;
DEBUGLOG("lock_resource '%s', flags=%d, mode=%d\n", resource, flags, mode);
linfo->lock_name.length = strlen(resource)+1;
strcpy((char *)linfo->lock_name.value, resource);
err = saLckResourceOpen(lck_handle, &linfo->lock_name,
SA_LCK_RESOURCE_CREATE, TIMEOUT, &res_handle);
if (err != SA_AIS_OK)
{
DEBUGLOG("ResourceOpen returned %d\n", err);
free(linfo);
return ais_to_errno(err);
}
err = saLckResourceLock(
res_handle,
&lock_id,
mode,
flags,
0,
SA_TIME_END,
&lockStatus);
if (err != SA_AIS_OK && lockStatus != SA_LCK_LOCK_GRANTED)
{
free(linfo);
saLckResourceClose(res_handle);
return ais_to_errno(err);
}
/* Wait for it to complete */
DEBUGLOG("lock_resource returning %d, lock_id=%" PRIx64 "\n",
err, lock_id);
linfo->lock_id = lock_id;
linfo->res_handle = res_handle;
dm_hash_insert(lock_hash, resource, linfo);
return ais_to_errno(err);
}
static int _unlock_resource(char *resource, int lockid)
{
SaAisErrorT err;
struct lock_info *linfo;
DEBUGLOG("unlock_resource %s\n", resource);
linfo = dm_hash_lookup(lock_hash, resource);
if (!linfo)
return 0;
DEBUGLOG("unlock_resource: lockid: %" PRIx64 "\n", linfo->lock_id);
err = saLckResourceUnlock(linfo->lock_id, SA_TIME_END);
if (err != SA_AIS_OK)
{
DEBUGLOG("Unlock returned %d\n", err);
return ais_to_errno(err);
}
/* Release the resource */
dm_hash_remove(lock_hash, resource);
saLckResourceClose(linfo->res_handle);
free(linfo);
return ais_to_errno(err);
}
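/*
* OpenAIS LCK only offers PR and EX modes, so each clvmd resource is
* emulated with two locks, "<resource>-1" and "<resource>-2": EXCL takes
* both in EX, READ/PREAD holds lock1 in PR (dropping lock2), and WRITE
* holds lock2 in EX (dropping lock1). Readers can therefore share with
* each other and with a writer, while EXCL conflicts with everything.
*/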
static int _sync_lock(const char *resource, int mode, int flags, int *lockid)
{
int status;
char lock1[strlen(resource)+3];
char lock2[strlen(resource)+3];
snprintf(lock1, sizeof(lock1), "%s-1", resource);
snprintf(lock2, sizeof(lock2), "%s-2", resource);
switch (mode)
{
case LCK_EXCL:
status = _lock_resource(lock1, SA_LCK_EX_LOCK_MODE, flags, lockid);
if (status)
goto out;
/* If we can't get this lock too then bail out */
status = _lock_resource(lock2, SA_LCK_EX_LOCK_MODE, LCK_NONBLOCK,
lockid);
if (status == SA_LCK_LOCK_NOT_QUEUED)
{
_unlock_resource(lock1, *lockid);
status = -1;
errno = EAGAIN;
}
break;
case LCK_PREAD:
case LCK_READ:
status = _lock_resource(lock1, SA_LCK_PR_LOCK_MODE, flags, lockid);
if (status)
goto out;
_unlock_resource(lock2, *lockid);
break;
case LCK_WRITE:
status = _lock_resource(lock2, SA_LCK_EX_LOCK_MODE, flags, lockid);
if (status)
goto out;
_unlock_resource(lock1, *lockid);
break;
default:
status = -1;
errno = EINVAL;
break;
}
out:
*lockid = mode;
return status;
}
static int _sync_unlock(const char *resource, int lockid)
{
int status = 0;
char lock1[strlen(resource)+3];
char lock2[strlen(resource)+3];
snprintf(lock1, sizeof(lock1), "%s-1", resource);
snprintf(lock2, sizeof(lock2), "%s-2", resource);
_unlock_resource(lock1, lockid);
_unlock_resource(lock2, lockid);
return status;
}
/* We are always quorate ! */
static int _is_quorate()
{
return 1;
}
static int _get_main_cluster_fd(void)
{
int select_fd;
cpg_fd_get(cpg_handle, &select_fd);
return select_fd;
}
static int _cluster_fd_callback(struct local_client *fd, char *buf, int len,
const char *csid,
struct local_client **new_client)
{
cluster_client = fd;
*new_client = NULL;
cpg_dispatch(cpg_handle, SA_DISPATCH_ONE);
return 1;
}
static int _cluster_send_message(const void *buf, int msglen, const char *csid,
const char *errtext)
{
struct iovec iov[2];
SaAisErrorT err;
int target_node;
if (csid)
memcpy(&target_node, csid, OPENAIS_CSID_LEN);
else
target_node = 0;
iov[0].iov_base = &target_node;
iov[0].iov_len = sizeof(int);
iov[1].iov_base = (char *)buf;
iov[1].iov_len = msglen;
err = cpg_mcast_joined(cpg_handle, CPG_TYPE_AGREED, iov, 2);
return ais_to_errno(err);
}
/* We don't have a cluster name to report here */
static int _get_cluster_name(char *buf, int buflen)
{
strncpy(buf, "OpenAIS", buflen);
return 0;
}
static struct cluster_ops _cluster_openais_ops = {
.name = "openais",
.cluster_init_completed = NULL,
.cluster_send_message = _cluster_send_message,
.name_from_csid = _name_from_csid,
.csid_from_name = _csid_from_name,
.get_num_nodes = _get_num_nodes,
.cluster_fd_callback = _cluster_fd_callback,
.get_main_cluster_fd = _get_main_cluster_fd,
.cluster_do_node_callback = _cluster_do_node_callback,
.is_quorate = _is_quorate,
.get_our_csid = _get_our_csid,
.add_up_node = _add_up_node,
.reread_config = NULL,
.cluster_closedown = _cluster_closedown,
.get_cluster_name = _get_cluster_name,
.sync_lock = _sync_lock,
.sync_unlock = _sync_unlock,
};
struct cluster_ops *init_openais_cluster(void)
{
if (!_init_cluster())
return &_cluster_openais_ops;
return NULL;
}

@@ -0,0 +1,382 @@
/*
* Copyright (C) 2009-2013 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "clvmd-common.h"
#include <pthread.h>
#include "locking.h"
#include "clvm.h"
#include "clvmd-comms.h"
#include "clvmd.h"
#include <sys/un.h>
#include <sys/socket.h>
#include <fcntl.h>
static const char SINGLENODE_CLVMD_SOCKNAME[] = DEFAULT_RUN_DIR "/clvmd_singlenode.sock";
static int listen_fd = -1;
static struct dm_hash_table *_locks;
static int _lockid;
static pthread_mutex_t _lock_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Using one common condition for all locks for simplicity */
static pthread_cond_t _lock_cond = PTHREAD_COND_INITIALIZER;
struct lock {
struct dm_list list;
int lockid;
int mode;
};
static void close_comms(void)
{
if (listen_fd != -1 && close(listen_fd))
stack;
(void)unlink(SINGLENODE_CLVMD_SOCKNAME);
listen_fd = -1;
}
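/*
* Create the single-node listening socket: a close-on-exec unix-domain
* stream socket at SINGLENODE_CLVMD_SOCKNAME, created with umask 077.
*/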
static int init_comms(void)
{
mode_t old_mask;
struct sockaddr_un addr = { .sun_family = AF_UNIX };
if (!dm_strncpy(addr.sun_path, SINGLENODE_CLVMD_SOCKNAME,
sizeof(addr.sun_path))) {
DEBUGLOG("%s: singlenode socket name too long.",
SINGLENODE_CLVMD_SOCKNAME);
return -1;
}
close_comms();
(void) dm_prepare_selinux_context(SINGLENODE_CLVMD_SOCKNAME, S_IFSOCK);
old_mask = umask(0077);
listen_fd = socket(PF_UNIX, SOCK_STREAM, 0);
if (listen_fd < 0) {
DEBUGLOG("Can't create local socket: %s\n", strerror(errno));
goto error;
}
/* Set Close-on-exec */
if (fcntl(listen_fd, F_SETFD, 1)) {
DEBUGLOG("Setting CLOEXEC on client fd failed: %s\n", strerror(errno));
goto error;
}
if (bind(listen_fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
DEBUGLOG("Can't bind local socket: %s\n", strerror(errno));
goto error;
}
if (listen(listen_fd, 10) < 0) {
DEBUGLOG("Can't listen on local socket: %s\n", strerror(errno));
goto error;
}
umask(old_mask);
(void) dm_prepare_selinux_context(NULL, 0);
return 0;
error:
umask(old_mask);
(void) dm_prepare_selinux_context(NULL, 0);
close_comms();
return -1;
}
static int _init_cluster(void)
{
int r;
if (!(_locks = dm_hash_create(128))) {
DEBUGLOG("Failed to allocate single-node hash table.\n");
return 1;
}
r = init_comms();
if (r) {
dm_hash_destroy(_locks);
_locks = NULL;
return r;
}
DEBUGLOG("Single-node cluster initialised.\n");
return 0;
}
static void _cluster_closedown(void)
{
close_comms();
/* If anything is still waiting on a lock, wake it up so it can exit cleanly */
pthread_mutex_lock(&_lock_mutex);
dm_hash_destroy(_locks);
_locks = NULL;
_lockid = 0;
pthread_cond_broadcast(&_lock_cond); /* wakeup waiters */
pthread_mutex_unlock(&_lock_mutex);
}
static void _get_our_csid(char *csid)
{
int nodeid = 1;
memcpy(csid, &nodeid, sizeof(int));
}
static int _csid_from_name(char *csid, const char *name)
{
return 1;
}
static int _name_from_csid(const char *csid, char *name)
{
strcpy(name, "SINGLENODE");
return 0;
}
static int _get_num_nodes(void)
{
return 1;
}
/* Node is now known to be running a clvmd */
static void _add_up_node(const char *csid)
{
}
/* Call a callback for each node, so the caller knows whether it's up or down */
static int _cluster_do_node_callback(struct local_client *master_client,
void (*callback)(struct local_client *,
const char *csid, int node_up))
{
return 0;
}
int _lock_file(const char *file, uint32_t flags);
static const char *_get_mode(int mode)
{
switch (mode) {
case LCK_NULL: return "NULL";
case LCK_READ: return "READ";
case LCK_PREAD: return "PREAD";
case LCK_WRITE: return "WRITE";
case LCK_EXCL: return "EXCLUSIVE";
case LCK_UNLOCK: return "UNLOCK";
default: return "????";
}
}
/* Real locking */
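/*
* In-process emulation of DLM locking: locks live in the _locks hash
* (resource -> list of held locks), guarded by _lock_mutex, and blocked
* requests wait on _lock_cond. _dlm_table[requested][held] below says
* whether a requested mode is compatible with one already held.
*/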
static int _lock_resource(const char *resource, int mode, int flags, int *lockid)
{
/* DLM table of allowed transition states */
static const int _dlm_table[6][6] = {
/* Mode NL CR CW PR PW EX */
/* NL */ { 1, 1, 1, 1, 1, 1},
/* CR */ { 1, 1, 1, 1, 1, 0},
/* CW */ { 1, 1, 1, 0, 0, 0},
/* PR */ { 1, 1, 0, 1, 0, 0},
/* PW */ { 1, 1, 0, 0, 0, 0},
/* EX */ { 1, 0, 0, 0, 0, 0}
};
struct lock *lck = NULL, *lckt;
struct dm_list *head;
DEBUGLOG("Locking resource %s, flags=0x%02x (%s%s%s), mode=%s (%d)\n",
resource, flags,
(flags & LCKF_NOQUEUE) ? "NOQUEUE" : "",
((flags & (LCKF_NOQUEUE | LCKF_CONVERT)) ==
(LCKF_NOQUEUE | LCKF_CONVERT)) ? "|" : "",
(flags & LCKF_CONVERT) ? "CONVERT" : "",
_get_mode(mode), mode);
mode &= LCK_TYPE_MASK;
pthread_mutex_lock(&_lock_mutex);
retry:
if (!(head = dm_hash_lookup(_locks, resource))) {
if (flags & LCKF_CONVERT) {
/* In a real DLM, a lock is identified only by its lockid; the resource name is not used */
DEBUGLOG("Unlocked resource %s cannot be converted\n", resource);
goto_bad;
}
/* Add new locked resource */
if (!(head = dm_malloc(sizeof(struct dm_list))) ||
!dm_hash_insert(_locks, resource, head)) {
dm_free(head);
goto_bad;
}
dm_list_init(head);
} else /* Update/convert locked resource */
dm_list_iterate_items(lck, head) {
/* Check if all existing locks are compatible with the requested lock */
if (flags & LCKF_CONVERT) {
if (lck->lockid != *lockid)
continue;
DEBUGLOG("Converting resource %s lockid=%d mode:%s -> %s...\n",
resource, lck->lockid, _get_mode(lck->mode), _get_mode(mode));
dm_list_iterate_items(lckt, head) {
if ((lckt->lockid != *lockid) &&
!_dlm_table[mode][lckt->mode]) {
if (!(flags & LCKF_NOQUEUE) &&
/* TODO: Real DLM uses conversion queues here */
!pthread_cond_wait(&_lock_cond, &_lock_mutex) &&
_locks) /* End of the game? */
goto retry;
goto bad;
}
}
lck->mode = mode; /* Lock is now converted */
goto out;
} else if (!_dlm_table[mode][lck->mode]) {
DEBUGLOG("Resource %s already locked lockid=%d, mode:%s\n",
resource, lck->lockid, _get_mode(lck->mode));
if (!(flags & LCKF_NOQUEUE) &&
!pthread_cond_wait(&_lock_cond, &_lock_mutex) &&
_locks) { /* End of the game? */
DEBUGLOG("Resource %s retrying lock in mode:%s...\n",
resource, _get_mode(mode));
goto retry;
}
goto bad;
}
}
if (!(flags & LCKF_CONVERT)) {
if (!(lck = dm_malloc(sizeof(struct lock))))
goto_bad;
*lockid = lck->lockid = ++_lockid;
lck->mode = mode;
dm_list_add(head, &lck->list);
}
out:
pthread_cond_broadcast(&_lock_cond); /* to wakeup waiters */
pthread_mutex_unlock(&_lock_mutex);
DEBUGLOG("Locked resource %s, lockid=%d, mode=%s\n",
resource, lck->lockid, _get_mode(lck->mode));
return 0;
bad:
pthread_cond_broadcast(&_lock_cond); /* to wakeup waiters */
pthread_mutex_unlock(&_lock_mutex);
DEBUGLOG("Failed to lock resource %s\n", resource);
return 1; /* fail */
}
static int _unlock_resource(const char *resource, int lockid)
{
struct lock *lck;
struct dm_list *head;
int r = 1;
if (lockid < 0) {
DEBUGLOG("Not tracking unlock of lockid -1: %s, lockid=%d\n",
resource, lockid);
return 1;
}
DEBUGLOG("Unlocking resource %s, lockid=%d\n", resource, lockid);
pthread_mutex_lock(&_lock_mutex);
pthread_cond_broadcast(&_lock_cond); /* wakeup waiters */
if (!(head = dm_hash_lookup(_locks, resource))) {
pthread_mutex_unlock(&_lock_mutex);
DEBUGLOG("Resource %s is not locked.\n", resource);
return 1;
}
dm_list_iterate_items(lck, head)
if (lck->lockid == lockid) {
dm_list_del(&lck->list);
dm_free(lck);
r = 0;
goto out;
}
DEBUGLOG("Resource %s has wrong lockid %d.\n", resource, lockid);
out:
if (dm_list_empty(head)) {
//DEBUGLOG("Resource %s is no longer hashed (lockid=%d).\n", resource, lockid);
dm_hash_remove(_locks, resource);
dm_free(head);
}
pthread_mutex_unlock(&_lock_mutex);
return r;
}
static int _is_quorate(void)
{
return 1;
}
static int _get_main_cluster_fd(void)
{
return listen_fd;
}
static int _cluster_fd_callback(struct local_client *fd, char *buf, int len,
const char *csid,
struct local_client **new_client)
{
return 1;
}
static int _cluster_send_message(const void *buf, int msglen,
const char *csid,
const char *errtext)
{
return 0;
}
static int _get_cluster_name(char *buf, int buflen)
{
return dm_strncpy(buf, "localcluster", buflen) ? 0 : 1;
}
static struct cluster_ops _cluster_singlenode_ops = {
.name = "singlenode",
.cluster_init_completed = NULL,
.cluster_send_message = _cluster_send_message,
.name_from_csid = _name_from_csid,
.csid_from_name = _csid_from_name,
.get_num_nodes = _get_num_nodes,
.cluster_fd_callback = _cluster_fd_callback,
.get_main_cluster_fd = _get_main_cluster_fd,
.cluster_do_node_callback = _cluster_do_node_callback,
.is_quorate = _is_quorate,
.get_our_csid = _get_our_csid,
.add_up_node = _add_up_node,
.reread_config = NULL,
.cluster_closedown = _cluster_closedown,
.get_cluster_name = _get_cluster_name,
.sync_lock = _lock_resource,
.sync_unlock = _unlock_resource,
};
struct cluster_ops *init_singlenode_cluster(void)
{
if (!_init_cluster())
return &_cluster_singlenode_ops;
return NULL;
}

daemons/clvmd/clvmd.c Normal file

File diff suppressed because it is too large

daemons/clvmd/clvmd.h Normal file

@@ -0,0 +1,126 @@
/*
* Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License v.2.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _CLVMD_H
#define _CLVMD_H
#define CLVMD_MAJOR_VERSION 0
#define CLVMD_MINOR_VERSION 2
#define CLVMD_PATCH_VERSION 1
/* Default time (in seconds) we will wait for all remote commands to execute
before declaring them dead */
#define DEFAULT_CMD_TIMEOUT 60
/* One of these for each reply we get from command execution on a node */
struct node_reply {
char node[MAX_CLUSTER_MEMBER_NAME_LEN];
char *replymsg;
int status;
struct node_reply *next;
};
typedef enum {DEBUG_OFF, DEBUG_STDERR, DEBUG_SYSLOG} debug_t;
/*
* These exist for the use of local sockets only when we are
* collecting responses from all cluster nodes
*/
struct localsock_bits {
struct node_reply *replies;
int num_replies;
int expected_replies;
time_t sent_time; /* So we can check for timeouts */
int in_progress; /* Only execute one cmd at a time per client */
int sent_out; /* Flag to indicate that a command was sent
to remote nodes */
void *private; /* Private area for command processor use */
void *cmd; /* Whole command as passed down local socket */
int cmd_len; /* Length of above */
int pipe; /* Pipe to send PRE completion status down */
int finished; /* Flag to tell subthread to exit */
int all_success; /* Set to 0 if any node (or the pre_command)
failed */
int cleanup_needed; /* helper for cleanup_zombie */
struct local_client *pipe_client;
pthread_t threadid;
enum { PRE_COMMAND, POST_COMMAND } state;
pthread_mutex_t mutex; /* Main thread and worker synchronisation */
pthread_cond_t cond;
};
/* Entries for PIPE clients */
struct pipe_bits {
struct local_client *client; /* Actual (localsock) client */
pthread_t threadid; /* Our own copy of the thread id */
};
/* Entries for Network socket clients */
struct netsock_bits {
void *private;
int flags;
};
typedef int (*fd_callback_t) (struct local_client * fd, char *buf, int len,
const char *csid,
struct local_client ** new_client);
/* One of these for each fd we are listening on */
struct local_client {
int fd;
enum { CLUSTER_MAIN_SOCK, CLUSTER_DATA_SOCK, LOCAL_RENDEZVOUS,
LOCAL_SOCK, THREAD_PIPE, CLUSTER_INTERNAL } type;
struct local_client *next;
unsigned short xid;
fd_callback_t callback;
uint8_t removeme;
union {
struct localsock_bits localsock;
struct pipe_bits pipe;
struct netsock_bits net;
} bits;
};
#define DEBUGLOG(fmt, args...) debuglog(fmt, ## args)
#ifndef max
#define max(a,b) ((a)>(b)?(a):(b))
#endif
/* The real command processor is in clvmd-command.c */
extern int do_command(struct local_client *client, struct clvm_header *msg,
int msglen, char **buf, int buflen, int *retlen);
/* Pre and post command routines are called only on the local node */
extern int do_pre_command(struct local_client *client);
extern int do_post_command(struct local_client *client);
extern void cmd_client_cleanup(struct local_client *client);
extern int add_client(struct local_client *new_client);
extern void clvmd_cluster_init_completed(void);
extern void process_message(struct local_client *client, char *buf,
int len, const char *csid);
extern void debuglog(const char *fmt, ... )
__attribute__ ((format(printf, 1, 2)));
void clvmd_set_debug(debug_t new_de);
debug_t clvmd_get_debug(void);
int clvmd_get_foreground(void);
int sync_lock(const char *resource, int mode, int flags, int *lockid);
int sync_unlock(const char *resource, int lockid);
#endif

@@ -0,0 +1,939 @@
/*
* Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License v.2.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "clvmd-common.h"
#include <pthread.h>
#include "clvm.h"
#include "clvmd-comms.h"
#include "clvmd.h"
#include "lvm-functions.h"
/* LVM2 headers */
#include "toolcontext.h"
#include "lvmcache.h"
#include "lvm-globals.h"
#include "activate.h"
#include "archiver.h"
#include "memlock.h"
#include <syslog.h>
static struct cmd_context *cmd = NULL;
static struct dm_hash_table *lv_hash = NULL;
static pthread_mutex_t lv_hash_lock;
static pthread_mutex_t lvm_lock;
static char last_error[1024];
struct lv_info {
int lock_id;
int lock_mode;
};
static const char *decode_full_locking_cmd(uint32_t cmdl)
{
static char buf[128];
const char *type;
const char *scope;
const char *command;
switch (cmdl & LCK_TYPE_MASK) {
case LCK_NULL:
type = "NULL";
break;
case LCK_READ:
type = "READ";
break;
case LCK_PREAD:
type = "PREAD";
break;
case LCK_WRITE:
type = "WRITE";
break;
case LCK_EXCL:
type = "EXCL";
break;
case LCK_UNLOCK:
type = "UNLOCK";
break;
default:
type = "unknown";
break;
}
switch (cmdl & LCK_SCOPE_MASK) {
case LCK_VG:
scope = "VG";
command = "LCK_VG";
break;
case LCK_LV:
scope = "LV";
switch (cmdl & LCK_MASK) {
case LCK_LV_EXCLUSIVE & LCK_MASK:
command = "LCK_LV_EXCLUSIVE";
break;
case LCK_LV_SUSPEND & LCK_MASK:
command = "LCK_LV_SUSPEND";
break;
case LCK_LV_RESUME & LCK_MASK:
command = "LCK_LV_RESUME";
break;
case LCK_LV_ACTIVATE & LCK_MASK:
command = "LCK_LV_ACTIVATE";
break;
case LCK_LV_DEACTIVATE & LCK_MASK:
command = "LCK_LV_DEACTIVATE";
break;
default:
command = "unknown";
break;
}
break;
default:
scope = "unknown";
command = "unknown";
break;
}
sprintf(buf, "0x%x %s (%s|%s%s%s%s%s)", cmdl, command, type, scope,
cmdl & LCK_NONBLOCK ? "|NONBLOCK" : "",
cmdl & LCK_HOLD ? "|HOLD" : "",
cmdl & LCK_CLUSTER_VG ? "|CLUSTER_VG" : "",
cmdl & LCK_CACHE ? "|CACHE" : "");
return buf;
}
/*
* Only processes 8 bits: excludes LCK_CACHE.
*/
static const char *decode_locking_cmd(unsigned char cmdl)
{
return decode_full_locking_cmd((uint32_t) cmdl);
}
static const char *decode_flags(unsigned char flags)
{
static char buf[128];
int len;
len = sprintf(buf, "0x%x ( %s%s%s%s%s%s%s%s)", flags,
flags & LCK_PARTIAL_MODE ? "PARTIAL_MODE|" : "",
flags & LCK_MIRROR_NOSYNC_MODE ? "MIRROR_NOSYNC|" : "",
flags & LCK_DMEVENTD_MONITOR_MODE ? "DMEVENTD_MONITOR|" : "",
flags & LCK_ORIGIN_ONLY_MODE ? "ORIGIN_ONLY|" : "",
flags & LCK_TEST_MODE ? "TEST|" : "",
flags & LCK_CONVERT_MODE ? "CONVERT|" : "",
flags & LCK_DMEVENTD_MONITOR_IGNORE ? "DMEVENTD_MONITOR_IGNORE|" : "",
flags & LCK_REVERT_MODE ? "REVERT|" : "");
if (len > 1)
buf[len - 2] = ' ';
else
buf[0] = '\0';
return buf;
}
char *get_last_lvm_error(void)
{
return last_error;
}
/*
* Hash lock info helpers
*/
static struct lv_info *lookup_info(const char *resource)
{
struct lv_info *lvi;
pthread_mutex_lock(&lv_hash_lock);
lvi = dm_hash_lookup(lv_hash, resource);
pthread_mutex_unlock(&lv_hash_lock);
return lvi;
}
static int insert_info(const char *resource, struct lv_info *lvi)
{
int ret;
pthread_mutex_lock(&lv_hash_lock);
ret = dm_hash_insert(lv_hash, resource, lvi);
pthread_mutex_unlock(&lv_hash_lock);
return ret;
}
static void remove_info(const char *resource)
{
int num_open;
pthread_mutex_lock(&lv_hash_lock);
dm_hash_remove(lv_hash, resource);
/* When the last lock is removed, check that no devices are left open */
if (!dm_hash_get_first(lv_hash)) {
if (critical_section())
log_error(INTERNAL_ERROR "No volumes are locked however clvmd is in activation mode critical section.");
if ((num_open = dev_cache_check_for_open_devices()))
log_error(INTERNAL_ERROR "No volumes are locked however %d devices are still open.", num_open);
}
pthread_mutex_unlock(&lv_hash_lock);
}
/*
* Return the mode a lock is currently held at (or -1 if not held)
*/
static int get_current_lock(char *resource)
{
struct lv_info *lvi;
if ((lvi = lookup_info(resource)))
return lvi->lock_mode;
return -1;
}
void init_lvhash(void)
{
/* Create hash table for keeping LV locks & status */
lv_hash = dm_hash_create(1024);
pthread_mutex_init(&lv_hash_lock, NULL);
pthread_mutex_init(&lvm_lock, NULL);
}
/* Called at shutdown to tidy the lockspace */
void destroy_lvhash(void)
{
struct dm_hash_node *v;
struct lv_info *lvi;
char *resource;
int status;
pthread_mutex_lock(&lv_hash_lock);
dm_hash_iterate(v, lv_hash) {
lvi = dm_hash_get_data(lv_hash, v);
resource = dm_hash_get_key(lv_hash, v);
if ((status = sync_unlock(resource, lvi->lock_id)))
DEBUGLOG("unlock_all. unlock failed(%d): %s\n",
status, strerror(errno));
dm_free(lvi);
}
dm_hash_destroy(lv_hash);
lv_hash = NULL;
pthread_mutex_unlock(&lv_hash_lock);
}
/* Gets a real lock and keeps the info in the hash table */
static int hold_lock(char *resource, int mode, int flags)
{
int status;
int saved_errno;
struct lv_info *lvi;
/* Mask off invalid options */
flags &= LCKF_NOQUEUE | LCKF_CONVERT;
lvi = lookup_info(resource);
if (lvi) {
if (lvi->lock_mode == mode) {
DEBUGLOG("hold_lock, lock mode %d already held\n",
mode);
return 0;
}
if ((lvi->lock_mode == LCK_EXCL) && (mode == LCK_WRITE)) {
DEBUGLOG("hold_lock, lock already held LCK_EXCL, "
"ignoring LCK_WRITE request\n");
return 0;
}
}
/* Only allow explicit conversions */
if (lvi && !(flags & LCKF_CONVERT)) {
errno = EBUSY;
return -1;
}
if (lvi) {
/* Already exists - convert it */
status = sync_lock(resource, mode, flags, &lvi->lock_id);
saved_errno = errno;
if (!status)
lvi->lock_mode = mode;
else
DEBUGLOG("hold_lock. convert to %d failed: %s\n", mode,
strerror(errno));
errno = saved_errno;
} else {
if (!(lvi = dm_malloc(sizeof(struct lv_info)))) {
errno = ENOMEM;
return -1;
}
lvi->lock_mode = mode;
lvi->lock_id = 0;
status = sync_lock(resource, mode, flags & ~LCKF_CONVERT, &lvi->lock_id);
saved_errno = errno;
if (status) {
dm_free(lvi);
DEBUGLOG("hold_lock. lock at %d failed: %s\n", mode,
strerror(errno));
} else
if (!insert_info(resource, lvi)) {
errno = ENOMEM;
return -1;
}
errno = saved_errno;
}
return status;
}
/* Unlock and remove it from the hash table */
static int hold_unlock(char *resource)
{
struct lv_info *lvi;
int status;
int saved_errno;
if (!(lvi = lookup_info(resource))) {
DEBUGLOG("hold_unlock, lock not already held\n");
return 0;
}
status = sync_unlock(resource, lvi->lock_id);
saved_errno = errno;
if (!status) {
remove_info(resource);
dm_free(lvi);
} else {
DEBUGLOG("hold_unlock. unlock failed(%d): %s\n", status,
strerror(errno));
}
errno = saved_errno;
return status;
}
/* Watch the return codes here.
liblvm API functions return 1 (true) for success, 0 (false) for failure and don't set errno.
libdlm API functions return 0 for success, -1 for failure and do set errno.
These functions here return 0 for success or >0 for failure (where the return code is errno).
*/
/* Activate LV exclusive or non-exclusive */
static int do_activate_lv(char *resource, unsigned char command, unsigned char lock_flags, int mode)
{
int oldmode;
int status;
int activate_lv;
int exclusive = 0;
struct lvinfo lvi;
/* Is it already open ? */
oldmode = get_current_lock(resource);
if (oldmode == mode && (command & LCK_CLUSTER_VG)) {
DEBUGLOG("do_activate_lv, lock already held at %d\n", oldmode);
return 0; /* Nothing to do */
}
/* Does the config file want us to activate this LV ? */
if (!lv_activation_filter(cmd, resource, &activate_lv, NULL))
return EIO;
if (!activate_lv)
return 0; /* Success, we did nothing! */
/* Do we need to activate exclusively? */
if ((activate_lv == 2) || (mode == LCK_EXCL)) {
exclusive = 1;
mode = LCK_EXCL;
}
/*
* Try to get the lock if it's a clustered volume group.
* Use lock conversion only if requested, to prevent implicit conversion
* of exclusive lock to shared one during activation.
*/
if (!test_mode() && command & LCK_CLUSTER_VG) {
status = hold_lock(resource, mode, LCKF_NOQUEUE | ((lock_flags & LCK_CONVERT_MODE) ? LCKF_CONVERT:0));
if (status) {
/* Return an LVM-sensible error for this.
* Forcing EIO makes the upper level return this text
* rather than the strerror text for EAGAIN.
*/
if (errno == EAGAIN) {
sprintf(last_error, "Volume is busy on another node");
errno = EIO;
}
return errno;
}
}
/* If it's suspended then resume it */
if (!lv_info_by_lvid(cmd, resource, 0, &lvi, 0, 0))
goto error;
if (lvi.suspended) {
critical_section_inc(cmd, "resuming");
if (!lv_resume(cmd, resource, 0, NULL)) {
critical_section_dec(cmd, "resumed");
goto error;
}
}
/* Now activate it */
if (!lv_activate(cmd, resource, exclusive, 0, 0, NULL))
goto error;
return 0;
error:
if (!test_mode() && (oldmode == -1 || oldmode != mode))
(void)hold_unlock(resource);
return EIO;
}
/* Resume the LV if it was active */
static int do_resume_lv(char *resource, unsigned char command, unsigned char lock_flags)
{
int oldmode, origin_only, exclusive, revert;
/* Is it open ? */
oldmode = get_current_lock(resource);
if (oldmode == -1 && (command & LCK_CLUSTER_VG)) {
DEBUGLOG("do_resume_lv, lock not already held\n");
return 0; /* We don't need to do anything */
}
origin_only = (lock_flags & LCK_ORIGIN_ONLY_MODE) ? 1 : 0;
exclusive = (oldmode == LCK_EXCL) ? 1 : 0;
revert = (lock_flags & LCK_REVERT_MODE) ? 1 : 0;
if (!lv_resume_if_active(cmd, resource, origin_only, exclusive, revert, NULL))
return EIO;
return 0;
}
/* Suspend the device if active */
static int do_suspend_lv(char *resource, unsigned char command, unsigned char lock_flags)
{
int oldmode;
unsigned origin_only = (lock_flags & LCK_ORIGIN_ONLY_MODE) ? 1 : 0;
unsigned exclusive;
/* Is it open ? */
oldmode = get_current_lock(resource);
if (oldmode == -1 && (command & LCK_CLUSTER_VG)) {
DEBUGLOG("do_suspend_lv, lock not already held\n");
return 0; /* Not active, so it's OK */
}
exclusive = (oldmode == LCK_EXCL) ? 1 : 0;
/* Always call lv_suspend to read committed and precommitted data */
if (!lv_suspend_if_active(cmd, resource, origin_only, exclusive, NULL, NULL))
return EIO;
return 0;
}
static int do_deactivate_lv(char *resource, unsigned char command, unsigned char lock_flags)
{
int oldmode;
int status;
/* Is it open ? */
oldmode = get_current_lock(resource);
if (oldmode == -1 && (command & LCK_CLUSTER_VG)) {
DEBUGLOG("do_deactivate_lock, lock not already held\n");
return 0; /* We don't need to do anything */
}
if (!lv_deactivate(cmd, resource, NULL))
return EIO;
if (!test_mode() && command & LCK_CLUSTER_VG) {
status = hold_unlock(resource);
if (status)
return errno;
}
return 0;
}
const char *do_lock_query(char *resource)
{
int mode;
const char *type;
mode = get_current_lock(resource);
switch (mode) {
case LCK_NULL: type = "NL"; break;
case LCK_READ: type = "CR"; break;
case LCK_PREAD:type = "PR"; break;
case LCK_WRITE:type = "PW"; break;
case LCK_EXCL: type = "EX"; break;
default: type = NULL;
}
DEBUGLOG("do_lock_query: resource '%s', mode %i (%s)\n", resource, mode, type ?: "--");
return type;
}
/* This is the LOCK_LV part that happens on all nodes in the cluster -
it is responsible for the interaction with device-mapper and LVM */
int do_lock_lv(unsigned char command, unsigned char lock_flags, char *resource)
{
int status = 0;
DEBUGLOG("do_lock_lv: resource '%s', cmd = %s, flags = %s, critical_section = %d\n",
resource, decode_locking_cmd(command), decode_flags(lock_flags), critical_section());
if (!cmd->initialized.config || config_files_changed(cmd)) {
/* Reinitialise various settings inc. logging, filters */
if (do_refresh_cache()) {
log_error("Updated config file invalid. Aborting.");
return EINVAL;
}
}
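/* lvm_lock serialises all use of the shared LVM command context */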
pthread_mutex_lock(&lvm_lock);
init_test((lock_flags & LCK_TEST_MODE) ? 1 : 0);
if (lock_flags & LCK_MIRROR_NOSYNC_MODE)
init_mirror_in_sync(1);
if (lock_flags & LCK_DMEVENTD_MONITOR_IGNORE)
init_dmeventd_monitor(DMEVENTD_MONITOR_IGNORE);
else {
if (lock_flags & LCK_DMEVENTD_MONITOR_MODE)
init_dmeventd_monitor(1);
else
init_dmeventd_monitor(0);
}
cmd->partial_activation = (lock_flags & LCK_PARTIAL_MODE) ? 1 : 0;
/* clvmd should never try to read suspended device */
init_ignore_suspended_devices(1);
switch (command & LCK_MASK) {
case LCK_LV_EXCLUSIVE:
status = do_activate_lv(resource, command, lock_flags, LCK_EXCL);
break;
case LCK_LV_SUSPEND:
status = do_suspend_lv(resource, command, lock_flags);
break;
case LCK_UNLOCK:
case LCK_LV_RESUME: /* if active */
status = do_resume_lv(resource, command, lock_flags);
break;
case LCK_LV_ACTIVATE:
status = do_activate_lv(resource, command, lock_flags, LCK_READ);
break;
case LCK_LV_DEACTIVATE:
status = do_deactivate_lv(resource, command, lock_flags);
break;
default:
DEBUGLOG("Invalid LV command 0x%x\n", command);
status = EINVAL;
break;
}
if (lock_flags & LCK_MIRROR_NOSYNC_MODE)
init_mirror_in_sync(0);
cmd->partial_activation = 0;
/* clean the pool for another command */
dm_pool_empty(cmd->mem);
init_test(0);
pthread_mutex_unlock(&lvm_lock);
DEBUGLOG("Command return is %d, critical_section is %d\n", status, critical_section());
return status;
}
/* Functions to do on the local node only BEFORE the cluster-wide stuff above happens */
int pre_lock_lv(unsigned char command, unsigned char lock_flags, char *resource)
{
/* Nearly everything happens cluster-wide, apart from SUSPEND. Here we take the
lock out on this node (because we are the node modifying the metadata)
before suspending cluster-wide.
LCKF_CONVERT is always used because the local node is about to modify the metadata.
*/
if ((command & (LCK_SCOPE_MASK | LCK_TYPE_MASK)) == LCK_LV_SUSPEND &&
(command & LCK_CLUSTER_VG)) {
DEBUGLOG("pre_lock_lv: resource '%s', cmd = %s, flags = %s\n",
resource, decode_locking_cmd(command), decode_flags(lock_flags));
if (!(lock_flags & LCK_TEST_MODE) &&
hold_lock(resource, LCK_WRITE, LCKF_NOQUEUE | LCKF_CONVERT))
return errno;
}
return 0;
}
/* Functions to do on the local node only AFTER the cluster-wide stuff above happens */
int post_lock_lv(unsigned char command, unsigned char lock_flags,
char *resource)
{
int status;
unsigned origin_only = (lock_flags & LCK_ORIGIN_ONLY_MODE) ? 1 : 0;
/* Opposite of above, done on resume after a metadata update */
if ((command & (LCK_SCOPE_MASK | LCK_TYPE_MASK)) == LCK_LV_RESUME &&
(command & LCK_CLUSTER_VG)) {
int oldmode;
DEBUGLOG("post_lock_lv: resource '%s', cmd = %s, flags = %s\n",
resource, decode_locking_cmd(command), decode_flags(lock_flags));
/* If we held the lock at PW (write) for the suspend, demote it back to
CR (read) now the resume is done, or drop it if the LV no longer exists */
oldmode = get_current_lock(resource);
if (oldmode == LCK_WRITE) {
struct lvinfo lvi;
pthread_mutex_lock(&lvm_lock);
status = lv_info_by_lvid(cmd, resource, origin_only, &lvi, 0, 0);
pthread_mutex_unlock(&lvm_lock);
if (!status)
return EIO;
if (!(lock_flags & LCK_TEST_MODE)) {
if (lvi.exists) {
if (hold_lock(resource, LCK_READ, LCKF_CONVERT))
return errno;
} else if (hold_unlock(resource))
return errno;
}
}
}
return 0;
}
/* Check if a VG is in use by LVM1 so we don't stomp on it */
int do_check_lvm1(const char *vgname)
{
int status;
status = check_lvm1_vg_inactive(cmd, vgname);
return status == 1 ? 0 : EBUSY;
}
int do_refresh_cache(void)
{
DEBUGLOG("Refreshing context\n");
log_notice("Refreshing context");
pthread_mutex_lock(&lvm_lock);
if (!refresh_toolcontext(cmd)) {
pthread_mutex_unlock(&lvm_lock);
return -1;
}
init_full_scan_done(0);
init_ignore_suspended_devices(1);
lvmcache_force_next_label_scan();
lvmcache_label_scan(cmd);
dm_pool_empty(cmd->mem);
pthread_mutex_unlock(&lvm_lock);
return 0;
}
/*
* Handle VG lock - drop metadata or update lvmcache state
*/
void do_lock_vg(unsigned char command, unsigned char lock_flags, char *resource)
{
uint32_t lock_cmd = command;
char *vgname = resource + 2;
lock_cmd &= (LCK_SCOPE_MASK | LCK_TYPE_MASK | LCK_HOLD);
/*
* Check if LCK_CACHE should be set. All P_ locks except P_# ones are cache related.
*/
if (strncmp(resource, "P_#", 3) && !strncmp(resource, "P_", 2))
lock_cmd |= LCK_CACHE;
DEBUGLOG("do_lock_vg: resource '%s', cmd = %s, flags = %s, critical_section = %d\n",
resource, decode_full_locking_cmd(lock_cmd), decode_flags(lock_flags), critical_section());
/* P_#global causes a full cache refresh */
if (!strcmp(resource, "P_" VG_GLOBAL)) {
do_refresh_cache();
return;
}
pthread_mutex_lock(&lvm_lock);
init_test((lock_flags & LCK_TEST_MODE) ? 1 : 0);
switch (lock_cmd) {
case LCK_VG_COMMIT:
DEBUGLOG("vg_commit notification for VG %s\n", vgname);
lvmcache_commit_metadata(vgname);
break;
case LCK_VG_REVERT:
DEBUGLOG("vg_revert notification for VG %s\n", vgname);
lvmcache_drop_metadata(vgname, 1);
break;
case LCK_VG_DROP_CACHE:
default:
DEBUGLOG("Invalidating cached metadata for VG %s\n", vgname);
lvmcache_drop_metadata(vgname, 0);
}
init_test(0);
pthread_mutex_unlock(&lvm_lock);
}
/*
* Ideally, clvmd should be started before any LVs are active
* but this may not be the case...
* I suppose this also comes in handy if clvmd crashes, not that it would!
*/
static int get_initial_state(struct dm_hash_table *excl_uuid)
{
int lock_mode;
char lv[65], vg[65], flags[26], vg_flags[26]; /* with space for '\0' */
char uuid[65];
char line[255];
char *lvs_cmd;
const char *lvm_binary = getenv("LVM_BINARY") ? : LVM_PATH;
FILE *lvs;
if (dm_asprintf(&lvs_cmd, "%s lvs --config 'log{command_names=0 prefix=\"\"}' "
"--nolocking --noheadings -o vg_uuid,lv_uuid,lv_attr,vg_attr",
lvm_binary) < 0)
return_0;
/* FIXME: Maybe link and use liblvm2cmd directly instead of fork */
if (!(lvs = popen(lvs_cmd, "r"))) {
dm_free(lvs_cmd);
return 0;
}
while (fgets(line, sizeof(line), lvs)) {
if (sscanf(line, "%64s %64s %25s %25s\n", vg, lv, flags, vg_flags) == 4) {
/* States: s:suspended a:active S:dropped snapshot I:invalid snapshot */
if (strlen(vg) == 38 && /* is it a valid UUID? */
(flags[4] == 'a' || flags[4] == 's') && /* is it active or suspended? */
vg_flags[5] == 'c') { /* is it clustered ? */
/* Convert hyphen-separated UUIDs into one */
memcpy(&uuid[0], &vg[0], 6);
memcpy(&uuid[6], &vg[7], 4);
memcpy(&uuid[10], &vg[12], 4);
memcpy(&uuid[14], &vg[17], 4);
memcpy(&uuid[18], &vg[22], 4);
memcpy(&uuid[22], &vg[27], 4);
memcpy(&uuid[26], &vg[32], 6);
memcpy(&uuid[32], &lv[0], 6);
memcpy(&uuid[38], &lv[7], 4);
memcpy(&uuid[42], &lv[12], 4);
memcpy(&uuid[46], &lv[17], 4);
memcpy(&uuid[50], &lv[22], 4);
memcpy(&uuid[54], &lv[27], 4);
memcpy(&uuid[58], &lv[32], 6);
uuid[64] = '\0';
/* Look for this lock in the list of EX locks
we were passed on the command-line */
lock_mode = (dm_hash_lookup(excl_uuid, uuid)) ?
LCK_EXCL : LCK_READ;
DEBUGLOG("getting initial lock for %s\n", uuid);
if (hold_lock(uuid, lock_mode, LCKF_NOQUEUE))
DEBUGLOG("Failed to hold lock %s\n", uuid);
}
}
}
if (pclose(lvs))
DEBUGLOG("lvs pclose failed: %s\n", strerror(errno));
dm_free(lvs_cmd);
return 1;
}
static void lvm2_log_fn(int level, const char *file, int line, int dm_errno,
const char *message)
{
/* Send messages to the normal LVM2 logging system too,
so we get debug output when it's asked for.
We need to NULL the function ptr otherwise it will just call
back into here! */
init_log_fn(NULL);
print_log(level, file, line, dm_errno, "%s", message);
init_log_fn(lvm2_log_fn);
/*
* Ignore non-error messages, but store the latest one for returning
* to the user.
*/
if (level != _LOG_ERR && level != _LOG_FATAL)
return;
strncpy(last_error, message, sizeof(last_error));
last_error[sizeof(last_error)-1] = '\0';
}
/* This checks some basic cluster-LVM configuration stuff */
static void check_config(void)
{
int locking_type;
locking_type = find_config_tree_int(cmd, global_locking_type_CFG, NULL);
if (locking_type == 3) /* compiled-in cluster support */
return;
if (locking_type == 2) { /* External library, check name */
const char *libname;
libname = find_config_tree_str(cmd, global_locking_library_CFG, NULL);
if (libname && strstr(libname, "liblvm2clusterlock.so"))
return;
log_error("Incorrect LVM locking library specified in lvm.conf, cluster operations may not work.");
return;
}
log_error("locking_type not set correctly in lvm.conf, cluster operations will not work.");
}
/* Backs up the LVM metadata if it has changed */
void lvm_do_backup(const char *vgname)
{
struct volume_group * vg;
int consistent = 0;
DEBUGLOG("Triggering backup of VG metadata for %s.\n", vgname);
pthread_mutex_lock(&lvm_lock);
vg = vg_read_internal(cmd, vgname, NULL /*vgid*/, WARN_PV_READ, &consistent);
if (vg && consistent)
check_current_backup(vg);
else
log_error("Error backing up metadata, can't find VG for group %s", vgname);
release_vg(vg);
dm_pool_empty(cmd->mem);
pthread_mutex_unlock(&lvm_lock);
}
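/*
* Walk the LV lock hash from position 'v' (NULL starts a new walk) and
* return the next entry held in LCK_EXCL mode via *name, along with the
* iterator position to pass back in on the following call.
*/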
struct dm_hash_node *get_next_excl_lock(struct dm_hash_node *v, char **name)
{
struct lv_info *lvi;
*name = NULL;
if (!v)
v = dm_hash_get_first(lv_hash);
do {
if (v) {
lvi = dm_hash_get_data(lv_hash, v);
DEBUGLOG("Looking for EX locks. found %x mode %d\n", lvi->lock_id, lvi->lock_mode);
if (lvi->lock_mode == LCK_EXCL) {
*name = dm_hash_get_key(lv_hash, v);
}
v = dm_hash_get_next(lv_hash, v);
}
} while (v && !*name);
if (*name)
DEBUGLOG("returning EXclusive UUID %s\n", *name);
return v;
}
void lvm_do_fs_unlock(void)
{
pthread_mutex_lock(&lvm_lock);
DEBUGLOG("Syncing device names\n");
fs_unlock();
pthread_mutex_unlock(&lvm_lock);
}
/* Called to initialise the LVM context of the daemon */
int init_clvm(struct dm_hash_table *excl_uuid)
{
/* Use LOG_DAEMON for syslog messages instead of LOG_USER */
init_syslog(LOG_DAEMON);
openlog("clvmd", LOG_PID, LOG_DAEMON);
/* Initialise already held locks */
if (!get_initial_state(excl_uuid))
log_error("Cannot load initial lock states.");
if (!udev_init_library_context())
stack;
if (!(cmd = create_toolcontext(1, NULL, 0, 1, 1, 1))) {
log_error("Failed to allocate command context");
udev_fin_library_context();
return 0;
}
if (stored_errno()) {
destroy_toolcontext(cmd);
return 0;
}
cmd->cmd_line = "clvmd";
/* Check lvm.conf is setup for cluster-LVM */
check_config();
init_ignore_suspended_devices(1);
/* Trap log messages so we can pass them back to the user */
init_log_fn(lvm2_log_fn);
memlock_inc_daemon(cmd);
return 1;
}
void destroy_lvm(void)
{
if (cmd) {
memlock_dec_daemon(cmd);
destroy_toolcontext(cmd);
udev_fin_library_context();
cmd = NULL;
}
}

View File

@ -0,0 +1,41 @@
/*
* Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License v.2.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* Functions in lvm-functions.c */
#ifndef _LVM_FUNCTIONS_H
#define _LVM_FUNCTIONS_H
extern int pre_lock_lv(unsigned char lock_cmd, unsigned char lock_flags,
char *resource);
extern int do_lock_lv(unsigned char lock_cmd, unsigned char lock_flags,
char *resource);
extern const char *do_lock_query(char *resource);
extern int post_lock_lv(unsigned char lock_cmd, unsigned char lock_flags,
char *resource);
extern int do_check_lvm1(const char *vgname);
extern int do_refresh_cache(void);
extern int init_clvm(struct dm_hash_table *excl_uuid);
extern void destroy_lvm(void);
extern void init_lvhash(void);
extern void destroy_lvhash(void);
extern void lvm_do_backup(const char *vgname);
extern char *get_last_lvm_error(void);
extern void do_lock_vg(unsigned char command, unsigned char lock_flags,
char *resource);
extern struct dm_hash_node *get_next_excl_lock(struct dm_hash_node *v, char **name);
void lvm_do_fs_unlock(void);
#endif

View File

@ -0,0 +1,382 @@
/*
* Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License v.2.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* FIXME Remove duplicated functions from this file. */
/*
* Send a command to a running clvmd from the command-line
*/
#include "clvmd-common.h"
#include "clvm.h"
#include "refresh_clvmd.h"
#include <stddef.h>
#include <sys/socket.h>
#include <sys/un.h>
typedef struct lvm_response {
char node[255];
char *response;
int status;
int len;
} lvm_response_t;
/*
* This gets stuck at the start of memory we allocate so we
* can sanity-check it at deallocation time
*/
#define LVM_SIGNATURE 0x434C564D
static int _clvmd_sock = -1;
/* Open connection to the clvm daemon */
static int _open_local_sock(void)
{
int local_socket;
struct sockaddr_un sockaddr = { .sun_family = AF_UNIX };
if (!dm_strncpy(sockaddr.sun_path, CLVMD_SOCKNAME, sizeof(sockaddr.sun_path))) {
fprintf(stderr, "%s: clvmd socket name too long.", CLVMD_SOCKNAME);
return -1;
}
/* Open local socket */
if ((local_socket = socket(PF_UNIX, SOCK_STREAM, 0)) < 0) {
fprintf(stderr, "Local socket creation failed: %s", strerror(errno));
return -1;
}
if (connect(local_socket,(struct sockaddr *) &sockaddr,
sizeof(sockaddr))) {
int saved_errno = errno;
fprintf(stderr, "connect() failed on local socket: %s\n",
strerror(errno));
if (close(local_socket))
return -1;
errno = saved_errno;
return -1;
}
return local_socket;
}
/* Send a request and return the status */
static int _send_request(const char *inbuf, int inlen, char **retbuf, int no_response)
{
char outbuf[PIPE_BUF];
struct clvm_header *outheader = (struct clvm_header *) outbuf;
int len;
unsigned off;
int buflen;
int err;
/* Send it to CLVMD */
rewrite:
if ( (err = write(_clvmd_sock, inbuf, inlen)) != inlen) {
if (err == -1 && errno == EINTR)
goto rewrite;
fprintf(stderr, "Error writing data to clvmd: %s", strerror(errno));
return 0;
}
if (no_response)
return 1;
/* Get the response */
reread:
if ((len = read(_clvmd_sock, outbuf, sizeof(struct clvm_header))) < 0) {
if (errno == EINTR)
goto reread;
fprintf(stderr, "Error reading data from clvmd: %s", strerror(errno));
return 0;
}
if (len == 0) {
fprintf(stderr, "EOF reading CLVMD");
errno = ENOTCONN;
return 0;
}
/* Allocate buffer */
buflen = len + outheader->arglen;
*retbuf = dm_malloc(buflen);
if (!*retbuf) {
errno = ENOMEM;
return 0;
}
/* Copy the header */
memcpy(*retbuf, outbuf, len);
outheader = (struct clvm_header *) *retbuf;
/* Read the returned values */
off = 1; /* we've already read the first byte */
while (off <= outheader->arglen && len > 0) {
len = read(_clvmd_sock, outheader->args + off,
buflen - off - offsetof(struct clvm_header, args));
if (len > 0)
off += len;
}
/* Was it an error ? */
if (outheader->status != 0) {
errno = outheader->status;
/* Only return an error here if there are no node-specific
errors present in the message that might have more detail */
if (!(outheader->flags & CLVMD_FLAG_NODEERRS)) {
fprintf(stderr, "cluster request failed: %s\n", strerror(errno));
return 0;
}
}
return 1;
}
/* Build the structure header and parse out wildcard node names */
static void _build_header(struct clvm_header *head, int cmd, const char *node,
unsigned int len)
{
head->cmd = cmd;
head->status = 0;
head->flags = 0;
head->xid = 0;
head->clientid = 0;
if (len)
/* 1 byte is used from struct clvm_header.args[1], so -> len - 1 */
head->arglen = len - 1;
else {
head->arglen = 0;
*head->args = '\0';
}
/*
* Translate special node names.
*/
if (!node || !strcmp(node, NODE_ALL))
head->node[0] = '\0';
else if (!strcmp(node, NODE_LOCAL)) {
head->node[0] = '\0';
head->flags = CLVMD_FLAG_LOCAL;
} else
strcpy(head->node, node);
}
/*
* Send a message to one (or all) node(s) in the cluster and wait for replies
*/
static int _cluster_request(char cmd, const char *node, void *data, int len,
lvm_response_t ** response, int *num, int no_response)
{
char outbuf[sizeof(struct clvm_header) + len + strlen(node) + 1];
char *inptr;
char *retbuf = NULL;
int status;
int i;
int num_responses = 0;
struct clvm_header *head = (struct clvm_header *) outbuf;
lvm_response_t *rarray;
*num = 0;
if (_clvmd_sock == -1)
_clvmd_sock = _open_local_sock();
if (_clvmd_sock == -1)
return 0;
_build_header(head, cmd, node, len);
if (len)
memcpy(head->node + strlen(head->node) + 1, data, len);
status = _send_request(outbuf, sizeof(struct clvm_header) +
strlen(head->node) + len, &retbuf, no_response);
if (!status || no_response)
goto out;
/* Count the number of responses we got */
head = (struct clvm_header *) retbuf;
inptr = head->args;
while (inptr[0]) {
num_responses++;
inptr += strlen(inptr) + 1;
inptr += sizeof(int);
inptr += strlen(inptr) + 1;
}
/*
* Allocate response array.
* With an extra pair of INTs on the front to sanity
* check the pointer when we are given it back to free
*/
*response = NULL;
if (!(rarray = dm_malloc(sizeof(lvm_response_t) * num_responses +
sizeof(int) * 2))) {
errno = ENOMEM;
status = 0;
goto out;
}
/* Unpack the response into an lvm_response_t array */
inptr = head->args;
i = 0;
while (inptr[0]) {
strcpy(rarray[i].node, inptr);
inptr += strlen(inptr) + 1;
memcpy(&rarray[i].status, inptr, sizeof(int));
inptr += sizeof(int);
rarray[i].response = dm_malloc(strlen(inptr) + 1);
if (rarray[i].response == NULL) {
/* Free up everything else and return error */
int j;
for (j = 0; j < i; j++)
dm_free(rarray[i].response);
dm_free(rarray);
errno = ENOMEM;
status = 0;
goto out;
}
strcpy(rarray[i].response, inptr);
rarray[i].len = strlen(inptr);
inptr += strlen(inptr) + 1;
i++;
}
*num = num_responses;
*response = rarray;
out:
dm_free(retbuf);
return status;
}
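/*
 * Illustrative sketch, not part of the original file: the reply
 * payload unpacked above is a packed sequence of per-node records,
 * terminated by an empty node name:
 *
 *   node name '\0' | int status (copied as raw bytes) | response text '\0'
 *
 * A minimal walker over such a buffer looks like this (function name
 * is hypothetical):
 */
static void walk_replies(const char *args)
{
	const char *p = args;
	int status;

	while (p[0]) {
		const char *node = p;
		p += strlen(p) + 1;
		memcpy(&status, p, sizeof(int));
		p += sizeof(int);
		fprintf(stderr, "%s: status %d, reply \"%s\"\n", node, status, p);
		p += strlen(p) + 1;
	}
}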
/* Free reply array */
static int _cluster_free_request(lvm_response_t * response, int num)
{
int i;
for (i = 0; i < num; i++) {
dm_free(response[i].response);
}
dm_free(response);
return 1;
}
int refresh_clvmd(int all_nodes)
{
int num_responses;
char args[1]; // No args really.
lvm_response_t *response = NULL;
int saved_errno;
int status;
int i;
status = _cluster_request(CLVMD_CMD_REFRESH, all_nodes ? NODE_ALL : NODE_LOCAL, args, 0, &response, &num_responses, 0);
/* If any nodes were down then display them and return an error */
for (i = 0; i < num_responses; i++) {
if (response[i].status == EHOSTDOWN) {
fprintf(stderr, "clvmd not running on node %s",
response[i].node);
status = 0;
errno = response[i].status;
} else if (response[i].status) {
fprintf(stderr, "Error resetting node %s: %s",
response[i].node,
response[i].response[0] ?
response[i].response :
strerror(response[i].status));
status = 0;
errno = response[i].status;
}
}
saved_errno = errno;
_cluster_free_request(response, num_responses);
errno = saved_errno;
return status;
}
int restart_clvmd(int all_nodes)
{
int dummy, status;
status = _cluster_request(CLVMD_CMD_RESTART, all_nodes ? NODE_ALL : NODE_LOCAL, NULL, 0, NULL, &dummy, 1);
/*
* FIXME: we cannot receive the response, clvmd re-execs before sending it,
* but we also must not close the socket too early (the whole request would be dropped).
* FIXME: This should be handled this way:
* - client waits for RESTART ack (and socket close)
* - server restarts
* - client checks that server is ready again (VERSION command?)
*/
usleep(500000);
return status;
}
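/*
 * Illustrative sketch, not part of the original file, of the readiness
 * check the FIXME above suggests: rather than sleeping a fixed 500ms,
 * reconnect and poll the restarted daemon until a trivial request (the
 * FIXME proposes the VERSION command) succeeds again.  Retry counts and
 * delays here are assumptions, not implemented behaviour.
 */
static int wait_for_clvmd_restart(void)
{
	lvm_response_t *response = NULL;
	int num, i;

	for (i = 0; i < 50; i++) {	/* up to roughly 5 seconds */
		usleep(100000);
		/* The old daemon closed its end; force a fresh connection. */
		if (_clvmd_sock != -1) {
			(void) close(_clvmd_sock);
			_clvmd_sock = -1;
		}
		if (_cluster_request(CLVMD_CMD_VERSION, NODE_LOCAL, NULL, 0,
				     &response, &num, 0)) {
			_cluster_free_request(response, num);
			return 1;
		}
	}
	return 0;
}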
int debug_clvmd(int level, int clusterwide)
{
int num_responses;
char args[1];
const char *nodes;
lvm_response_t *response = NULL;
int saved_errno;
int status;
int i;
args[0] = level;
if (clusterwide)
nodes = NODE_ALL;
else
nodes = NODE_LOCAL;
status = _cluster_request(CLVMD_CMD_SET_DEBUG, nodes, args, 1, &response, &num_responses, 0);
/* If any nodes were down then display them and return an error */
for (i = 0; i < num_responses; i++) {
if (response[i].status == EHOSTDOWN) {
fprintf(stderr, "clvmd not running on node %s",
response[i].node);
status = 0;
errno = response[i].status;
} else if (response[i].status) {
fprintf(stderr, "Error setting debug on node %s: %s",
response[i].node,
response[i].response[0] ?
response[i].response :
strerror(response[i].status));
status = 0;
errno = response[i].status;
}
}
saved_errno = errno;
_cluster_free_request(response, num_responses);
errno = saved_errno;
return status;
}
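/*
 * Illustrative usage sketch (hypothetical caller, not part of the
 * original file): the three entry points above are the client side
 * used when an already-running clvmd is asked to refresh, change its
 * debugging level or restart.
 */
static int example_admin_actions(void)
{
	if (!refresh_clvmd(1))		/* refresh the LVM cache on every node */
		return 0;
	if (!debug_clvmd(2, 0))		/* debug level 2, local node only */
		return 0;
	return restart_clvmd(0);	/* restart the local daemon */
}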

View File

@ -0,0 +1,19 @@
/*
* Copyright (C) 2007 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License v.2.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
int refresh_clvmd(int all_nodes);
int restart_clvmd(int all_nodes);
int debug_clvmd(int level, int clusterwide);

View File

@ -17,27 +17,23 @@ top_builddir = @top_builddir@
CPG_LIBS = @CPG_LIBS@
CPG_CFLAGS = @CPG_CFLAGS@
SACKPT_LIBS = @SACKPT_LIBS@
SACKPT_CFLAGS = @SACKPT_CFLAGS@
SOURCES = clogd.c cluster.c compat.c functions.c link_mon.c local.c logging.c
TARGETS = cmirrord
CFLOW_SOURCES = $(addprefix $(srcdir)/, $(SOURCES))
CFLOW_TARGET := $(TARGETS)
include $(top_builddir)/make.tmpl
LMLIBS += $(CPG_LIBS)
CFLAGS += $(CPG_CFLAGS) $(EXTRA_EXEC_CFLAGS)
LDFLAGS += $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS)
LIBS += -ldevmapper
LMLIBS += $(CPG_LIBS) $(SACKPT_LIBS)
CFLAGS += $(CPG_CFLAGS) $(SACKPT_CFLAGS) $(EXTRA_EXEC_CFLAGS)
LDFLAGS += $(EXTRA_EXEC_LDFLAGS)
cmirrord: $(OBJECTS)
@echo " [CC] $@"
$(Q) $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJECTS) \
$(LMLIBS) -L$(top_builddir)/libdm -ldevmapper $(LIBS)
cmirrord: $(OBJECTS) $(top_builddir)/lib/liblvm-internal.a
$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJECTS) \
$(LVMLIBS) $(LMLIBS) $(LIBS)
install_cluster: $(TARGETS)
@echo " [INSTALL] $<"
$(Q) $(INSTALL_PROGRAM) -D $< $(usrsbindir)/$(<F)
install: install_cluster
install: $(TARGETS)
$(INSTALL_PROGRAM) -D cmirrord $(usrsbindir)/cmirrord

View File

@ -245,7 +245,6 @@ static void daemonize(void)
}
LOG_OPEN("cmirrord", LOG_PID, LOG_DAEMON);
/* coverity[leaked_handle] devnull cannot leak here */
}
/*

View File

@ -16,11 +16,7 @@
#include "functions.h"
#include "link_mon.h"
#include "local.h"
#include "lib/mm/xlate.h"
#include "base/memory/zalloc.h"
/* FIXME: remove this and the code */
#define CMIRROR_HAS_CHECKPOINT 0
#include "xlate.h"
#include <corosync/cpg.h>
#include <errno.h>
@ -108,7 +104,7 @@ static SaVersionT version = { 'B', 1, 1 };
#endif
#define DEBUGGING_HISTORY 100
#define DEBUGGING_BUFLEN 270
#define DEBUGGING_BUFLEN 128
#define LOG_SPRINT(cc, f, arg...) do { \
cc->idx++; \
cc->idx = cc->idx % DEBUGGING_HISTORY; \
@ -170,9 +166,6 @@ int cluster_send(struct clog_request *rq)
{
int r;
int found = 0;
#if CMIRROR_HAS_CHECKPOINT
int count = 0;
#endif
struct iovec iov;
struct clog_cpg *entry;
@ -189,7 +182,7 @@ int cluster_send(struct clog_request *rq)
}
/*
* Once the request heads for the cluster, the luid loses
* Once the request heads for the cluster, the luid looses
* all its meaning.
*/
rq->u_rq.luid = 0;
@ -210,6 +203,8 @@ int cluster_send(struct clog_request *rq)
#if CMIRROR_HAS_CHECKPOINT
do {
int count = 0;
r = cpg_mcast_joined(entry->handle, CPG_TYPE_AGREED, &iov, 1);
if (r != SA_AIS_ERR_TRY_AGAIN)
break;
@ -403,12 +398,13 @@ static struct checkpoint_data *prepare_checkpoint(struct clog_cpg *entry,
return NULL;
}
new = zalloc(sizeof(*new));
new = malloc(sizeof(*new));
if (!new) {
LOG_ERROR("Unable to create checkpoint data for %u",
cp_requester);
return NULL;
}
memset(new, 0, sizeof(*new));
new->requester = cp_requester;
strncpy(new->uuid, entry->name.value, entry->name.length);
@ -643,12 +639,13 @@ static int export_checkpoint(struct checkpoint_data *cp)
rq_size += RECOVERING_REGION_SECTION_SIZE;
rq_size += cp->bitmap_size * 2; /* clean|sync_bits */
rq = zalloc(rq_size);
rq = malloc(rq_size);
if (!rq) {
LOG_ERROR("export_checkpoint: "
"Unable to allocate transfer structs");
return -ENOMEM;
}
memset(rq, 0, rq_size);
dm_list_init(&rq->u.list);
rq->u_rq.request_type = DM_ULOG_CHECKPOINT_READY;
@ -1383,7 +1380,7 @@ static void cpg_leave_callback(struct clog_cpg *match,
size_t member_list_entries)
{
unsigned i;
int j, fd = -1;
int j, fd;
uint32_t lowest = match->lowest_id;
struct clog_request *rq, *n;
struct checkpoint_data *p_cp, *c_cp;
@ -1548,7 +1545,7 @@ static void cpg_config_callback(cpg_handle_t handle, const struct cpg_name *gnam
member_list, member_list_entries);
}
static cpg_callbacks_t cpg_callbacks = {
cpg_callbacks_t cpg_callbacks = {
.cpg_deliver_fn = cpg_message_callback,
.cpg_confchg_fn = cpg_config_callback,
};
@ -1620,11 +1617,12 @@ int create_cluster_cpg(char *uuid, uint64_t luid)
return -EEXIST;
}
new = zalloc(sizeof(*new));
new = malloc(sizeof(*new));
if (!new) {
LOG_ERROR("Unable to allocate memory for clog_cpg");
return -ENOMEM;
}
memset(new, 0, sizeof(*new));
dm_list_init(&new->list);
new->lowest_id = 0xDEAD;
dm_list_init(&new->startup_list);
@ -1632,7 +1630,7 @@ int create_cluster_cpg(char *uuid, uint64_t luid)
size = ((strlen(uuid) + 1) > CPG_MAX_NAME_LENGTH) ?
CPG_MAX_NAME_LENGTH : (strlen(uuid) + 1);
(void) dm_strncpy(new->name.value, uuid, size);
strncpy(new->name.value, uuid, size);
new->name.length = (uint32_t)size;
new->luid = luid;

View File

@ -12,8 +12,8 @@
#ifndef _LVM_CLOG_CLUSTER_H
#define _LVM_CLOG_CLUSTER_H
#include "libdm/libdevmapper.h"
#include "libdm/misc/dm-log-userspace.h"
#include "dm-log-userspace.h"
#include "libdevmapper.h"
#define DM_ULOG_RESPONSE 0x1000U /* in last byte of 32-bit value */
#define DM_ULOG_CHECKPOINT_READY 21
@ -39,7 +39,7 @@ struct clog_request {
* machine. If the two are equal, there is no need
* to do endian conversions.
*/
union version_u {
union {
uint64_t version[2]; /* LE version and native version */
struct dm_list list;
} u;

View File

@ -8,7 +8,7 @@
#include "logging.h"
#include "cluster.h"
#include "compat.h"
#include "lib/mm/xlate.h"
#include "xlate.h"
#include <errno.h>

View File

@ -11,7 +11,6 @@
*/
#include "logging.h"
#include "functions.h"
#include "base/memory/zalloc.h"
#include <sys/sysmacros.h>
#include <dirent.h>
@ -34,7 +33,7 @@
#define LOG_OFFSET 2
#define RESYNC_HISTORY 50
#define RESYNC_BUFLEN 270
#define RESYNC_BUFLEN 128
//static char resync_history[RESYNC_HISTORY][128];
//static int idx = 0;
#define LOG_SPRINT(_lc, f, arg...) do { \
@ -378,7 +377,7 @@ static int _clog_ctr(char *uuid, uint64_t luid,
uint32_t block_on_error = 0;
int disk_log;
char disk_path[PATH_MAX] = { 0 };
char disk_path[128];
int unlink_path = 0;
long page_size;
int pages;
@ -436,7 +435,7 @@ static int _clog_ctr(char *uuid, uint64_t luid,
block_on_error = 1;
}
lc = zalloc(sizeof(*lc));
lc = dm_zalloc(sizeof(*lc));
if (!lc) {
LOG_ERROR("Unable to allocate cluster log context");
r = -ENOMEM;
@ -452,19 +451,15 @@ static int _clog_ctr(char *uuid, uint64_t luid,
lc->skip_bit_warning = region_count;
lc->disk_fd = -1;
lc->log_dev_failed = 0;
if (!dm_strncpy(lc->uuid, uuid, DM_UUID_LEN)) {
LOG_ERROR("Cannot use too long UUID %s.", uuid);
r = -EINVAL;
goto fail;
}
strncpy(lc->uuid, uuid, DM_UUID_LEN);
lc->luid = luid;
if (get_log(lc->uuid, lc->luid) ||
get_pending_log(lc->uuid, lc->luid)) {
LOG_ERROR("[%s/%" PRIu64 "u] Log already exists, unable to create.",
SHORT_UUID(lc->uuid), lc->luid);
r = -EINVAL;
goto fail;
dm_free(lc);
return -EINVAL;
}
dm_list_init(&lc->mark_list);
@ -533,9 +528,9 @@ fail:
LOG_ERROR("Close device error, %s: %s",
disk_path, strerror(errno));
free(lc->disk_buffer);
free(lc->sync_bits);
free(lc->clean_bits);
free(lc);
dm_free(lc->sync_bits);
dm_free(lc->clean_bits);
dm_free(lc);
}
return r;
}
@ -658,10 +653,11 @@ static int clog_dtr(struct dm_ulog_request *rq)
if (lc->disk_fd != -1 && close(lc->disk_fd))
LOG_ERROR("Failed to close disk log: %s",
strerror(errno));
free(lc->disk_buffer);
free(lc->clean_bits);
free(lc->sync_bits);
free(lc);
if (lc->disk_buffer)
free(lc->disk_buffer);
dm_free(lc->clean_bits);
dm_free(lc->sync_bits);
dm_free(lc);
return 0;
}

View File

@ -12,8 +12,7 @@
#ifndef _LVM_CLOG_FUNCTIONS_H
#define _LVM_CLOG_FUNCTIONS_H
#include "libdm/libdevmapper.h"
#include "libdm/misc/dm-log-userspace.h"
#include "dm-log-userspace.h"
#include "cluster.h"
#define LOG_RESUMED 1

View File

@ -13,6 +13,10 @@
#ifndef _LVM_CLOG_LOGGING_H
#define _LVM_CLOG_LOGGING_H
#define _GNU_SOURCE
#define _FILE_OFFSET_BITS 64
#include "configure.h"
#include <stdio.h>
#include <stdint.h>
#include <syslog.h>

View File

@ -14,21 +14,11 @@
srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
abs_srcdir = @abs_srcdir@
SOURCES = libdevmapper-event.c
SOURCES2 = dmeventd.c
TARGETS = dmeventd
CFLOW_SOURCES = $(addprefix $(srcdir)/, $(SOURCES) $(SOURCES2) \
plugins/lvm2/dmeventd_lvm.c \
plugins/mirror/dmeventd_mirror.c \
plugins/raid/dmeventd_raid.c \
plugins/snapshot/dmeventd_snapshot.c \
plugins/thin/dmeventd_thin.c \
plugins/vdo/dmeventd_vdo.c \
)
CFLOW_TARGET := $(TARGETS)
.PHONY: install_lib_dynamic install_lib_static install_include \
install_pkgconfig install_dmeventd_dynamic install_dmeventd_static \
@ -47,7 +37,6 @@ endif
LIB_VERSION = $(LIB_VERSION_DM)
LIB_SHARED = $(LIB_NAME).$(LIB_SUFFIX)
LIBS = $(PTHREAD_LIBS) -L$(interfacebuilddir) -ldevmapper
CLEAN_TARGETS = dmeventd.static $(LIB_NAME).a
@ -57,6 +46,7 @@ endif
CFLOW_LIST = $(SOURCES)
CFLOW_LIST_TARGET = $(LIB_NAME).cflow
CFLOW_TARGET = dmeventd
EXPORTED_HEADER = $(srcdir)/libdevmapper-event.h
EXPORTED_FN_PREFIX = dm_event
@ -65,47 +55,51 @@ include $(top_builddir)/make.tmpl
all: device-mapper
device-mapper: $(TARGETS)
plugins.device-mapper: $(LIB_SHARED)
LIBS += -ldevmapper
LVMLIBS += -ldevmapper-event $(PTHREAD_LIBS)
CFLAGS_dmeventd.o += $(EXTRA_EXEC_CFLAGS)
dmeventd: $(LIB_SHARED) dmeventd.o
@echo " [CC] $@"
$(Q) $(CC) $(CFLAGS) $(LDFLAGS) $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) dmeventd.o \
-o $@ $(DL_LIBS) $(DMEVENT_LIBS) $(LIBS)
$(CC) $(CFLAGS) $(LDFLAGS) $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) -L. -o $@ dmeventd.o \
$(DL_LIBS) $(LVMLIBS) $(LIBS) -rdynamic
dmeventd.static: $(LIB_STATIC) dmeventd.o
@echo " [CC] $@"
$(Q) $(CC) $(CFLAGS) $(LDFLAGS) -static dmeventd.o \
-o $@ $(DL_LIBS) $(DMEVENT_LIBS) $(LIBS) $(STATIC_LIBS)
dmeventd.static: $(LIB_STATIC) dmeventd.o $(interfacebuilddir)/libdevmapper.a
$(CC) $(CFLAGS) $(LDFLAGS) $(ELDFLAGS) -static -L. -L$(interfacebuilddir) -o $@ \
dmeventd.o $(DL_LIBS) $(LVMLIBS) $(LIBS) $(STATIC_LIBS)
ifeq ("@PKGCONFIG@", "yes")
INSTALL_LIB_TARGETS += install_pkgconfig
endif
ifneq ("$(CFLOW_CMD)", "")
CFLOW_SOURCES = $(addprefix $(srcdir)/, $(SOURCES))
-include $(top_builddir)/libdm/libdevmapper.cflow
-include $(top_builddir)/lib/liblvm-internal.cflow
-include $(top_builddir)/lib/liblvm2cmd.cflow
-include $(top_builddir)/daemons/dmeventd/$(LIB_NAME).cflow
-include $(top_builddir)/daemons/dmeventd/plugins/mirror/$(LIB_NAME)-lvm2mirror.cflow
endif
install_include: $(srcdir)/libdevmapper-event.h
@echo " [INSTALL] $(<F)"
$(Q) $(INSTALL_DATA) -D $< $(includedir)/$(<F)
$(INSTALL_DATA) -D $< $(includedir)/$(<F)
install_pkgconfig: libdevmapper-event.pc
@echo " [INSTALL] $<"
$(Q) $(INSTALL_DATA) -D $< $(pkgconfigdir)/devmapper-event.pc
$(INSTALL_DATA) -D $< $(pkgconfigdir)/devmapper-event.pc
install_lib_dynamic: install_lib_shared
install_lib_static: $(LIB_STATIC)
@echo " [INSTALL] $<"
$(Q) $(INSTALL_DATA) -D $< $(usrlibdir)/$(<F)
$(INSTALL_DATA) -D $< $(usrlibdir)/$(<F)
install_lib: $(INSTALL_LIB_TARGETS)
install_dmeventd_dynamic: dmeventd
@echo " [INSTALL] $<"
$(Q) $(INSTALL_PROGRAM) -D $< $(sbindir)/$(<F)
$(INSTALL_PROGRAM) -D $< $(sbindir)/$(<F)
install_dmeventd_static: dmeventd.static
@echo " [INSTALL] $<"
$(Q) $(INSTALL_PROGRAM) -D $< $(staticdir)/$(<F)
$(INSTALL_PROGRAM) -D $< $(staticdir)/$(<F)
install_dmeventd: $(INSTALL_DMEVENTD_TARGETS)

View File

@ -16,12 +16,12 @@
* dmeventd - dm event daemon to monitor active mapped devices
*/
#include "dm-logging.h"
#include "libdevmapper-event.h"
#include "dmeventd.h"
#include "libdm/misc/dm-logging.h"
#include "base/memory/zalloc.h"
#include "tool.h"
#include <dlfcn.h>
#include <pthread.h>
@ -33,8 +33,6 @@
#include <signal.h>
#include <arpa/inet.h> /* for htonl, ntohl */
#include <fcntl.h> /* for musl libc */
#include <unistd.h>
#include <syslog.h>
#ifdef __linux__
/*
@ -62,8 +60,8 @@
#endif
#define DM_SIGNALED_EXIT 1
#define DM_SCHEDULED_EXIT 2
#include <syslog.h>
static volatile sig_atomic_t _exit_now = 0; /* set to '1' when signal is given to exit */
/* List (un)link macros. */
@ -101,28 +99,13 @@ static time_t _idle_since = 0;
static char **_initial_registrations = 0;
/* FIXME Make configurable at runtime */
/* All libdm messages */
__attribute__((format(printf, 5, 6)))
static void _libdm_log(int level, const char *file, int line,
int dm_errno_or_class, const char *format, ...)
{
va_list ap;
va_start(ap, format);
dm_event_log("#dm", level, file, line, dm_errno_or_class, format, ap);
va_end(ap);
}
/* All dmeventd messages */
#undef LOG_MESG
#define LOG_MESG(l, f, ln, e, x...) _dmeventd_log(l, f, ln, e, ## x)
__attribute__((format(printf, 5, 6)))
__attribute__((format(printf, 4, 5)))
static void _dmeventd_log(int level, const char *file, int line,
int dm_errno_or_class, const char *format, ...)
const char *format, ...)
{
va_list ap;
va_start(ap, format);
dm_event_log("dmeventd", level, file, line, dm_errno_or_class, format, ap);
dm_event_log("dm", level, file, line, 0, format, ap);
va_end(ap);
}
@ -264,19 +247,19 @@ static pthread_cond_t _timeout_cond = PTHREAD_COND_INITIALIZER;
/* DSO data allocate/free. */
static void _free_dso_data(struct dso_data *data)
{
free(data->dso_name);
free(data);
dm_free(data->dso_name);
dm_free(data);
}
static struct dso_data *_alloc_dso_data(struct message_data *data)
{
struct dso_data *ret = (typeof(ret)) zalloc(sizeof(*ret));
struct dso_data *ret = (typeof(ret)) dm_zalloc(sizeof(*ret));
if (!ret)
return_NULL;
if (!(ret->dso_name = strdup(data->dso_name))) {
free(ret);
if (!(ret->dso_name = dm_strdup(data->dso_name))) {
dm_free(ret);
return_NULL;
}
@ -397,9 +380,9 @@ static void _free_thread_status(struct thread_status *thread)
_lib_put(thread->dso_data);
if (thread->wait_task)
dm_task_destroy(thread->wait_task);
free(thread->device.uuid);
free(thread->device.name);
free(thread);
dm_free(thread->device.uuid);
dm_free(thread->device.name);
dm_free(thread);
}
/* Note: events_field must not be 0, ensured by caller */
@ -408,7 +391,7 @@ static struct thread_status *_alloc_thread_status(const struct message_data *dat
{
struct thread_status *thread;
if (!(thread = zalloc(sizeof(*thread)))) {
if (!(thread = dm_zalloc(sizeof(*thread)))) {
log_error("Cannot create new thread, out of memory.");
return NULL;
}
@ -422,11 +405,11 @@ static struct thread_status *_alloc_thread_status(const struct message_data *dat
if (!dm_task_set_uuid(thread->wait_task, data->device_uuid))
goto_out;
if (!(thread->device.uuid = strdup(data->device_uuid)))
if (!(thread->device.uuid = dm_strdup(data->device_uuid)))
goto_out;
/* Until real name resolved, use UUID */
if (!(thread->device.name = strdup(data->device_uuid)))
if (!(thread->device.name = dm_strdup(data->device_uuid)))
goto_out;
/* runs ioctl and may register lvm2 plugin */
@ -470,7 +453,7 @@ static int _pthread_create_smallstack(pthread_t *t, void *(*fun)(void *), void *
/*
* We use a smaller stack since it gets preallocated in its entirety
*/
pthread_attr_setstacksize(&attr, THREAD_STACK_SIZE + getpagesize());
pthread_attr_setstacksize(&attr, THREAD_STACK_SIZE);
/*
* If no-one will be waiting, we need to detach.
@ -515,7 +498,7 @@ static int _fetch_string(char **ptr, char **src, const int delimiter)
if ((p = strchr(*src, delimiter))) {
if (*src < p) {
*p = 0; /* Temporary exit with \0 */
if (!(*ptr = strdup(*src))) {
if (!(*ptr = dm_strdup(*src))) {
log_error("Failed to fetch item %s.", *src);
ret = 0; /* Allocation fail */
}
@ -525,7 +508,7 @@ static int _fetch_string(char **ptr, char **src, const int delimiter)
(*src)++; /* Skip delimiter, next field */
} else if ((len = strlen(*src))) {
/* No delimiter, item ends with '\0' */
if (!(*ptr = strdup(*src))) {
if (!(*ptr = dm_strdup(*src))) {
log_error("Failed to fetch last item %s.", *src);
ret = 0; /* Fail */
}
@ -538,11 +521,11 @@ out:
/* Free message memory. */
static void _free_message(struct message_data *message_data)
{
free(message_data->id);
free(message_data->dso_name);
free(message_data->device_uuid);
free(message_data->events_str);
free(message_data->timeout_str);
dm_free(message_data->id);
dm_free(message_data->dso_name);
dm_free(message_data->device_uuid);
dm_free(message_data->events_str);
dm_free(message_data->timeout_str);
}
/* Parse a register message from the client. */
@ -574,7 +557,7 @@ static int _parse_message(struct message_data *message_data)
ret = 1;
}
free(msg->data);
dm_free(msg->data);
msg->data = NULL;
return ret;
@ -608,8 +591,8 @@ static int _fill_device_data(struct thread_status *ts)
if (!dm_task_run(dmt))
goto fail;
free(ts->device.name);
if (!(ts->device.name = strdup(dm_task_get_name(dmt))))
dm_free(ts->device.name);
if (!(ts->device.name = dm_strdup(dm_task_get_name(dmt))))
goto fail;
if (!dm_task_get_info(dmt, &dmi))
@ -678,9 +661,6 @@ static int _get_status(struct message_data *message_data)
char **buffers;
char *message;
if (!message_data->id)
return -EINVAL;
_lock_mutex();
count = dm_list_size(&_thread_registry);
buffers = alloca(sizeof(char*) * count);
@ -699,8 +679,8 @@ static int _get_status(struct message_data *message_data)
len = strlen(message_data->id);
msg->size = size + len + 1;
free(msg->data);
if (!(msg->data = malloc(msg->size)))
dm_free(msg->data);
if (!(msg->data = dm_malloc(msg->size)))
goto out;
memcpy(msg->data, message_data->id, len);
@ -715,7 +695,7 @@ static int _get_status(struct message_data *message_data)
ret = 0;
out:
for (j = 0; j < i; ++j)
free(buffers[j]);
dm_free(buffers[j]);
return ret;
}
@ -724,7 +704,7 @@ static int _get_parameters(struct message_data *message_data) {
struct dm_event_daemon_message *msg = message_data->msg;
int size;
free(msg->data);
dm_free(msg->data);
if ((size = dm_asprintf(&msg->data, "%s pid=%d daemon=%s exec_method=%s",
message_data->id, getpid(),
_foreground ? "no" : "yes",
@ -755,9 +735,8 @@ static void _exit_timeout(void *unused __attribute__((unused)))
static void *_timeout_thread(void *unused __attribute__((unused)))
{
struct thread_status *thread;
struct timespec timeout, real_time;
struct timespec timeout;
time_t curr_time;
int ret;
DEBUGLOG("Timeout thread starting.");
pthread_cleanup_push(_exit_timeout, NULL);
@ -766,16 +745,7 @@ static void *_timeout_thread(void *unused __attribute__((unused)))
while (!dm_list_empty(&_timeout_registry)) {
timeout.tv_sec = 0;
timeout.tv_nsec = 0;
#ifndef HAVE_REALTIME
curr_time = time(NULL);
#else
if (clock_gettime(CLOCK_REALTIME, &real_time)) {
log_error("Failed to read clock_gettime().");
break;
}
/* 10ms back to the future */
curr_time = real_time.tv_sec + ((real_time.tv_nsec > (1000000000 - 10000000)) ? 1 : 0);
#endif
dm_list_iterate_items_gen(thread, &_timeout_registry, timeout_list) {
if (thread->next_time <= curr_time) {
@ -788,10 +758,7 @@ static void *_timeout_thread(void *unused __attribute__((unused)))
} else {
DEBUGLOG("Sending SIGALRM to Thr %x for timeout.",
(int) thread->thread);
ret = pthread_kill(thread->thread, SIGALRM);
if (ret && (ret != ESRCH))
log_error("Unable to wakeup Thr %x for timeout: %s.",
(int) thread->thread, strerror(ret));
pthread_kill(thread->thread, SIGALRM);
}
_unlock_mutex();
}
@ -881,7 +848,6 @@ static int _event_wait(struct thread_status *thread)
* This is so that you can break out of waiting on an event,
* either for a timeout event, or to cancel the thread.
*/
sigemptyset(&old);
sigemptyset(&set);
sigaddset(&set, SIGALRM);
if (pthread_sigmask(SIG_UNBLOCK, &set, &old) != 0) {
@ -1075,7 +1041,6 @@ out:
* "label at end of compound statement" */
;
/* coverity[lock_order] _global_mutex is kept locked */
pthread_cleanup_pop(1);
return NULL;
@ -1238,7 +1203,7 @@ static int _registered_device(struct message_data *message_data,
int r;
struct dm_event_daemon_message *msg = message_data->msg;
free(msg->data);
dm_free(msg->data);
if ((r = dm_asprintf(&(msg->data), "%s %s %s %u",
message_data->id,
@ -1378,7 +1343,7 @@ static int _get_timeout(struct message_data *message_data)
if (!thread)
return -ENODEV;
free(msg->data);
dm_free(msg->data);
msg->size = dm_asprintf(&(msg->data), "%s %" PRIu32,
message_data->id, thread->timeout);
@ -1498,34 +1463,37 @@ static int _client_read(struct dm_event_fifos *fifos,
t.tv_usec = 0;
ret = select(fifos->client + 1, &fds, NULL, NULL, &t);
if (!ret && bytes)
continue; /* trying to finish read */
if (!ret && !bytes) /* nothing to read */
return 0;
if (ret <= 0) /* nothing to read */
goto bad;
if (!ret) /* trying to finish read */
continue;
if (ret < 0) /* error */
return 0;
ret = read(fifos->client, buf + bytes, size - bytes);
bytes += ret > 0 ? ret : 0;
if (!msg->data && (bytes == 2 * sizeof(uint32_t))) {
if (header && (bytes == 2 * sizeof(uint32_t))) {
msg->cmd = ntohl(header[0]);
size = msg->size = ntohl(header[1]);
bytes = 0;
if (!(size = msg->size = ntohl(header[1])))
break;
if (!(buf = msg->data = malloc(msg->size)))
goto bad;
if (!size)
break; /* No data -> error */
buf = msg->data = dm_malloc(msg->size);
if (!buf)
break; /* No mem -> error */
header = 0;
}
}
if (bytes == size)
return 1;
if (bytes != size) {
dm_free(msg->data);
msg->data = NULL;
return 0;
}
bad:
free(msg->data);
msg->data = NULL;
return 0;
return 1;
}
/*
@ -1540,7 +1508,7 @@ static int _client_write(struct dm_event_fifos *fifos,
fd_set fds;
size_t size = 2 * sizeof(uint32_t) + ((msg->data) ? msg->size : 0);
uint32_t *header = malloc(size);
uint32_t *header = dm_malloc(size);
char *buf = (char *)header;
if (!header) {
@ -1570,7 +1538,7 @@ static int _client_write(struct dm_event_fifos *fifos,
}
if (header != temp)
free(header);
dm_free(header);
return (bytes == size);
}
@ -1632,7 +1600,7 @@ static int _do_process_request(struct dm_event_daemon_message *msg)
msg->size = dm_asprintf(&(msg->data), "%s %s %d", answer,
(msg->cmd == DM_EVENT_CMD_DIE) ? "DYING" : "HELLO",
DM_EVENT_PROTOCOL_VERSION);
free(answer);
dm_free(answer);
}
} else if (msg->cmd != DM_EVENT_CMD_ACTIVE && !_parse_message(&message_data)) {
stack;
@ -1674,7 +1642,7 @@ static void _process_request(struct dm_event_fifos *fifos)
DEBUGLOG("<<< CMD:%s (0x%x) completed (result %d).", decode_cmd(cmd), cmd, msg.cmd);
free(msg.data);
dm_free(msg.data);
if (cmd == DM_EVENT_CMD_DIE) {
if (unlink(DMEVENTD_PIDFILE))
@ -1746,8 +1714,7 @@ static void _init_thread_signals(void)
sigset_t my_sigset;
struct sigaction act = { .sa_handler = _sig_alarm };
if (sigaction(SIGALRM, &act, NULL))
log_sys_debug("sigaction", "SIGLARM");
sigaction(SIGALRM, &act, NULL);
sigfillset(&my_sigset);
/* These are used for exiting */
@ -1756,8 +1723,7 @@ static void _init_thread_signals(void)
sigdelset(&my_sigset, SIGHUP);
sigdelset(&my_sigset, SIGQUIT);
if (pthread_sigmask(SIG_BLOCK, &my_sigset, NULL))
log_sys_error("pthread_sigmask", "SIG_BLOCK");
pthread_sigmask(SIG_BLOCK, &my_sigset, NULL);
}
/*
@ -1769,7 +1735,7 @@ static void _init_thread_signals(void)
*/
static void _exit_handler(int sig __attribute__((unused)))
{
_exit_now = DM_SIGNALED_EXIT;
_exit_now = 1;
}
#ifdef __linux__
@ -1987,7 +1953,7 @@ static int _reinstate_registrations(struct dm_event_fifos *fifos)
int i, ret;
ret = daemon_talk(fifos, &msg, DM_EVENT_CMD_HELLO, NULL, NULL, 0, 0);
free(msg.data);
dm_free(msg.data);
msg.data = NULL;
if (ret) {
@ -2033,8 +1999,8 @@ static int _reinstate_registrations(struct dm_event_fifos *fifos)
static void _restart_dmeventd(void)
{
struct dm_event_fifos fifos = {
.client = -1,
.server = -1,
.client = -1,
/* FIXME Make these either configurable or depend directly on dmeventd_path */
.client_path = DM_EVENT_FIFO_CLIENT,
.server_path = DM_EVENT_FIFO_SERVER
@ -2073,18 +2039,19 @@ static void _restart_dmeventd(void)
++count;
}
if (!(_initial_registrations = zalloc(sizeof(char*) * (count + 1)))) {
if (!(_initial_registrations = dm_malloc(sizeof(char*) * (count + 1)))) {
fprintf(stderr, "Memory allocation registration failed.\n");
goto bad;
}
for (i = 0; i < count; ++i) {
if (!(_initial_registrations[i] = strdup(message))) {
if (!(_initial_registrations[i] = dm_strdup(message))) {
fprintf(stderr, "Memory allocation for message failed.\n");
goto bad;
}
message += strlen(message) + 1;
}
_initial_registrations[count] = NULL;
if (version >= 2) {
if (daemon_talk(&fifos, &msg, DM_EVENT_CMD_GET_PARAMETERS, "-", "-", 0, 0)) {
@ -2224,7 +2191,7 @@ int main(int argc, char *argv[])
openlog("dmeventd", LOG_PID, LOG_DAEMON);
dm_event_log_set(_debug_level, _use_syslog);
dm_log_with_errno_init(_libdm_log);
dm_log_init(_dmeventd_log);
(void) dm_prepare_selinux_context(DMEVENTD_PIDFILE, S_IFREG);
if (dm_create_lockfile(DMEVENTD_PIDFILE) == 0)
@ -2247,8 +2214,7 @@ int main(int argc, char *argv[])
_init_thread_signals();
if (pthread_mutex_init(&_global_mutex, NULL))
exit(EXIT_FAILURE);
pthread_mutex_init(&_global_mutex, NULL);
if (!_systemd_activation && !_open_fifos(&fifos))
exit(EXIT_FIFO_FAILURE);
@ -2267,14 +2233,11 @@ int main(int argc, char *argv[])
for (;;) {
if (_idle_since) {
if (_exit_now) {
if (_exit_now == DM_SCHEDULED_EXIT)
break; /* Only prints shutdown message */
log_info("dmeventd detected break while being idle "
"for %ld second(s), exiting.",
(long) (time(NULL) - _idle_since));
break;
}
if (idle_exit_timeout) {
} else if (idle_exit_timeout) {
now = time(NULL);
if (now < _idle_since)
_idle_since = now; /* clock change? */
@ -2285,14 +2248,15 @@ int main(int argc, char *argv[])
break;
}
}
} else if (_exit_now == DM_SIGNALED_EXIT) {
_exit_now = DM_SCHEDULED_EXIT;
} else if (_exit_now) {
_exit_now = 0;
/*
* When '_exit_now' is set, signal has been received,
* but can not simply exit unless all
* threads are done processing.
*/
log_info("dmeventd received break, scheduling exit.");
log_warn("WARNING: There are still devices being monitored.");
log_warn("WARNING: Refusing to exit.");
}
_process_request(&fifos);
_cleanup_unused_threads();

View File

@ -12,12 +12,10 @@
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dm-logging.h"
#include "dmlib.h"
#include "libdevmapper-event.h"
#include "dmeventd.h"
#include "libdm/misc/dm-logging.h"
#include "base/memory/zalloc.h"
#include "lib/misc/intl.h"
#include <fcntl.h>
#include <sys/file.h>
@ -27,7 +25,6 @@
#include <arpa/inet.h> /* for htonl, ntohl */
#include <pthread.h>
#include <syslog.h>
#include <unistd.h>
static int _debug_level = 0;
static int _use_syslog = 0;
@ -50,8 +47,8 @@ struct dm_event_handler {
static void _dm_event_handler_clear_dev_info(struct dm_event_handler *dmevh)
{
free(dmevh->dev_name);
free(dmevh->uuid);
dm_free(dmevh->dev_name);
dm_free(dmevh->uuid);
dmevh->dev_name = dmevh->uuid = NULL;
dmevh->major = dmevh->minor = 0;
}
@ -60,7 +57,7 @@ struct dm_event_handler *dm_event_handler_create(void)
{
struct dm_event_handler *dmevh;
if (!(dmevh = zalloc(sizeof(*dmevh)))) {
if (!(dmevh = dm_zalloc(sizeof(*dmevh)))) {
log_error("Failed to allocate event handler.");
return NULL;
}
@ -71,9 +68,9 @@ struct dm_event_handler *dm_event_handler_create(void)
void dm_event_handler_destroy(struct dm_event_handler *dmevh)
{
_dm_event_handler_clear_dev_info(dmevh);
free(dmevh->dso);
free(dmevh->dmeventd_path);
free(dmevh);
dm_free(dmevh->dso);
dm_free(dmevh->dmeventd_path);
dm_free(dmevh);
}
int dm_event_handler_set_dmeventd_path(struct dm_event_handler *dmevh, const char *dmeventd_path)
@ -81,9 +78,9 @@ int dm_event_handler_set_dmeventd_path(struct dm_event_handler *dmevh, const cha
if (!dmeventd_path) /* noop */
return 0;
free(dmevh->dmeventd_path);
dm_free(dmevh->dmeventd_path);
if (!(dmevh->dmeventd_path = strdup(dmeventd_path)))
if (!(dmevh->dmeventd_path = dm_strdup(dmeventd_path)))
return -ENOMEM;
return 0;
@ -94,9 +91,9 @@ int dm_event_handler_set_dso(struct dm_event_handler *dmevh, const char *path)
if (!path) /* noop */
return 0;
free(dmevh->dso);
dm_free(dmevh->dso);
if (!(dmevh->dso = strdup(path)))
if (!(dmevh->dso = dm_strdup(path)))
return -ENOMEM;
return 0;
@ -109,7 +106,7 @@ int dm_event_handler_set_dev_name(struct dm_event_handler *dmevh, const char *de
_dm_event_handler_clear_dev_info(dmevh);
if (!(dmevh->dev_name = strdup(dev_name)))
if (!(dmevh->dev_name = dm_strdup(dev_name)))
return -ENOMEM;
return 0;
@ -122,7 +119,7 @@ int dm_event_handler_set_uuid(struct dm_event_handler *dmevh, const char *uuid)
_dm_event_handler_clear_dev_info(dmevh);
if (!(dmevh->uuid = strdup(uuid)))
if (!(dmevh->uuid = dm_strdup(uuid)))
return -ENOMEM;
return 0;
@ -237,50 +234,44 @@ static int _daemon_read(struct dm_event_fifos *fifos,
ret = select(fifos->server + 1, &fds, NULL, NULL, &tval);
if (ret < 0 && errno != EINTR) {
log_error("Unable to read from event server.");
goto bad;
return 0;
}
if ((ret == 0) && (i > 4) && !bytes) {
log_error("No input from event server.");
goto bad;
return 0;
}
}
if (ret < 1) {
log_error("Unable to read from event server.");
goto bad;
return 0;
}
ret = read(fifos->server, buf + bytes, size);
if (ret < 0) {
if ((errno == EINTR) || (errno == EAGAIN))
continue;
log_error("Unable to read from event server.");
goto bad;
}
bytes += ret;
if (!msg->data && (bytes == 2 * sizeof(uint32_t))) {
msg->cmd = ntohl(header[0]);
bytes = 0;
if (!(size = msg->size = ntohl(header[1])))
break;
if (!(buf = msg->data = malloc(msg->size))) {
log_error("Unable to allocate message data.");
else {
log_error("Unable to read from event server.");
return 0;
}
}
bytes += ret;
if (header && (bytes == 2 * sizeof(uint32_t))) {
msg->cmd = ntohl(header[0]);
msg->size = ntohl(header[1]);
buf = msg->data = dm_malloc(msg->size);
size = msg->size;
bytes = 0;
header = 0;
}
}
if (bytes == size)
return 1;
bad:
free(msg->data);
msg->data = NULL;
return 0;
if (bytes != size) {
dm_free(msg->data);
msg->data = NULL;
}
return bytes == size;
}
/* Write message to daemon. */
@ -338,9 +329,10 @@ static int _daemon_write(struct dm_event_fifos *fifos,
if (ret < 0) {
if ((errno == EINTR) || (errno == EAGAIN))
continue;
log_error("Unable to talk to event daemon.");
return 0;
else {
log_error("Unable to talk to event daemon.");
return 0;
}
}
bytes += ret;
@ -380,13 +372,13 @@ int daemon_talk(struct dm_event_fifos *fifos,
*/
if (!_daemon_write(fifos, msg)) {
stack;
free(msg->data);
dm_free(msg->data);
msg->data = NULL;
return -EIO;
}
do {
free(msg->data);
dm_free(msg->data);
msg->data = NULL;
if (!_daemon_read(fifos, msg)) {
@ -462,8 +454,7 @@ static int _start_daemon(char *dmeventd_path, struct dm_event_fifos *fifos)
if (close(fifos->client))
log_sys_debug("close", fifos->client_path);
return 1;
}
if (errno != ENXIO && errno != ENOENT) {
} else if (errno != ENXIO && errno != ENOENT) {
/* problem */
log_sys_error("open", fifos->client_path);
return 0;
@ -615,8 +606,8 @@ static int _do_event(int cmd, char *dmeventd_path, struct dm_event_daemon_messag
{
int ret;
struct dm_event_fifos fifos = {
.client = -1,
.server = -1,
.client = -1,
/* FIXME Make these either configurable or depend directly on dmeventd_path */
.client_path = DM_EVENT_FIFO_CLIENT,
.server_path = DM_EVENT_FIFO_SERVER
@ -629,7 +620,7 @@ static int _do_event(int cmd, char *dmeventd_path, struct dm_event_daemon_messag
ret = daemon_talk(&fifos, msg, DM_EVENT_CMD_HELLO, NULL, NULL, 0, 0);
free(msg->data);
dm_free(msg->data);
msg->data = 0;
if (!ret)
@ -655,7 +646,6 @@ int dm_event_register_handler(const struct dm_event_handler *dmevh)
uuid = dm_task_get_uuid(dmt);
if (!strstr(dmevh->dso, "libdevmapper-event-lvm2thin.so") &&
!strstr(dmevh->dso, "libdevmapper-event-lvm2vdo.so") &&
!strstr(dmevh->dso, "libdevmapper-event-lvm2snapshot.so") &&
!strstr(dmevh->dso, "libdevmapper-event-lvm2mirror.so") &&
!strstr(dmevh->dso, "libdevmapper-event-lvm2raid.so"))
@ -670,7 +660,7 @@ int dm_event_register_handler(const struct dm_event_handler *dmevh)
ret = 0;
}
free(msg.data);
dm_free(msg.data);
dm_task_destroy(dmt);
@ -697,7 +687,7 @@ int dm_event_unregister_handler(const struct dm_event_handler *dmevh)
ret = 0;
}
free(msg.data);
dm_free(msg.data);
dm_task_destroy(dmt);
@ -709,11 +699,15 @@ int dm_event_unregister_handler(const struct dm_event_handler *dmevh)
static char *_fetch_string(char **src, const int delimiter)
{
char *p, *ret;
size_t len = (p = strchr(*src, delimiter)) ?
(size_t)(p - *src) : strlen(*src);
if ((ret = strndup(*src, len)))
*src += len + 1;
if ((p = strchr(*src, delimiter)))
*p = 0;
if ((ret = dm_strdup(*src)))
*src += strlen(ret) + 1;
if (p)
*p = delimiter;
return ret;
}
@ -729,11 +723,11 @@ static int _parse_message(struct dm_event_daemon_message *msg, char **dso_name,
(*dso_name = _fetch_string(&p, ' ')) &&
(*uuid = _fetch_string(&p, ' '))) {
*evmask = atoi(p);
free(id);
dm_free(id);
return 0;
}
free(id);
dm_free(id);
return -ENOMEM;
}
@ -761,10 +755,11 @@ int dm_event_get_registered_device(struct dm_event_handler *dmevh, int next)
uuid = dm_task_get_uuid(dmt);
/* FIXME Distinguish errors connecting to daemon */
if ((ret = _do_event(next ? DM_EVENT_CMD_GET_NEXT_REGISTERED_DEVICE :
DM_EVENT_CMD_GET_REGISTERED_DEVICE, dmevh->dmeventd_path,
&msg, dmevh->dso, uuid, dmevh->mask, 0))) {
if (_do_event(next ? DM_EVENT_CMD_GET_NEXT_REGISTERED_DEVICE :
DM_EVENT_CMD_GET_REGISTERED_DEVICE, dmevh->dmeventd_path,
&msg, dmevh->dso, uuid, dmevh->mask, 0)) {
log_debug("%s: device not registered.", dm_task_get_name(dmt));
ret = -ENOENT;
goto fail;
}
@ -775,7 +770,7 @@ int dm_event_get_registered_device(struct dm_event_handler *dmevh, int next)
dm_task_destroy(dmt);
dmt = NULL;
free(msg.data);
dm_free(msg.data);
msg.data = NULL;
_dm_event_handler_clear_dev_info(dmevh);
@ -784,7 +779,7 @@ int dm_event_get_registered_device(struct dm_event_handler *dmevh, int next)
goto fail;
}
if (!(dmevh->uuid = strdup(reply_uuid))) {
if (!(dmevh->uuid = dm_strdup(reply_uuid))) {
ret = -ENOMEM;
goto fail;
}
@ -797,13 +792,13 @@ int dm_event_get_registered_device(struct dm_event_handler *dmevh, int next)
dm_event_handler_set_dso(dmevh, reply_dso);
dm_event_handler_set_event_mask(dmevh, reply_mask);
free(reply_dso);
dm_free(reply_dso);
reply_dso = NULL;
free(reply_uuid);
dm_free(reply_uuid);
reply_uuid = NULL;
if (!(dmevh->dev_name = strdup(dm_task_get_name(dmt)))) {
if (!(dmevh->dev_name = dm_strdup(dm_task_get_name(dmt)))) {
ret = -ENOMEM;
goto fail;
}
@ -821,9 +816,9 @@ int dm_event_get_registered_device(struct dm_event_handler *dmevh, int next)
return ret;
fail:
free(msg.data);
free(reply_dso);
free(reply_uuid);
dm_free(msg.data);
dm_free(reply_dso);
dm_free(reply_uuid);
_dm_event_handler_clear_dev_info(dmevh);
if (dmt)
dm_task_destroy(dmt);
@ -870,38 +865,28 @@ void dm_event_log(const char *subsys, int level, const char *file,
int line, int dm_errno_or_class,
const char *format, va_list ap)
{
static int _abort_on_internal_errors = -1;
static pthread_mutex_t _log_mutex = PTHREAD_MUTEX_INITIALIZER;
static time_t start = 0;
const char *indent = "";
FILE *stream = log_stderr(level) ? stderr : stdout;
FILE *stream = stdout;
int prio;
time_t now;
int log_with_debug = 0;
if (subsys[0] == '#') {
/* Subsystems starting with '#' are logged
* only when debugging is enabled. */
log_with_debug++;
subsys++;
}
switch (log_level(level)) {
switch (level & ~(_LOG_STDERR | _LOG_ONCE)) {
case _LOG_DEBUG:
/* Never shown without -ddd */
if (_debug_level < 3)
return;
prio = LOG_DEBUG;
indent = " ";
break;
case _LOG_INFO:
if (log_with_debug && _debug_level < 2)
if (_debug_level < 2)
return;
prio = LOG_INFO;
indent = " ";
break;
case _LOG_NOTICE:
if (log_with_debug && _debug_level < 1)
if (_debug_level < 1)
return;
prio = LOG_NOTICE;
indent = " ";
@ -927,13 +912,12 @@ void dm_event_log(const char *subsys, int level, const char *file,
if (!start)
start = now;
now -= start;
if (_debug_level)
fprintf(stream, "[%2d:%02d] %8x:%-6s%s",
(int)now / 60, (int)now % 60,
// TODO: Maybe use shorter ID
// ((int)(pthread_self()) >> 6) & 0xffff,
(int)pthread_self(), subsys,
(_debug_level > 3) ? "" : indent);
fprintf(stream, "[%2d:%02d] %8x:%-6s%s",
(int)now / 60, (int)now % 60,
// TODO: Maybe use shorter ID
// ((int)(pthread_self()) >> 6) & 0xffff,
(int)pthread_self(), subsys,
(_debug_level > 3) ? "" : indent);
if (_debug_level > 3)
fprintf(stream, "%28s:%4d %s", file, line, indent);
vfprintf(stream, _(format), ap);
@ -942,15 +926,6 @@ void dm_event_log(const char *subsys, int level, const char *file,
}
pthread_mutex_unlock(&_log_mutex);
if (_abort_on_internal_errors < 0)
/* Set when env DM_ABORT_ON_INTERNAL_ERRORS is not "0" */
_abort_on_internal_errors =
strcmp(getenv("DM_ABORT_ON_INTERNAL_ERRORS") ? : "0", "0");
if (_abort_on_internal_errors &&
!strncmp(format, INTERNAL_ERROR, sizeof(INTERNAL_ERROR) - 1))
abort();
}
#if 0 /* left out for now */
@ -988,12 +963,12 @@ int dm_event_get_timeout(const char *device_path, uint32_t *timeout)
if (!p) {
log_error("Malformed reply from dmeventd '%s'.",
msg.data);
free(msg.data);
dm_free(msg.data);
return -EIO;
}
*timeout = atoi(p);
}
free(msg.data);
dm_free(msg.data);
return ret;
}

View File

@ -21,7 +21,6 @@
#ifndef LIB_DMEVENT_H
#define LIB_DMEVENT_H
#include <stdarg.h>
#include <stdint.h>
/*

View File

@ -8,3 +8,4 @@ Description: device-mapper event library
Version: @DM_LIB_PATCHLEVEL@
Cflags: -I${includedir}
Libs: -L${libdir} -ldevmapper-event
Requires.private: devmapper

View File

@ -1,6 +1,6 @@
#
# Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
# Copyright (C) 2004-2018 Red Hat, Inc. All rights reserved.
# Copyright (C) 2004-2005, 2011 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
@ -16,7 +16,27 @@ srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
SUBDIRS += lvm2 snapshot raid thin mirror vdo
SUBDIRS += lvm2
ifneq ("@MIRRORS@", "none")
SUBDIRS += mirror
endif
ifneq ("@SNAPSHOTS@", "none")
SUBDIRS += snapshot
endif
ifneq ("@RAID@", "none")
SUBDIRS += raid
endif
ifneq ("@THIN@", "none")
SUBDIRS += thin
endif
ifeq ($(MAKECMDGOALS),distclean)
SUBDIRS = lvm2 mirror snapshot raid thin
endif
include $(top_builddir)/make.tmpl
@ -24,4 +44,3 @@ snapshot: lvm2
mirror: lvm2
raid: lvm2
thin: lvm2
vdo: lvm2

View File

@ -16,7 +16,6 @@ top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
CLDFLAGS += -L$(top_builddir)/tools
LIBS += $(DMEVENT_LIBS) $(PTHREAD_LIBS) @LVM2CMD_LIB@
SOURCES = dmeventd_lvm.c
@ -25,6 +24,8 @@ LIB_VERSION = $(LIB_VERSION_LVM)
include $(top_builddir)/make.tmpl
LIBS += @LVM2CMD_LIB@ -ldevmapper $(PTHREAD_LIBS)
install_lvm2: install_lib_shared
install: install_lvm2

View File

@ -12,10 +12,10 @@
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib/misc/lib.h"
#include "lib.h"
#include "dmeventd_lvm.h"
#include "daemons/dmeventd/libdevmapper-event.h"
#include "tools/lvm2cmd.h"
#include "libdevmapper-event.h"
#include "lvm2cmd.h"
#include <pthread.h>
@ -31,15 +31,8 @@ static pthread_mutex_t _register_mutex = PTHREAD_MUTEX_INITIALIZER;
static int _register_count = 0;
static struct dm_pool *_mem_pool = NULL;
static void *_lvm_handle = NULL;
static DM_LIST_INIT(_env_registry);
struct env_data {
struct dm_list list;
const char *cmd;
const char *data;
};
DM_EVENT_LOG_FN("#lvm")
DM_EVENT_LOG_FN("lvm")
static void _lvm2_print_log(int level, const char *file, int line,
int dm_errno_or_class, const char *msg)
@ -71,7 +64,7 @@ int dmeventd_lvm2_init(void)
if (!_lvm_handle) {
lvm2_log_fn(_lvm2_print_log);
if (!(_lvm_handle = lvm2_init_threaded()))
if (!(_lvm_handle = lvm2_init()))
goto out;
/*
@ -107,7 +100,6 @@ void dmeventd_lvm2_exit(void)
lvm2_run(_lvm_handle, "_memlock_dec");
dm_pool_destroy(_mem_pool);
_mem_pool = NULL;
dm_list_init(&_env_registry);
lvm2_exit(_lvm_handle);
_lvm_handle = NULL;
log_debug("lvm plugin exited.");
@ -129,11 +121,8 @@ int dmeventd_lvm2_run(const char *cmdline)
int dmeventd_lvm2_command(struct dm_pool *mem, char *buffer, size_t size,
const char *cmd, const char *device)
{
static char _internal_prefix[] = "_dmeventd_";
char *vg = NULL, *lv = NULL, *layer;
int r;
struct env_data *env_data;
const char *env = NULL;
if (!dm_split_lvm_name(mem, device, &vg, &lv, &layer)) {
log_error("Unable to determine VG name from %s.",
@ -146,39 +135,6 @@ int dmeventd_lvm2_command(struct dm_pool *mem, char *buffer, size_t size,
(layer = strstr(lv, "_mlog")))
*layer = '\0';
if (!strncmp(cmd, _internal_prefix, sizeof(_internal_prefix) - 1)) {
/* check if ENVVAR wasn't already resolved */
dm_list_iterate_items(env_data, &_env_registry)
if (!strcmp(cmd, env_data->cmd)) {
env = env_data->data;
break;
}
if (!env) {
/* run lvm2 command to find out setting value */
dmeventd_lvm2_lock();
if (!dmeventd_lvm2_run(cmd) ||
!(env = getenv(cmd))) {
dmeventd_lvm2_unlock();
log_error("Unable to find configured command.");
return 0;
}
/* output of internal command passed via env var */
env = dm_pool_strdup(_mem_pool, env); /* copy with lock */
dmeventd_lvm2_unlock();
if (!env ||
!(env_data = dm_pool_zalloc(_mem_pool, sizeof(*env_data))) ||
!(env_data->cmd = dm_pool_strdup(_mem_pool, cmd))) {
log_error("Unable to allocate env memory.");
return 0;
}
env_data->data = env;
/* add to ENVVAR registry */
dm_list_add(&_env_registry, &env_data->list);
}
cmd = env;
}
r = dm_snprintf(buffer, size, "%s %s/%s", cmd, vg, lv);
dm_pool_free(mem, vg);

View File

@ -16,8 +16,8 @@ srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
INCLUDES += -I$(top_srcdir)/daemons/dmeventd/plugins/lvm2
CLDFLAGS += -L$(top_builddir)/daemons/dmeventd/plugins/lvm2
LIBS += -ldevmapper-event-lvm2
SOURCES = dmeventd_mirror.c
@ -25,8 +25,13 @@ LIB_NAME = libdevmapper-event-lvm2mirror
LIB_SHARED = $(LIB_NAME).$(LIB_SUFFIX)
LIB_VERSION = $(LIB_VERSION_LVM)
CFLOW_LIST = $(SOURCES)
CFLOW_LIST_TARGET = $(LIB_NAME).cflow
include $(top_builddir)/make.tmpl
LIBS += -ldevmapper-event-lvm2 -ldevmapper
install_lvm2: install_dm_plugin
install: install_lvm2

View File

@ -1,5 +1,5 @@
/*
* Copyright (C) 2005-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2005-2015 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@ -12,10 +12,10 @@
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib/misc/lib.h"
#include "daemons/dmeventd/plugins/lvm2/dmeventd_lvm.h"
#include "daemons/dmeventd/libdevmapper-event.h"
#include "lib/activate/activate.h"
#include "lib.h"
#include "libdevmapper-event.h"
#include "dmeventd_lvm.h"
#include "activate.h" /* For TARGET_NAME* */
/* FIXME Reformat to 80 char lines. */
@ -25,6 +25,7 @@
struct dso_state {
struct dm_pool *mem;
char cmd_lvscan[512];
char cmd_lvconvert[512];
};
@ -72,10 +73,8 @@ static int _get_mirror_event(struct dso_state *state, char *params)
unsigned i;
struct dm_status_mirror *ms;
if (!dm_get_status_mirror(state->mem, params, &ms)) {
log_error("Unable to parse mirror status string.");
return ME_IGNORE;
}
if (!dm_get_status_mirror(state->mem, params, &ms))
goto_out;
/* Check for bad mirror devices */
for (i = 0; i < ms->dev_count; ++i)
@ -96,19 +95,27 @@ static int _get_mirror_event(struct dso_state *state, char *params)
dm_pool_free(state->mem, ms);
return r;
out:
log_error("Unable to parse mirror status string.");
return ME_IGNORE;
}
static int _remove_failed_devices(const char *cmd_lvconvert, const char *device)
static int _remove_failed_devices(const char *cmd_lvscan, const char *cmd_lvconvert)
{
int r;
if (!dmeventd_lvm2_run_with_lock(cmd_lvscan))
log_info("Re-scan of mirrored device failed.");
/* if repair goes OK, report success even if lvscan has failed */
if (!dmeventd_lvm2_run_with_lock(cmd_lvconvert)) {
log_error("Repair of mirrored device %s failed.", device);
return 0;
}
r = dmeventd_lvm2_run_with_lock(cmd_lvconvert);
log_info("Repair of mirrored device %s finished successfully.", device);
log_info("Repair of mirrored device %s.",
(r) ? "finished successfully" : "failed");
return 1;
return r;
}
void process_event(struct dm_task *dmt,
@ -146,7 +153,8 @@ void process_event(struct dm_task *dmt,
break;
case ME_FAILURE:
log_error("Device failure in %s.", device);
if (!_remove_failed_devices(state->cmd_lvconvert, device))
if (!_remove_failed_devices(state->cmd_lvscan,
state->cmd_lvconvert))
/* FIXME Why are all the error return codes unused? Get rid of them? */
log_error("Failed to remove faulty devices in %s.",
device);
@ -160,7 +168,7 @@ void process_event(struct dm_task *dmt,
break;
default:
/* FIXME Provide value then! */
log_warn("WARNING: %s received unknown event.", device);
log_info("Unknown event received.");
}
} while (next);
}
@ -176,10 +184,17 @@ int register_device(const char *device,
if (!dmeventd_lvm2_init_with_pool("mirror_state", state))
goto_bad;
/* CANNOT use --config as this disables cached content */
if (!dmeventd_lvm2_command(state->mem, state->cmd_lvconvert, sizeof(state->cmd_lvconvert),
"lvconvert --repair --use-policies", device))
if (!dmeventd_lvm2_command(state->mem, state->cmd_lvscan, sizeof(state->cmd_lvscan),
"lvscan --cache", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}
if (!dmeventd_lvm2_command(state->mem, state->cmd_lvconvert, sizeof(state->cmd_lvconvert),
"lvconvert --repair --use-policies", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}
*user = state;
@ -189,9 +204,6 @@ int register_device(const char *device,
bad:
log_error("Failed to monitor mirror %s.", device);
if (state)
dmeventd_lvm2_exit_with_pool(state);
return 0;
}

View File

@ -15,8 +15,8 @@ srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
INCLUDES += -I$(top_srcdir)/daemons/dmeventd/plugins/lvm2
CLDFLAGS += -L$(top_builddir)/daemons/dmeventd/plugins/lvm2
LIBS += -ldevmapper-event-lvm2
SOURCES = dmeventd_raid.c
@ -24,8 +24,13 @@ LIB_NAME = libdevmapper-event-lvm2raid
LIB_SHARED = $(LIB_NAME).$(LIB_SUFFIX)
LIB_VERSION = $(LIB_VERSION_LVM)
CFLOW_LIST = $(SOURCES)
CFLOW_LIST_TARGET = $(LIB_NAME).cflow
include $(top_builddir)/make.tmpl
LIBS += -ldevmapper-event-lvm2 -ldevmapper
install_lvm2: install_dm_plugin
install: install_lvm2

View File

@ -1,5 +1,5 @@
/*
* Copyright (C) 2005-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2005-2016 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@ -12,16 +12,17 @@
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib/misc/lib.h"
#include "daemons/dmeventd/plugins/lvm2/dmeventd_lvm.h"
#include "daemons/dmeventd/libdevmapper-event.h"
#include "lib/config/defaults.h"
#include "lib.h"
#include "defaults.h"
#include "dmeventd_lvm.h"
#include "libdevmapper-event.h"
/* Hold enough elements for the maximum number of RAID images */
#define RAID_DEVS_ELEMS ((DEFAULT_RAID_MAX_IMAGES + 63) / 64)
struct dso_state {
struct dm_pool *mem;
char cmd_lvscan[512];
char cmd_lvconvert[512];
uint64_t raid_devs[RAID_DEVS_ELEMS];
int failed;
@ -37,7 +38,6 @@ static int _process_raid_event(struct dso_state *state, char *params, const char
struct dm_status_raid *status;
const char *d;
int dead = 0, r = 1;
uint32_t dev;
if (!dm_get_status_raid(state->mem, params, &status)) {
log_error("Failed to process status line for %s.", device);
@ -46,63 +46,41 @@ static int _process_raid_event(struct dso_state *state, char *params, const char
d = status->dev_health;
while ((d = strchr(d, 'D'))) {
dev = (uint32_t)(d - status->dev_health);
uint32_t dev = (uint32_t)(d - status->dev_health);
if (!(state->raid_devs[dev / 64] & (UINT64_C(1) << (dev % 64)))) {
state->raid_devs[dev / 64] |= (UINT64_C(1) << (dev % 64));
log_warn("WARNING: Device #%u of %s array, %s, has failed.",
dev, status->raid_type, device);
}
if (!(state->raid_devs[dev / 64] & (UINT64_C(1) << (dev % 64))))
log_error("Device #%u of %s array, %s, has failed.",
dev, status->raid_type, device);
state->raid_devs[dev / 64] |= (UINT64_C(1) << (dev % 64));
d++;
dead = 1;
}
/*
* if we are converting from non-RAID to RAID (e.g. linear -> raid1)
* and too many original devices die, such that we cannot continue
* the "recover" operation, the sync action will go to "idle", the
* unsynced devs will remain at 'a', and the original devices will
* NOT SWITCH TO 'D', but will remain at 'A' - hoping to be revived.
*
* This is simply the way the kernel works...
*/
if (!strcmp(status->sync_action, "idle") &&
(status->dev_health[0] == 'a') &&
(status->insync_regions < status->total_regions)) {
log_error("Primary sources for new RAID, %s, have failed.",
device);
dead = 1; /* run it through LVM repair */
}
if (dead) {
/*
* Use the first event to run a repair ignoring any additional ones.
*
* We presume lvconvert to do pre-repair
* checks to avoid bloat in this plugin.
*/
if (!state->warned && status->insync_regions < status->total_regions) {
if (status->insync_regions < status->total_regions) {
if (!state->warned)
log_warn("WARNING: waiting for resynchronization to finish "
"before initiating repair on RAID device %s", device);
state->warned = 1;
log_warn("WARNING: waiting for resynchronization to finish "
"before initiating repair on RAID device %s.", device);
/* Fall through to allow lvconvert to run. */
goto out; /* Not yet done syncing with accessible devices */
}
if (state->failed)
goto out; /* already reported */
state->failed = 1;
if (!dmeventd_lvm2_run_with_lock(state->cmd_lvscan))
log_warn("WARNING: Re-scan of RAID device %s failed.", device);
/* if repair goes OK, report success even if lvscan has failed */
if (!dmeventd_lvm2_run_with_lock(state->cmd_lvconvert)) {
log_error("Repair of RAID device %s failed.", device);
log_info("Repair of RAID device %s failed.", device);
r = 0;
}
} else {
state->failed = 0;
if (status->insync_regions == status->total_regions)
memset(&state->raid_devs, 0, sizeof(state->raid_devs));
log_info("%s array, %s, is %s in-sync.",
status->raid_type, device,
(status->insync_regions == status->total_regions) ? "now" : "not");
@ -155,9 +133,14 @@ int register_device(const char *device,
if (!dmeventd_lvm2_init_with_pool("raid_state", state))
goto_bad;
if (!dmeventd_lvm2_command(state->mem, state->cmd_lvconvert, sizeof(state->cmd_lvconvert),
"lvconvert --repair --use-policies", device))
if (!dmeventd_lvm2_command(state->mem, state->cmd_lvscan, sizeof(state->cmd_lvscan),
"lvscan --cache", device) ||
!dmeventd_lvm2_command(state->mem, state->cmd_lvconvert, sizeof(state->cmd_lvconvert),
"lvconvert --config devices{ignore_suspended_devices=1} "
"--repair --use-policies", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}
*user = state;
@ -167,9 +150,6 @@ int register_device(const char *device,
bad:
log_error("Failed to monitor RAID %s.", device);
if (state)
dmeventd_lvm2_exit_with_pool(state);
return 0;
}
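The raid_devs[] bitmap in the plugin above records which RAID images have already been reported as failed, so the warning fires only on the first failure of each image. A minimal standalone sketch of that test-and-set idiom follows; the array size and names here are illustrative, not the plugin's.

#include <stdint.h>
#include <stdio.h>

#define MAX_IMAGES 255                            /* illustrative limit */
#define BITMAP_ELEMS ((MAX_IMAGES + 63) / 64)     /* 64 bits per element */

static uint64_t failed_imgs[BITMAP_ELEMS];

/* Return 1 only the first time image 'dev' is marked as failed. */
static int mark_failed(uint32_t dev)
{
	uint64_t bit = UINT64_C(1) << (dev % 64);

	if (failed_imgs[dev / 64] & bit)
		return 0;                         /* already reported */

	failed_imgs[dev / 64] |= bit;
	return 1;
}

int main(void)
{
	int first = mark_failed(3);
	int second = mark_failed(3);

	printf("%d %d\n", first, second);         /* prints "1 0" */
	return 0;
}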

View File

@ -16,8 +16,8 @@ srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
INCLUDES += -I$(top_srcdir)/daemons/dmeventd/plugins/lvm2
CLDFLAGS += -L$(top_builddir)/daemons/dmeventd/plugins/lvm2
LIBS += -ldevmapper-event-lvm2
SOURCES = dmeventd_snapshot.c
@ -26,6 +26,8 @@ LIB_VERSION = $(LIB_VERSION_LVM)
include $(top_builddir)/make.tmpl
LIBS += -ldevmapper-event-lvm2 -ldevmapper
install_lvm2: install_dm_plugin
install: install_lvm2

View File

@ -12,9 +12,9 @@
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib/misc/lib.h"
#include "daemons/dmeventd/plugins/lvm2/dmeventd_lvm.h"
#include "daemons/dmeventd/libdevmapper-event.h"
#include "lib.h"
#include "dmeventd_lvm.h"
#include "libdevmapper-event.h"
#include <sys/sysmacros.h>
#include <sys/wait.h>
@ -175,7 +175,6 @@ void process_event(struct dm_task *dmt,
const char *device = dm_task_get_name(dmt);
int percent;
struct dm_info info;
int ret;
/* No longer monitoring, waiting for remove */
if (!state->percent_check)
@ -206,8 +205,7 @@ void process_event(struct dm_task *dmt,
/* Maybe configurable ? */
_remove(dm_task_get_uuid(dmt));
#endif
if ((ret = pthread_kill(pthread_self(), SIGALRM)) && (ret != ESRCH))
log_sys_error("pthread_kill", "self");
pthread_kill(pthread_self(), SIGALRM);
goto out;
}
@ -215,8 +213,7 @@ void process_event(struct dm_task *dmt,
/* TODO eventually recognize earlier when room is enough */
log_info("Dropping monitoring of fully provisioned snapshot %s.",
device);
if ((ret = pthread_kill(pthread_self(), SIGALRM)) && (ret != ESRCH))
log_sys_error("pthread_kill", "self");
pthread_kill(pthread_self(), SIGALRM);
goto out;
}
@ -234,7 +231,7 @@ void process_event(struct dm_task *dmt,
if (percent >= WARNING_THRESH) /* Print a warning to syslog. */
log_warn("WARNING: Snapshot %s is now %.2f%% full.",
device, dm_percent_to_round_float(percent, 2));
device, dm_percent_to_float(percent));
/* Try to extend the snapshot, in accord with user-set policies */
if (!_extend(state->cmd_lvextend))
@ -257,8 +254,10 @@ int register_device(const char *device,
if (!dmeventd_lvm2_command(state->mem, state->cmd_lvextend,
sizeof(state->cmd_lvextend),
"lvextend --use-policies", device))
"lvextend --use-policies", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}
state->percent_check = CHECK_MINIMUM;
*user = state;
@ -269,9 +268,6 @@ int register_device(const char *device,
bad:
log_error("Failed to monitor snapshot %s.", device);
if (state)
dmeventd_lvm2_exit_with_pool(state);
return 0;
}

View File

@ -15,8 +15,8 @@ srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
INCLUDES += -I$(top_srcdir)/daemons/dmeventd/plugins/lvm2
CLDFLAGS += -L$(top_builddir)/daemons/dmeventd/plugins/lvm2
LIBS += -ldevmapper-event-lvm2
SOURCES = dmeventd_thin.c
@ -24,8 +24,13 @@ LIB_NAME = libdevmapper-event-lvm2thin
LIB_SHARED = $(LIB_NAME).$(LIB_SUFFIX)
LIB_VERSION = $(LIB_VERSION_LVM)
CFLOW_LIST = $(SOURCES)
CFLOW_LIST_TARGET = $(LIB_NAME).cflow
include $(top_builddir)/make.tmpl
LIBS += -ldevmapper-event-lvm2 -ldevmapper
install_lvm2: install_dm_plugin
install: install_lvm2

View File

@ -1,5 +1,5 @@
/*
* Copyright (C) 2011-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2011-2016 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@ -12,16 +12,17 @@
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib/misc/lib.h"
#include "daemons/dmeventd/plugins/lvm2/dmeventd_lvm.h"
#include "daemons/dmeventd/libdevmapper-event.h"
#include "lib.h" /* using here lvm log */
#include "dmeventd_lvm.h"
#include "libdevmapper-event.h"
#include <sys/wait.h>
#include <stdarg.h>
#include <pthread.h>
/* TODO - move this mountinfo code into library to be reusable */
#ifdef __linux__
# include "libdm/misc/kdev_t.h"
# include "kdev_t.h"
#else
# define MAJOR(x) major((x))
# define MINOR(x) minor((x))
@ -39,118 +40,277 @@
#define UMOUNT_COMMAND "/bin/umount"
#define MAX_FAILS (256) /* ~42 mins between cmd call retry with 10s delay */
#define MAX_FAILS (10)
#define THIN_DEBUG 0
struct dso_state {
struct dm_pool *mem;
int metadata_percent_check;
int metadata_percent;
int data_percent_check;
int data_percent;
uint64_t known_metadata_size;
uint64_t known_data_size;
unsigned fails;
unsigned max_fails;
int restore_sigset;
sigset_t old_sigset;
pid_t pid;
char *argv[3];
char *cmd_str;
char cmd_str[1024];
};
DM_EVENT_LOG_FN("thin")
static int _run_command(struct dso_state *state)
#define UUID_PREFIX "LVM-"
/* Check whether the device UUID has the LVM- prefix and the device is open */
static int _has_unmountable_prefix(int major, int minor)
{
char val[16];
int i;
struct dm_task *dmt;
struct dm_info info;
const char *uuid;
int r = 0;
/* Mark that a possible lvm2 command is being run from dmeventd;
* lvm2 will not try to talk back to dmeventd while processing it */
(void) setenv("LVM_RUN_BY_DMEVENTD", "1", 1);
if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
return_0;
if (state->data_percent) {
/* Prepare some known data to env vars for easy use */
if (dm_snprintf(val, sizeof(val), "%d",
state->data_percent / DM_PERCENT_1) != -1)
(void) setenv("DMEVENTD_THIN_POOL_DATA", val, 1);
if (dm_snprintf(val, sizeof(val), "%d",
state->metadata_percent / DM_PERCENT_1) != -1)
(void) setenv("DMEVENTD_THIN_POOL_METADATA", val, 1);
} else {
/* For an error event it is up to the user to check status and decide */
log_debug("Error event processing.");
if (!dm_task_set_major_minor(dmt, major, minor, 1))
goto_out;
if (!dm_task_no_flush(dmt))
stack;
if (!dm_task_run(dmt))
goto out;
if (!dm_task_get_info(dmt, &info))
goto out;
if (!info.exists || !info.open_count)
goto out; /* Not open -> not mounted */
if (!(uuid = dm_task_get_uuid(dmt)))
goto out;
/* Check it is a public mountable LV:
* the UUID has the LVM- prefix and is 68 chars long */
if (memcmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1) ||
strlen(uuid) != 68)
goto out;
#if THIN_DEBUG
log_debug("Found logical volume %s (%u:%u).", uuid, major, minor);
#endif
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
/* Get dependencies for device, and try to find matching device */
static int _has_deps(const char *name, int tp_major, int tp_minor, int *dev_minor)
{
struct dm_task *dmt;
const struct dm_deps *deps;
struct dm_info info;
int major, minor;
int r = 0;
if (!(dmt = dm_task_create(DM_DEVICE_DEPS)))
return 0;
if (!dm_task_set_name(dmt, name))
goto out;
if (!dm_task_no_open_count(dmt))
goto out;
if (!dm_task_run(dmt))
goto out;
if (!dm_task_get_info(dmt, &info))
goto out;
if (!(deps = dm_task_get_deps(dmt)))
goto out;
if (!info.exists || deps->count != 1)
goto out;
major = (int) MAJOR(deps->device[0]);
minor = (int) MINOR(deps->device[0]);
if ((major != tp_major) || (minor != tp_minor))
goto out;
*dev_minor = info.minor;
if (!_has_unmountable_prefix(major, info.minor))
goto out;
#if THIN_DEBUG
{
char dev_name[PATH_MAX];
if (dm_device_get_name(major, minor, 0, dev_name, sizeof(dev_name)))
log_debug("Found %s (%u:%u) depends on %s.",
name, major, *dev_minor, dev_name);
}
#endif
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
/* Get all active devices */
static int _find_all_devs(dm_bitset_t bs, int tp_major, int tp_minor)
{
struct dm_task *dmt;
struct dm_names *names;
unsigned next = 0;
int minor, r = 1;
if (!(dmt = dm_task_create(DM_DEVICE_LIST)))
return 0;
if (!dm_task_run(dmt)) {
r = 0;
goto out;
}
log_verbose("Executing command: %s", state->cmd_str);
if (!(names = dm_task_get_names(dmt))) {
r = 0;
goto out;
}
/* TODO:
* Support parallel run of 'task' and its waitpid maintenance
* ATM we can't handle signaling of SIGALRM
* as signalling is not allowed while 'process_event()' is running
*/
if (!(state->pid = fork())) {
/* child */
(void) close(0);
for (i = 3; i < 255; ++i) (void) close(i);
execvp(state->argv[0], state->argv);
_exit(errno);
} else if (state->pid == -1) {
log_error("Can't fork command %s.", state->cmd_str);
state->fails = 1;
return 0;
if (!names->dev)
goto out;
do {
names = (struct dm_names *)((char *) names + next);
if (_has_deps(names->name, tp_major, tp_minor, &minor))
dm_bit_set(bs, minor);
next = names->next;
} while (next);
out:
dm_task_destroy(dmt);
return r;
}
static int _run(const char *cmd, ...)
{
va_list ap;
int argc = 1; /* for argv[0], i.e. cmd */
int i = 0;
const char **argv;
pid_t pid = fork();
int status;
if (pid == 0) { /* child */
va_start(ap, cmd);
while (va_arg(ap, const char *))
++argc;
va_end(ap);
/* + 1 for the terminating NULL */
argv = alloca(sizeof(const char *) * (argc + 1));
argv[0] = cmd;
va_start(ap, cmd);
while ((argv[++i] = va_arg(ap, const char *)));
va_end(ap);
execvp(cmd, (char **)argv);
log_sys_error("exec", cmd);
exit(127);
}
if (pid > 0) { /* parent */
if (waitpid(pid, &status, 0) != pid)
return 0; /* waitpid failed */
if (!WIFEXITED(status) || WEXITSTATUS(status))
return 0; /* the child failed */
}
if (pid < 0)
return 0; /* fork failed */
return 1; /* all good */
}
struct mountinfo_s {
const char *device;
struct dm_info info;
dm_bitset_t minors; /* Bitset for active thin pool minors */
};
static int _umount_device(char *buffer, unsigned major, unsigned minor,
char *target, void *cb_data)
{
struct mountinfo_s *data = cb_data;
char *words[10];
if ((major == data->info.major) && dm_bit(data->minors, minor)) {
if (dm_split_words(buffer, DM_ARRAY_SIZE(words), 0, words) < DM_ARRAY_SIZE(words))
words[9] = NULL; /* just don't show device name */
log_info("Unmounting thin %s (%d:%d) of thin pool %s (%u:%u) from mount point \"%s\".",
words[9] ? : "", major, minor, data->device,
data->info.major, data->info.minor,
target);
if (!_run(UMOUNT_COMMAND, "-fl", target, NULL))
log_error("Failed to lazy umount thin %s (%d:%d) from %s: %s.",
words[9], major, minor, target, strerror(errno));
}
return 1;
}
/*
* Find all thin pool LV users and try to umount them.
* TODO: work with read-only thin pool support
*/
static void _umount(struct dm_task *dmt)
{
/* TODO: Convert to use hash to reduce memory usage */
static const size_t MINORS = (1U << 20); /* 20 bit */
struct mountinfo_s data = { NULL };
if (!dm_task_get_info(dmt, &data.info))
return;
data.device = dm_task_get_name(dmt);
if (!(data.minors = dm_bitset_create(NULL, MINORS))) {
log_error("Failed to allocate bitset. Not unmounting %s.", data.device);
goto out;
}
if (!_find_all_devs(data.minors, data.info.major, data.info.minor)) {
log_error("Failed to detect mounted volumes for %s.", data.device);
goto out;
}
if (!dm_mountinfo_read(_umount_device, &data)) {
log_error("Could not parse mountinfo file.");
goto out;
}
out:
if (data.minors)
dm_bitset_destroy(data.minors);
}
static int _use_policy(struct dm_task *dmt, struct dso_state *state)
{
#if THIN_DEBUG
log_debug("dmeventd executes: %s.", state->cmd_str);
log_info("dmeventd executes: %s.", state->cmd_str);
#endif
if (state->argv[0])
return _run_command(state);
if (!dmeventd_lvm2_run_with_lock(state->cmd_str)) {
log_error("Failed command for %s.", dm_task_get_name(dmt));
state->fails = 1;
log_error("Failed to extend thin pool %s.",
dm_task_get_name(dmt));
state->fails++;
return 0;
}
state->fails = 0;
return 1;
}
/* Check if executed command has finished
* Only 1 command may run */
static int _wait_for_pid(struct dso_state *state)
{
int status = 0;
if (state->pid == -1)
return 1;
if (!waitpid(state->pid, &status, WNOHANG))
return 0;
/* Wait for finish */
if (WIFEXITED(status)) {
log_verbose("Child %d exited with status %d.",
state->pid, WEXITSTATUS(status));
state->fails = WEXITSTATUS(status) ? 1 : 0;
} else {
if (WIFSIGNALED(status))
log_verbose("Child %d was terminated with status %d.",
state->pid, WTERMSIG(status));
state->fails = 1;
}
state->pid = -1;
return 1;
}
@ -159,6 +319,7 @@ void process_event(struct dm_task *dmt,
void **user)
{
const char *device = dm_task_get_name(dmt);
int percent;
struct dso_state *state = *user;
struct dm_status_thin_pool *tps = NULL;
void *next = NULL;
@ -166,48 +327,25 @@ void process_event(struct dm_task *dmt,
char *target_type = NULL;
char *params;
int needs_policy = 0;
struct dm_task *new_dmt = NULL;
int needs_umount = 0;
#if THIN_DEBUG
log_debug("Watch for tp-data:%.2f%% tp-metadata:%.2f%%.",
dm_percent_to_round_float(state->data_percent_check, 2),
dm_percent_to_round_float(state->metadata_percent_check, 2));
dm_percent_to_float(state->data_percent_check),
dm_percent_to_float(state->metadata_percent_check));
#endif
if (!_wait_for_pid(state)) {
log_warn("WARNING: Skipping event, child %d is still running (%s).",
state->pid, state->cmd_str);
return;
}
#if 0
/* No longer monitoring, waiting for remove */
if (!state->meta_percent_check && !state->data_percent_check)
return;
#endif
if (event & DM_EVENT_DEVICE_ERROR) {
/* Error -> no need to check and do instant resize */
state->data_percent = state->metadata_percent = 0;
if (_use_policy(dmt, state))
goto out;
stack;
/*
* Rather update oldish status
* since after 'command' processing
* percentage info could have changed a lot.
* If we would get above UMOUNT_THRESH
* we would wait for next sigalarm.
*/
if (!(new_dmt = dm_task_create(DM_DEVICE_STATUS)))
goto_out;
if (!dm_task_set_uuid(new_dmt, dm_task_get_uuid(dmt)))
goto_out;
/* Non-blocking status read */
if (!dm_task_no_flush(new_dmt))
log_warn("WARNING: Can't set no_flush for dm status.");
if (!dm_task_run(new_dmt))
goto_out;
dmt = new_dmt;
}
dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
@ -219,6 +357,7 @@ void process_event(struct dm_task *dmt,
if (!dm_get_status_thin_pool(state->mem, params, &tps)) {
log_error("Failed to parse status.");
needs_umount = 1;
goto out;
}
@ -233,110 +372,67 @@ void process_event(struct dm_task *dmt,
if (state->known_metadata_size != tps->total_metadata_blocks) {
state->metadata_percent_check = CHECK_MINIMUM;
state->known_metadata_size = tps->total_metadata_blocks;
state->fails = 0;
}
if (state->known_data_size != tps->total_data_blocks) {
state->data_percent_check = CHECK_MINIMUM;
state->known_data_size = tps->total_data_blocks;
state->fails = 0;
}
/*
* Trigger action when threshold boundary is exceeded.
* Report 80% threshold warning when it's used above 80%.
* Only 100% is an exception as it cannot be surpassed, so policy
* action is called for: >50%, >55% ... >95%, 100%
*/
state->metadata_percent = dm_make_percent(tps->used_metadata_blocks, tps->total_metadata_blocks);
if ((state->metadata_percent > WARNING_THRESH) &&
(state->metadata_percent > state->metadata_percent_check))
log_warn("WARNING: Thin pool %s metadata is now %.2f%% full.",
device, dm_percent_to_round_float(state->metadata_percent, 2));
if (state->metadata_percent > CHECK_MINIMUM) {
/* Run action when usage has risen more than CHECK_STEP since the last time */
if (state->metadata_percent > state->metadata_percent_check)
needs_policy = 1;
state->metadata_percent_check = (state->metadata_percent / CHECK_STEP + 1) * CHECK_STEP;
if (state->metadata_percent_check == DM_PERCENT_100)
state->metadata_percent_check--; /* Can't get bigger than 100% */
} else
state->metadata_percent_check = CHECK_MINIMUM;
percent = dm_make_percent(tps->used_metadata_blocks, tps->total_metadata_blocks);
if (percent >= state->metadata_percent_check) {
/*
* Usage has risen more than CHECK_STEP since the last
* time. Run actions.
*/
state->metadata_percent_check = (percent / CHECK_STEP) * CHECK_STEP + CHECK_STEP;
state->data_percent = dm_make_percent(tps->used_data_blocks, tps->total_data_blocks);
if ((state->data_percent > WARNING_THRESH) &&
(state->data_percent > state->data_percent_check))
log_warn("WARNING: Thin pool %s data is now %.2f%% full.",
device, dm_percent_to_round_float(state->data_percent, 2));
if (state->data_percent > CHECK_MINIMUM) {
/* Run action when usage has risen more than CHECK_STEP since the last time */
if (state->data_percent > state->data_percent_check)
needs_policy = 1;
state->data_percent_check = (state->data_percent / CHECK_STEP + 1) * CHECK_STEP;
if (state->data_percent_check == DM_PERCENT_100)
state->data_percent_check--; /* Can't get bigger than 100% */
} else
state->data_percent_check = CHECK_MINIMUM;
/* FIXME: extension of metadata needs to be written! */
if (percent >= WARNING_THRESH) /* Print a warning to syslog. */
log_warn("WARNING: Thin pool %s metadata is now %.2f%% full.",
device, dm_percent_to_float(percent));
needs_policy = 1;
/* Reduce the number of _use_policy() calls by a power-of-2 factor until the MAX_FAILS frequency is reached.
* This avoids too many error retries, yet still logs some status messages regularly,
* e.g. a PV could have been pvmoved and the VG/LV locked for a while...
*/
if (state->fails) {
if (state->fails++ <= state->max_fails) {
log_debug("Postponing frequently failing policy (%u <= %u).",
state->fails - 1, state->max_fails);
goto out;
}
if (state->max_fails < MAX_FAILS)
state->max_fails <<= 1;
state->fails = needs_policy = 1; /* Retry failing command */
} else
state->max_fails = 1; /* Reset on success */
if (percent >= UMOUNT_THRESH)
needs_umount = 1;
}
if (needs_policy)
_use_policy(dmt, state);
percent = dm_make_percent(tps->used_data_blocks, tps->total_data_blocks);
if (percent >= state->data_percent_check) {
/*
* Usage has risen more than CHECK_STEP since
* the last time. Run actions.
*/
state->data_percent_check = (percent / CHECK_STEP) * CHECK_STEP + CHECK_STEP;
if (percent >= WARNING_THRESH) /* Print a warning to syslog. */
log_warn("WARNING: Thin pool %s data is now %.2f%% full.",
device, dm_percent_to_float(percent));
needs_policy = 1;
if (percent >= UMOUNT_THRESH)
needs_umount = 1;
}
if (needs_policy &&
_use_policy(dmt, state))
needs_umount = 0; /* No umount when command was successful */
out:
if (needs_umount) {
_umount(dmt);
/* Until something changes, do not retry any more actions */
state->data_percent_check = state->metadata_percent_check = (DM_PERCENT_1 * 101);
}
if (tps)
dm_pool_free(state->mem, tps);
if (new_dmt)
dm_task_destroy(new_dmt);
}
/* Handle SIGCHLD for a thread */
static void _sig_child(int signum __attribute__((unused)))
{
/* empty SIG_IGN */;
}
/* Setup handler for SIGCHLD when executing external command
* to get quick 'waitpid()' reaction
* It will interrupt syscall just like SIGALRM and
* invoke process_event().
*/
static void _init_thread_signals(struct dso_state *state)
{
struct sigaction act = { .sa_handler = _sig_child };
sigset_t my_sigset;
sigemptyset(&my_sigset);
if (sigaction(SIGCHLD, &act, NULL))
log_warn("WARNING: Failed to set SIGCHLD action.");
else if (sigaddset(&my_sigset, SIGCHLD))
log_warn("WARNING: Failed to add SIGCHLD to set.");
else if (pthread_sigmask(SIG_UNBLOCK, &my_sigset, &state->old_sigset))
log_warn("WARNING: Failed to unblock SIGCHLD.");
else
state->restore_sigset = 1;
}
static void _restore_thread_signals(struct dso_state *state)
{
if (state->restore_sigset &&
pthread_sigmask(SIG_SETMASK, &state->old_sigset, NULL))
log_warn("WARNING: Failed to block SIGCHLD.");
if (state->fails >= MAX_FAILS) {
log_warn("WARNING: Dropping monitoring of %s. "
"lvm2 command fails too often (%u times in row).",
device, state->fails);
pthread_kill(pthread_self(), SIGALRM);
}
}
int register_device(const char *device,
@ -346,56 +442,28 @@ int register_device(const char *device,
void **user)
{
struct dso_state *state;
char *str;
char cmd_str[PATH_MAX + 128 + 2]; /* cmd ' ' vg/lv \0 */
if (!dmeventd_lvm2_init_with_pool("thin_pool_state", state))
goto_bad;
if (!dmeventd_lvm2_command(state->mem, cmd_str, sizeof(cmd_str),
"_dmeventd_thin_command", device))
if (!dmeventd_lvm2_command(state->mem, state->cmd_str,
sizeof(state->cmd_str),
"lvextend --use-policies",
device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}
if (strncmp(cmd_str, "lvm ", 4) == 0) {
if (!(state->cmd_str = dm_pool_strdup(state->mem, cmd_str + 4))) {
log_error("Failed to copy lvm command.");
goto bad;
}
} else if (cmd_str[0] == '/') {
if (!(state->cmd_str = dm_pool_strdup(state->mem, cmd_str))) {
log_error("Failed to copy thin command.");
goto bad;
}
/* Find last space before 'vg/lv' */
if (!(str = strrchr(state->cmd_str, ' ')))
goto inval;
if (!(state->argv[0] = dm_pool_strndup(state->mem, state->cmd_str,
str - state->cmd_str))) {
log_error("Failed to copy command.");
goto bad;
}
state->argv[1] = str + 1; /* 1 argument - vg/lv */
_init_thread_signals(state);
} else /* Unsupported command format */
goto inval;
state->pid = -1;
state->metadata_percent_check = CHECK_MINIMUM;
state->data_percent_check = CHECK_MINIMUM;
*user = state;
log_info("Monitoring thin pool %s.", device);
return 1;
inval:
log_error("Invalid command for monitoring: %s.", cmd_str);
bad:
log_error("Failed to monitor thin pool %s.", device);
if (state)
dmeventd_lvm2_exit_with_pool(state);
return 0;
}
@ -406,28 +474,6 @@ int unregister_device(const char *device,
void **user)
{
struct dso_state *state = *user;
int i;
for (i = 0; !_wait_for_pid(state) && (i < 6); ++i) {
if (i == 0)
/* Give it 2 seconds, then try to terminate & kill it */
log_verbose("Child %d still not finished (%s) waiting.",
state->pid, state->cmd_str);
else if (i == 3) {
log_warn("WARNING: Terminating child %d.", state->pid);
kill(state->pid, SIGINT);
kill(state->pid, SIGTERM);
} else if (i == 5) {
log_warn("WARNING: Killing child %d.", state->pid);
kill(state->pid, SIGKILL);
}
sleep(1);
}
if (state->pid != -1)
log_warn("WARNING: Cannot kill child %d!", state->pid);
_restore_thread_signals(state);
dmeventd_lvm2_exit_with_pool(state);
log_info("No longer monitoring thin pool %s.", device);

View File

@ -1,3 +0,0 @@
process_event
register_device
unregister_device

View File

@ -1,413 +0,0 @@
/*
* Copyright (C) 2018 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib/misc/lib.h"
#include "daemons/dmeventd/plugins/lvm2/dmeventd_lvm.h"
#include "daemons/dmeventd/libdevmapper-event.h"
/*
* Use parser from new device_mapper library.
* Although dm_vdo_status_parse() is visible during compilation,
* at runtime we are linked against the system's older libdm library,
* which does not provide this symbol, and the plugin would fail to load
*/
/* coverity[unnecessary_header] used for parsing */
#include "device_mapper/vdo/status.c"
#include <sys/wait.h>
#include <stdarg.h>
/* First warning when VDO pool is 80% full. */
#define WARNING_THRESH (DM_PERCENT_1 * 80)
/* Run a check every 5%. */
#define CHECK_STEP (DM_PERCENT_1 * 5)
/* Do not bother checking when the VDO pool is less than 50% full. */
#define CHECK_MINIMUM (DM_PERCENT_1 * 50)
#define MAX_FAILS (256) /* ~42 mins between cmd call retry with 10s delay */
#define VDO_DEBUG 0
struct dso_state {
struct dm_pool *mem;
int percent_check;
int percent;
uint64_t known_data_size;
unsigned fails;
unsigned max_fails;
int restore_sigset;
sigset_t old_sigset;
pid_t pid;
char *argv[3];
const char *cmd_str;
const char *name;
};
DM_EVENT_LOG_FN("vdo")
static int _run_command(struct dso_state *state)
{
char val[16];
int i;
/* Mark that a possible lvm2 command is being run from dmeventd;
* lvm2 will not try to talk back to dmeventd while processing it */
(void) setenv("LVM_RUN_BY_DMEVENTD", "1", 1);
if (state->percent) {
/* Prepare some known data to env vars for easy use */
if (dm_snprintf(val, sizeof(val), "%d",
state->percent / DM_PERCENT_1) != -1)
(void) setenv("DMEVENTD_VDO_POOL", val, 1);
} else {
/* For an error event it is up to the user to check status and decide */
log_debug("Error event processing.");
}
log_verbose("Executing command: %s", state->cmd_str);
/* TODO:
* Support parallel run of 'task' and its waitpid maintenance
* ATM we can't handle signaling of SIGALRM
* as signalling is not allowed while 'process_event()' is running
*/
if (!(state->pid = fork())) {
/* child */
(void) close(0);
for (i = 3; i < 255; ++i) (void) close(i);
execvp(state->argv[0], state->argv);
_exit(errno);
} else if (state->pid == -1) {
log_error("Can't fork command %s.", state->cmd_str);
state->fails = 1;
return 0;
}
return 1;
}
static int _use_policy(struct dm_task *dmt, struct dso_state *state)
{
#if VDO_DEBUG
log_debug("dmeventd executes: %s.", state->cmd_str);
#endif
if (state->argv[0])
return _run_command(state);
if (!dmeventd_lvm2_run_with_lock(state->cmd_str)) {
log_error("Failed command for %s.", dm_task_get_name(dmt));
state->fails = 1;
return 0;
}
state->fails = 0;
return 1;
}
/* Check if executed command has finished
* Only 1 command may run */
static int _wait_for_pid(struct dso_state *state)
{
int status = 0;
if (state->pid == -1)
return 1;
if (!waitpid(state->pid, &status, WNOHANG))
return 0;
/* Wait for finish */
if (WIFEXITED(status)) {
log_verbose("Child %d exited with status %d.",
state->pid, WEXITSTATUS(status));
state->fails = WEXITSTATUS(status) ? 1 : 0;
} else {
if (WIFSIGNALED(status))
log_verbose("Child %d was terminated with status %d.",
state->pid, WTERMSIG(status));
state->fails = 1;
}
state->pid = -1;
return 1;
}
void process_event(struct dm_task *dmt,
enum dm_event_mask event __attribute__((unused)),
void **user)
{
const char *device = dm_task_get_name(dmt);
struct dso_state *state = *user;
void *next = NULL;
uint64_t start, length;
char *target_type = NULL;
char *params;
int needs_policy = 0;
struct dm_task *new_dmt = NULL;
struct dm_vdo_status_parse_result vdop = { .status = NULL };
#if VDO_DEBUG
log_debug("Watch for VDO %s:%.2f%%.", state->name,
dm_percent_to_round_float(state->percent_check, 2));
#endif
if (!_wait_for_pid(state)) {
log_warn("WARNING: Skipping event, child %d is still running (%s).",
state->pid, state->cmd_str);
return;
}
if (event & DM_EVENT_DEVICE_ERROR) {
#if VDO_DEBUG
log_debug("VDO event error.");
#endif
/* Error -> no need to check and do instant resize */
state->percent = 0;
if (_use_policy(dmt, state))
goto out;
stack;
if (!(new_dmt = dm_task_create(DM_DEVICE_STATUS)))
goto_out;
if (!dm_task_set_uuid(new_dmt, dm_task_get_uuid(dmt)))
goto_out;
/* Non-blocking status read */
if (!dm_task_no_flush(new_dmt))
log_warn("WARNING: Can't set no_flush for dm status.");
if (!dm_task_run(new_dmt))
goto_out;
dmt = new_dmt;
}
dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
if (!target_type || (strcmp(target_type, "vdo") != 0)) {
log_error("Invalid target type.");
goto out;
}
if (!dm_vdo_status_parse(state->mem, params, &vdop)) {
log_error("Failed to parse status.");
goto out;
}
state->percent = dm_make_percent(vdop.status->used_blocks,
vdop.status->total_blocks);
#if VDO_DEBUG
log_debug("VDO %s status %.2f%% " FMTu64 "/" FMTu64 ".",
state->name, dm_percent_to_round_float(state->percent, 2),
vdop.status->used_blocks, vdop.status->total_blocks);
#endif
/* VDO pool size has changed. Clear the threshold. */
if (state->known_data_size != vdop.status->total_blocks) {
state->percent_check = CHECK_MINIMUM;
state->known_data_size = vdop.status->total_blocks;
state->fails = 0;
}
/*
* Trigger action when threshold boundary is exceeded.
* Report 80% threshold warning when it's used above 80%.
* Only 100% is an exception as it cannot be surpassed, so policy
* action is called for: >50%, >55% ... >95%, 100%
*/
if ((state->percent > WARNING_THRESH) &&
(state->percent > state->percent_check))
log_warn("WARNING: VDO %s %s is now %.2f%% full.",
state->name, device,
dm_percent_to_round_float(state->percent, 2));
if (state->percent > CHECK_MINIMUM) {
/* Run action when usage has risen more than CHECK_STEP since the last time */
if (state->percent > state->percent_check)
needs_policy = 1;
state->percent_check = (state->percent / CHECK_STEP + 1) * CHECK_STEP;
if (state->percent_check == DM_PERCENT_100)
state->percent_check--; /* Can't get bigger than 100% */
} else
state->percent_check = CHECK_MINIMUM;
/* Reduce the number of _use_policy() calls by a power-of-2 factor until the MAX_FAILS frequency is reached.
* This avoids too many error retries, yet still logs some status messages regularly,
* e.g. a PV could have been pvmoved and the VG/LV locked for a while...
*/
if (state->fails) {
if (state->fails++ <= state->max_fails) {
log_debug("Postponing frequently failing policy (%u <= %u).",
state->fails - 1, state->max_fails);
goto out;
}
if (state->max_fails < MAX_FAILS)
state->max_fails <<= 1;
state->fails = needs_policy = 1; /* Retry failing command */
} else
state->max_fails = 1; /* Reset on success */
if (needs_policy)
_use_policy(dmt, state);
out:
if (vdop.status)
dm_pool_free(state->mem, vdop.status);
if (new_dmt)
dm_task_destroy(new_dmt);
}
/* Handle SIGCHLD for a thread */
static void _sig_child(int signum __attribute__((unused)))
{
/* empty SIG_IGN */;
}
/* Setup handler for SIGCHLD when executing external command
* to get quick 'waitpid()' reaction
* It will interrupt syscall just like SIGALRM and
* invoke process_event().
*/
static void _init_thread_signals(struct dso_state *state)
{
struct sigaction act = { .sa_handler = _sig_child };
sigset_t my_sigset;
sigemptyset(&my_sigset);
if (sigaction(SIGCHLD, &act, NULL))
log_warn("WARNING: Failed to set SIGCHLD action.");
else if (sigaddset(&my_sigset, SIGCHLD))
log_warn("WARNING: Failed to add SIGCHLD to set.");
else if (pthread_sigmask(SIG_UNBLOCK, &my_sigset, &state->old_sigset))
log_warn("WARNING: Failed to unblock SIGCHLD.");
else
state->restore_sigset = 1;
}
static void _restore_thread_signals(struct dso_state *state)
{
if (state->restore_sigset &&
pthread_sigmask(SIG_SETMASK, &state->old_sigset, NULL))
log_warn("WARNING: Failed to block SIGCHLD.");
}
int register_device(const char *device,
const char *uuid,
int major __attribute__((unused)),
int minor __attribute__((unused)),
void **user)
{
struct dso_state *state;
const char *cmd;
char *str;
char cmd_str[PATH_MAX + 128 + 2]; /* cmd ' ' vg/lv \0 */
const char *name = "pool";
if (!dmeventd_lvm2_init_with_pool("vdo_pool_state", state))
goto_bad;
state->cmd_str = "";
/* Search for command for LVM- prefixed devices only */
cmd = (strncmp(uuid, "LVM-", 4) == 0) ? "_dmeventd_vdo_command" : "";
if (!dmeventd_lvm2_command(state->mem, cmd_str, sizeof(cmd_str), cmd, device))
goto_bad;
if (strncmp(cmd_str, "lvm ", 4) == 0) {
if (!(state->cmd_str = dm_pool_strdup(state->mem, cmd_str + 4))) {
log_error("Failed to copy lvm VDO command.");
goto bad;
}
} else if (cmd_str[0] == '/') {
if (!(state->cmd_str = dm_pool_strdup(state->mem, cmd_str))) {
log_error("Failed to copy VDO command.");
goto bad;
}
/* Find last space before 'vg/lv' */
if (!(str = strrchr(state->cmd_str, ' ')))
goto inval;
if (!(state->argv[0] = dm_pool_strndup(state->mem, state->cmd_str,
str - state->cmd_str))) {
log_error("Failed to copy command.");
goto bad;
}
state->argv[1] = str + 1; /* 1 argument - vg/lv */
_init_thread_signals(state);
} else if (cmd[0] == 0) {
state->name = "volume"; /* What to use with 'others?' */
} else /* Unsupported command format */
goto inval;
state->pid = -1;
state->name = name;
*user = state;
log_info("Monitoring VDO %s %s.", name, device);
return 1;
inval:
log_error("Invalid command for monitoring: %s.", cmd_str);
bad:
log_error("Failed to monitor VDO %s %s.", name, device);
if (state)
dmeventd_lvm2_exit_with_pool(state);
return 0;
}
int unregister_device(const char *device,
const char *uuid __attribute__((unused)),
int major __attribute__((unused)),
int minor __attribute__((unused)),
void **user)
{
struct dso_state *state = *user;
const char *name = state->name;
int i;
for (i = 0; !_wait_for_pid(state) && (i < 6); ++i) {
if (i == 0)
/* Give it 2 seconds, then try to terminate & kill it */
log_verbose("Child %d still not finished (%s) waiting.",
state->pid, state->cmd_str);
else if (i == 3) {
log_warn("WARNING: Terminating child %d.", state->pid);
kill(state->pid, SIGINT);
kill(state->pid, SIGTERM);
} else if (i == 5) {
log_warn("WARNING: Killing child %d.", state->pid);
kill(state->pid, SIGKILL);
}
sleep(1);
}
if (state->pid != -1)
log_warn("WARNING: Cannot kill child %d!", state->pid);
_restore_thread_signals(state);
dmeventd_lvm2_exit_with_pool(state);
log_info("No longer monitoring VDO %s %s.", name, device);
return 1;
}
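Both the thin and VDO plugins above round the next *_percent_check threshold up to the following CHECK_STEP (5%) boundary, and cap it just below 100% so a completely full pool keeps re-triggering the policy. A small standalone sketch of that arithmetic, using whole percent values instead of the dm_percent_t fixed-point units the plugins actually use:

#include <stdio.h>

#define CHECK_STEP    5     /* the plugins use DM_PERCENT_1 fixed-point units */
#define CHECK_MINIMUM 50
#define PERCENT_100   100

/* Next usage threshold at which the policy command runs again. */
static int next_check(int percent)
{
	int check;

	if (percent <= CHECK_MINIMUM)
		return CHECK_MINIMUM;

	check = (percent / CHECK_STEP + 1) * CHECK_STEP;
	if (check == PERCENT_100)
		check--;            /* so 100% still exceeds the threshold */

	return check;
}

int main(void)
{
	/* 63% -> act again at 65%, 97% -> act again just below 100% */
	printf("%d %d\n", next_check(63), next_check(97));   /* prints "65 99" */
	return 0;
}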

View File

@ -1,4 +0,0 @@
path.py
lvmdbusd
lvmdb.py
lvm_shell_proxy.py

View File

@ -15,8 +15,7 @@ srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
lvmdbuspydir = $(python3dir)/lvmdbusd
lvmdbusdir = $(DESTDIR)$(lvmdbuspydir)
lvmdbusdir = $(python3dir)/lvmdbusd
LVMDBUS_SRCDIR_FILES = \
automatedproperties.py \
@ -24,51 +23,45 @@ LVMDBUS_SRCDIR_FILES = \
cfg.py \
cmdhandler.py \
fetch.py \
__init__.py \
job.py \
loader.py \
lv.py \
lvmdb.py \
main.py \
lvm_shell_proxy.py \
lv.py \
manager.py \
objectmanager.py \
pv.py \
refresh.py \
request.py \
state.py \
udevwatch.py \
utils.py \
vg.py \
__init__.py
vg.py
LVMDBUS_BUILDDIR_FILES = \
lvmdb.py \
lvm_shell_proxy.py \
path.py
LVMDBUSD = lvmdbusd
CLEAN_DIRS += __pycache__
LVMDBUSD = $(srcdir)/lvmdbusd
include $(top_builddir)/make.tmpl
.PHONY: install_lvmdbusd
all:
$(Q) test -x $(LVMDBUSD) || chmod 755 $(LVMDBUSD)
install_lvmdbusd: $(LVMDBUSD)
@echo " [INSTALL] $<"
$(Q) $(INSTALL_DIR) $(sbindir)
$(Q) $(INSTALL_SCRIPT) $(LVMDBUSD) $(sbindir)
$(Q) $(INSTALL_DIR) $(lvmdbusdir)
$(Q) (cd $(srcdir); $(INSTALL_DATA) $(LVMDBUS_SRCDIR_FILES) $(lvmdbusdir))
$(Q) $(INSTALL_DATA) $(LVMDBUS_BUILDDIR_FILES) $(lvmdbusdir)
$(Q) PYTHON=$(PYTHON3) $(PYCOMPILE) --destdir "$(DESTDIR)" --basedir "$(lvmdbuspydir)" $(LVMDBUS_SRCDIR_FILES) $(LVMDBUS_BUILDDIR_FILES)
$(Q) $(CHMOD) 755 $(lvmdbusdir)/__pycache__
$(Q) $(CHMOD) 444 $(lvmdbusdir)/__pycache__/*.py[co]
install_lvmdbusd:
$(INSTALL_DIR) $(sbindir)
$(INSTALL_SCRIPT) $(LVMDBUSD) $(sbindir)
$(INSTALL_DIR) $(DESTDIR)$(lvmdbusdir)
(cd $(srcdir); $(INSTALL_DATA) $(LVMDBUS_SRCDIR_FILES) $(DESTDIR)$(lvmdbusdir))
$(INSTALL_DATA) $(LVMDBUS_BUILDDIR_FILES) $(DESTDIR)$(lvmdbusdir)
PYTHON=$(PYTHON3) $(PYCOMPILE) --destdir "$(DESTDIR)" --basedir "$(lvmdbusdir)" $(LVMDBUS_SRCDIR_FILES) $(LVMDBUS_BUILDDIR_FILES)
$(CHMOD) 755 $(DESTDIR)$(lvmdbusdir)/__pycache__
$(CHMOD) 444 $(DESTDIR)$(lvmdbusdir)/__pycache__/*.py[co]
install_lvm2: install_lvmdbusd
install: install_lvm2
DISTCLEAN_TARGETS+= \
$(LVMDBUS_BUILDDIR_FILES) \
$(LVMDBUSD)
$(LVMDBUS_BUILDDIR_FILES)

View File

@ -38,7 +38,7 @@ class AutomatedProperties(dbus.service.Object):
props = {}
for i in self.interface():
props[i] = AutomatedProperties._get_all_prop(self, i)
props[i] = self.GetAll(i)
return self._ap_o_path, props
@ -65,52 +65,31 @@ class AutomatedProperties(dbus.service.Object):
return self._ap_interface
@staticmethod
def _get_prop(obj, interface_name, property_name):
value = getattr(obj, property_name)
# Properties
# noinspection PyUnusedLocal
@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
in_signature='ss', out_signature='v')
def Get(self, interface_name, property_name):
value = getattr(self, property_name)
# Note: If we get an exception in this handler we won't know about it,
# only the side effect of no returned value!
log_debug('Get (%s), type (%s), value(%s)' %
(property_name, str(type(value)), str(value)))
return value
# Properties
# noinspection PyUnusedLocal
@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
in_signature='ss', out_signature='v',
async_callbacks=('cb', 'cbe'))
def Get(self, interface_name, property_name, cb, cbe):
# Note: If we get an exception in this handler we won't know about it,
# only the side effect of no returned value!
r = cfg.create_request_entry(
-1, AutomatedProperties._get_prop,
(self, interface_name, property_name),
cb, cbe, False)
cfg.worker_q.put(r)
@staticmethod
def _get_all_prop(obj, interface_name):
if interface_name in obj.interface(True):
in_signature='s', out_signature='a{sv}')
def GetAll(self, interface_name):
if interface_name in self.interface(True):
# Using introspection, let's build this dynamically
properties = get_properties(obj)
properties = get_properties(self)
if interface_name in properties:
return properties[interface_name][1]
return {}
raise dbus.exceptions.DBusException(
obj._ap_interface,
self._ap_interface,
'The object %s does not implement the %s interface'
% (obj.__class__, interface_name))
@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
in_signature='s', out_signature='a{sv}',
async_callbacks=('cb', 'cbe'))
def GetAll(self, interface_name, cb, cbe):
r = cfg.create_request_entry(
-1, AutomatedProperties._get_all_prop,
(self, interface_name),
cb, cbe, False)
cfg.worker_q.put(r)
% (self.__class__, interface_name))
@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
in_signature='ssv')
@ -155,17 +134,16 @@ class AutomatedProperties(dbus.service.Object):
# through all dbus objects as some don't have a search method, like
# 'Manager' object.
if not self._ap_search_method:
return 0
return
search = self.lvm_id
if search_key:
search = search_key
# Either we have the new object state or we need to go fetch it
if object_state:
new_state = object_state
else:
if search_key:
search = search_key
else:
search = self.lvm_id
new_state = self._ap_search_method([search])[0]
assert isinstance(new_state, State)
@ -181,7 +159,10 @@ class AutomatedProperties(dbus.service.Object):
cfg.om.lookup_update(self, new_id[0], new_id[1])
# Grab the properties values, then replace the state of the object
# and retrieve the new values.
# and retrieve the new values
# TODO: We need to add locking to prevent concurrent access to the
# properties so that a client is not accessing while we are
# replacing.
o_prop = get_properties(self)
self.state = new_state
n_prop = get_properties(self)

View File

@ -7,16 +7,18 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
import subprocess
from . import cfg
from .cmdhandler import options_to_cli_args, LvmExecutionMeta, call_lvm
import dbus
from .utils import pv_range_append, pv_dest_ranges, log_error, log_debug,\
mt_async_call
from .request import RequestEntry
import threading
import time
from .cmdhandler import options_to_cli_args
import dbus
from .utils import pv_range_append, pv_dest_ranges, log_error, log_debug
import traceback
import os
_rlock = threading.RLock()
_thread_list = list()
def pv_move_lv_cmd(move_options, lv_full_name,
@ -40,50 +42,15 @@ def lv_merge_cmd(merge_options, lv_full_name):
return cmd
def _load_wrapper(ignored):
cfg.load()
def _move_merge(interface_name, cmd, job_state):
add(cmd, job_state)
def _move_callback(job_state, line_str):
try:
if line_str.count(':') == 2:
(device, ignore, percentage) = line_str.split(':')
job_state.Percent = int(round(
float(percentage.strip()[:-1]), 1))
# While the move is in progress we need to periodically update
# the state to reflect where everything is at. We will do this
# by scheduling the load to occur in the main work queue.
r = RequestEntry(
-1, _load_wrapper, ("_move_callback: load",), None, None, False)
cfg.worker_q.put(r)
except ValueError:
log_error("Trying to parse percentage which failed for %s" % line_str)
def _move_merge(interface_name, command, job_state):
# We need to execute these commands standalone by forking & exec'ing
# them, as we will be getting periodic output from them on
# the status of the long-running operation.
meta = LvmExecutionMeta(time.time(), 0, command, -1000, None, None)
cfg.blackbox.add(meta)
ec, stdout, stderr = call_lvm(command, line_cb=_move_callback,
cb_data=job_state)
with meta.lock:
meta.ended = time.time()
meta.ec = ec
meta.stderr_txt = stderr
if ec == 0:
job_state.Percent = 100
else:
done = job_state.Wait(-1)
if not done:
ec, err_msg = job_state.GetError
raise dbus.exceptions.DBusException(
interface_name,
'Exit code %s, stderr = %s' % (str(ec), stderr))
'Exit code %s, stderr = %s' % (str(ec), err_msg))
cfg.load()
return '/'
@ -118,6 +85,8 @@ def move(interface_name, lv_name, pv_src_obj, pv_source_range,
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
# Generate the command line for this command, but don't
# execute it.
cmd = pv_move_lv_cmd(move_options,
lv_name,
pv_src.lvm_id,
@ -142,15 +111,104 @@ def merge(interface_name, lv_uuid, lv_name, merge_options, job_state):
'LV with uuid %s and name %s not present!' % (lv_uuid, lv_name))
def _run_cmd(req):
log_debug(
"_run_cmd: Running method: %s with args %s" %
(str(req.method), str(req.arguments)))
req.run_cmd()
log_debug("_run_cmd: complete!")
def background_reaper():
while cfg.run.value != 0:
with _rlock:
num_threads = len(_thread_list) - 1
if num_threads >= 0:
for i in range(num_threads, -1, -1):
_thread_list[i].join(0)
if not _thread_list[i].is_alive():
log_debug("Removing thread: %s" % _thread_list[i].name)
_thread_list.pop(i)
time.sleep(3)
def cmd_runner(request):
t = threading.Thread(target=_run_cmd, args=(request,),
name="cmd_runner %s" % str(request.method))
def background_execute(command, background_job):
# Wrap this whole operation in an exception handler, otherwise if we
# hit a code bug we will silently exit this thread without anyone being
# the wiser.
try:
# We need to execute these commands standalone by forking & exec'ing
# them, always!
command.insert(0, cfg.LVM_CMD)
process = subprocess.Popen(command, stdout=subprocess.PIPE,
env=os.environ,
stderr=subprocess.PIPE, close_fds=True)
log_debug("Background process for %s is %d" %
(str(command), process.pid))
lines_iterator = iter(process.stdout.readline, b"")
for line in lines_iterator:
line_str = line.decode("utf-8")
# Check to see if the line has the correct number of separators
try:
if line_str.count(':') == 2:
(device, ignore, percentage) = line_str.split(':')
background_job.Percent = round(
float(percentage.strip()[:-1]), 1)
except ValueError:
log_error("Trying to parse percentage which failed for %s" %
line_str)
out = process.communicate()
if process.returncode == 0:
background_job.Percent = 100
else:
log_error("Failed to execute background job %s, STDERR= %s"
% (str(command), out[1]))
background_job.set_result(process.returncode, out[1])
log_debug("Background process %d complete!" % process.pid)
except Exception:
# In the unlikely event that we blow up, we need to unblock caller which
# is waiting on an answer.
st = traceback.format_exc()
error = "Exception in background thread: \n%s" % st
log_error(error)
background_job.set_result(1, error)
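The percentage parsing above (and in _move_callback earlier in this file) relies on the interval reporting of pvmove/lvconvert printing lines with exactly two colons, device:label:percentage. A short illustration of that split, with a made-up progress line rather than captured lvm output:

# Hypothetical progress line; real output comes from 'pvmove -i'/'lvconvert -i'.
line_str = "  /dev/sdb1: Moved: 24.5%"

if line_str.count(':') == 2:
    device, _label, percentage = line_str.split(':')
    percent = round(float(percentage.strip()[:-1]), 1)
    print(device.strip(), percent)      # -> /dev/sdb1 24.5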
def add(command, reporting_job):
# Create the thread, get it running and then add it to the list
t = threading.Thread(
target=background_execute,
name="thread: " + ' '.join(command),
args=(command, reporting_job))
t.start()
with _rlock:
_thread_list.append(t)
def wait_thread(job, timeout, cb, cbe):
# We need to put the wait on its own thread, so that we don't block the
# entire dbus queue processing thread
try:
cb(job.state.Wait(timeout))
except Exception as e:
cbe("Wait exception: %s" % str(e))
return 0
def add_wait(job, timeout, cb, cbe):
if timeout == 0:
# Users are basically polling, do not create thread
cb(job.Complete)
else:
t = threading.Thread(
target=wait_thread,
name="thread job.Wait: %s" % job.dbus_object_path(),
args=(job, timeout, cb, cbe)
)
t.start()
with _rlock:
_thread_list.append(t)

View File

@ -26,7 +26,7 @@ bus = None
args = None
# Set to true if we are depending on external events for updates
got_external_event = False
ee = False
# Shared state variable across all processes
run = multiprocessing.Value('i', 1)
@ -38,20 +38,18 @@ SHELL_IN_USE = None
# Lock used by pprint
stdout_lock = multiprocessing.Lock()
kick_q = multiprocessing.Queue()
worker_q = queue.Queue()
# Main event loop
loop = None
BUS_NAME = os.getenv('LVM_DBUS_NAME', 'com.redhat.lvmdbus1')
BASE_INTERFACE = 'com.redhat.lvmdbus1'
PV_INTERFACE = BASE_INTERFACE + '.Pv'
VG_INTERFACE = BASE_INTERFACE + '.Vg'
VG_VDO_INTERFACE = BASE_INTERFACE + '.VgVdo'
LV_INTERFACE = BASE_INTERFACE + '.Lv'
LV_COMMON_INTERFACE = BASE_INTERFACE + '.LvCommon'
THIN_POOL_INTERFACE = BASE_INTERFACE + '.ThinPool'
VDO_POOL_INTERFACE = BASE_INTERFACE + '.VdoPool'
CACHE_POOL_INTERFACE = BASE_INTERFACE + '.CachePool'
LV_CACHED = BASE_INTERFACE + '.CachedLv'
SNAPSHOT_INTERFACE = BASE_INTERFACE + '.Snapshot'
@ -63,7 +61,6 @@ PV_OBJ_PATH = BASE_OBJ_PATH + '/Pv'
VG_OBJ_PATH = BASE_OBJ_PATH + '/Vg'
LV_OBJ_PATH = BASE_OBJ_PATH + '/Lv'
THIN_POOL_PATH = BASE_OBJ_PATH + "/ThinPool"
VDO_POOL_PATH = BASE_OBJ_PATH + "/VdoPool"
CACHE_POOL_PATH = BASE_OBJ_PATH + "/CachePool"
HIDDEN_LV_PATH = BASE_OBJ_PATH + "/HiddenLv"
MANAGER_OBJ_PATH = BASE_OBJ_PATH + '/Manager'
@ -74,33 +71,15 @@ pv_id = itertools.count()
vg_id = itertools.count()
lv_id = itertools.count()
thin_id = itertools.count()
vdo_id = itertools.count()
cache_pool_id = itertools.count()
job_id = itertools.count()
hidden_lv = itertools.count()
# Used to prevent circular imports...
load = None
event = None
# Boolean to denote if lvm supports VDO integration
vdo_support = False
# Global cached state
db = None
# lvm flight recorder
blackbox = None
# RequestEntry ctor
create_request_entry = None
def exit_daemon():
"""
Exit the daemon cleanly
:return:
"""
if run and loop:
run.value = 0
loop.quit()

View File

@ -8,7 +8,6 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen, PIPE
import select
import time
import threading
from itertools import chain
@ -16,10 +15,14 @@ import collections
import traceback
import os
from lvmdbusd import cfg
from lvmdbusd.utils import pv_dest_ranges, log_debug, log_error, add_no_notify,\
make_non_block, read_decoded
from lvmdbusd.lvm_shell_proxy import LVMShellProxy
try:
from . import cfg
from .utils import pv_dest_ranges, log_debug, log_error
from .lvm_shell_proxy import LVMShellProxy
except SystemError:
import cfg
from utils import pv_dest_ranges, log_debug, log_error
from lvm_shell_proxy import LVMShellProxy
try:
import simplejson as json
@ -39,7 +42,6 @@ cmd_lock = threading.RLock()
class LvmExecutionMeta(object):
def __init__(self, start, ended, cmd, ec, stdout_txt, stderr_txt):
self.lock = threading.RLock()
self.start = start
self.ended = ended
self.cmd = cmd
@ -48,30 +50,28 @@ class LvmExecutionMeta(object):
self.stderr_txt = stderr_txt
def __str__(self):
with self.lock:
return "EC= %d for %s\n" \
"STARTED: %f, ENDED: %f\n" \
"STDOUT=%s\n" \
"STDERR=%s\n" % \
(self.ec, str(self.cmd), self.start, self.ended, self.stdout_txt,
self.stderr_txt)
return "EC= %d for %s\n" \
"STARTED: %f, ENDED: %f\n" \
"STDOUT=%s\n" \
"STDERR=%s\n" % \
(self.ec, str(self.cmd), self.start, self.ended, self.stdout_txt,
self.stderr_txt)
class LvmFlightRecorder(object):
def __init__(self, size=16):
self.queue = collections.deque(maxlen=size)
def __init__(self):
self.queue = collections.deque(maxlen=16)
def add(self, lvm_exec_meta):
self.queue.append(lvm_exec_meta)
def dump(self):
with cmd_lock:
if len(self.queue):
log_error("LVM dbus flight recorder START")
for c in reversed(self.queue):
log_error(str(c))
log_error("LVM dbus flight recorder END")
log_error("LVM dbus flight recorder START")
for c in self.queue:
log_error(str(c))
log_error("LVM dbus flight recorder END")
cfg.blackbox = LvmFlightRecorder()
@ -84,68 +84,26 @@ def _debug_c(cmd, exit_code, out):
log_error(("STDERR=\n %s\n" % out[1]))
def call_lvm(command, debug=False, line_cb=None,
cb_data=None):
def call_lvm(command, debug=False):
"""
Call an executable and return a tuple of exitcode, stdout, stderr
:param command: Command to execute
:param debug: Dump debug to stdout
:param line_cb: Call the supplied function for each line read from
the child's stdout, THE CALLBACK MUST EXECUTE QUICKLY and not *block*,
otherwise the call_lvm function will fail to read
stdout/stderr. The return value of the callback is ignored
:param cb_data: Supplied to callback to allow caller access to
its own data
# Callback signature
def my_callback(my_context, line_read_stdout)
pass
:param command: Command to execute
:param debug: Dump debug to stdout
"""
# print 'STACK:'
# for line in traceback.format_stack():
# print line.strip()
# Prepend the full lvm executable so that we can run different versions
# in different locations on the same box
command.insert(0, cfg.LVM_CMD)
command = add_no_notify(command)
process = Popen(command, stdout=PIPE, stderr=PIPE, close_fds=True,
env=os.environ)
out = process.communicate()
stdout_text = ""
stderr_text = ""
stdout_index = 0
make_non_block(process.stdout)
make_non_block(process.stderr)
while True:
try:
rd_fd = [process.stdout.fileno(), process.stderr.fileno()]
ready = select.select(rd_fd, [], [], 2)
for r in ready[0]:
if r == process.stdout.fileno():
stdout_text += read_decoded(process.stdout)
elif r == process.stderr.fileno():
stderr_text += read_decoded(process.stderr)
if line_cb is not None:
# Process the callback for each line read!
while True:
i = stdout_text.find("\n", stdout_index)
if i != -1:
try:
line_cb(cb_data, stdout_text[stdout_index:i])
except:
st = traceback.format_exc()
log_error("call_lvm: line_cb exception: \n %s" % st)
stdout_index = i + 1
else:
break
# Check to see if process has terminated, None when running
if process.poll() is not None:
break
except IOError as ioe:
log_debug("call_lvm:" + str(ioe))
pass
stdout_text = bytes(out[0]).decode("utf-8")
stderr_text = bytes(out[1]).decode("utf-8")
if debug or process.returncode != 0:
_debug_c(command, process.returncode, (stdout_text, stderr_text))
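The reworked call_lvm() above multiplexes the child's stdout and stderr with select() and feeds complete stdout lines to line_cb as they arrive. A minimal sketch of that pattern on its own, outside lvmdbusd; stream_lines and on_line are illustrative names, and any output still buffered when the child exits is simply ignored here:

import os
import select
import subprocess

def stream_lines(cmd, on_line):
    # Run 'cmd' and invoke on_line(text) for every complete stdout line.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    fd = proc.stdout.fileno()
    os.set_blocking(fd, False)
    pending = ""
    while True:
        ready, _, _ = select.select([fd], [], [], 2)
        if ready:
            chunk = os.read(fd, 4096)
            if chunk:
                pending += chunk.decode("utf-8")
                while "\n" in pending:
                    line, pending = pending.split("\n", 1)
                    on_line(line)               # must return quickly, never block
        if proc.poll() is not None:             # child has exited
            return proc.returncode

# Example: print each line a long-running command reports.
# stream_lines(["pvmove", "-i", "1", "/dev/sdb1"], print)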
@ -159,7 +117,6 @@ _t_call = call_lvm
def _shell_cfg():
global _t_call
# noinspection PyBroadException
try:
lvm_shell = LVMShellProxy()
_t_call = lvm_shell.call_lvm
@ -260,10 +217,7 @@ def options_to_cli_args(options):
else:
rc.append("--%s" % k)
if v != "":
if isinstance(v, int):
rc.append(str(int(v)))
else:
rc.append(str(v))
rc.append(str(v))
return rc
@ -309,10 +263,10 @@ def lv_tag(lv_name, add, rm, tag_options):
return _tag('lvchange', lv_name, add, rm, tag_options)
def vg_rename(vg_uuid, new_name, rename_options):
def vg_rename(vg, new_name, rename_options):
cmd = ['vgrename']
cmd.extend(options_to_cli_args(rename_options))
cmd.extend([vg_uuid, new_name])
cmd.extend([vg, new_name])
return call(cmd)
@ -326,8 +280,8 @@ def vg_remove(vg_name, remove_options):
def vg_lv_create(vg_name, create_options, name, size_bytes, pv_dests):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--size', '%dB' % size_bytes])
cmd.extend(['--name', name, vg_name, '--yes'])
cmd.extend(['--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name])
pv_dest_ranges(cmd, pv_dests)
return call(cmd)
@ -338,38 +292,38 @@ def vg_lv_snapshot(vg_name, snapshot_options, name, size_bytes):
cmd.extend(["-s"])
if size_bytes != 0:
cmd.extend(['--size', '%dB' % size_bytes])
cmd.extend(['--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name])
return call(cmd)
def _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool):
def vg_lv_create_linear(vg_name, create_options, name, size_bytes, thin_pool):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
if not thin_pool:
cmd.extend(['--size', '%dB' % size_bytes])
cmd.extend(['--size', str(size_bytes) + 'B'])
else:
cmd.extend(['--thin', '--size', '%dB' % size_bytes])
cmd.extend(['--yes'])
return cmd
def vg_lv_create_linear(vg_name, create_options, name, size_bytes, thin_pool):
cmd = _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool)
cmd.extend(['--thin', '--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name])
return call(cmd)
def vg_lv_create_striped(vg_name, create_options, name, size_bytes,
num_stripes, stripe_size_kb, thin_pool):
cmd = _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool)
cmd.extend(['--stripes', str(int(num_stripes))])
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
if not thin_pool:
cmd.extend(['--size', str(size_bytes) + 'B'])
else:
cmd.extend(['--thin', '--size', str(size_bytes) + 'B'])
cmd.extend(['--stripes', str(num_stripes)])
if stripe_size_kb != 0:
cmd.extend(['--stripesize', str(int(stripe_size_kb))])
cmd.extend(['--stripesize', str(stripe_size_kb)])
cmd.extend(['--name', name, vg_name])
return call(cmd)
@ -382,15 +336,15 @@ def _vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes,
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--type', raid_type])
cmd.extend(['--size', '%dB' % size_bytes])
cmd.extend(['--size', str(size_bytes) + 'B'])
if num_stripes != 0:
cmd.extend(['--stripes', str(int(num_stripes))])
cmd.extend(['--stripes', str(num_stripes)])
if stripe_size_kb != 0:
cmd.extend(['--stripesize', str(int(stripe_size_kb))])
cmd.extend(['--stripesize', str(stripe_size_kb)])
cmd.extend(['--name', name, vg_name, '--yes'])
cmd.extend(['--name', name, vg_name])
return call(cmd)
@ -403,15 +357,14 @@ def vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes,
size_bytes, num_stripes, stripe_size_kb)
def vg_lv_create_mirror(
vg_name, create_options, name, size_bytes, num_copies):
def vg_lv_create_mirror(vg_name, create_options, name, size_bytes, num_copies):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--type', 'mirror'])
cmd.extend(['--mirrors', str(int(num_copies))])
cmd.extend(['--size', '%dB' % size_bytes])
cmd.extend(['--name', name, vg_name, '--yes'])
cmd.extend(['--mirrors', str(num_copies)])
cmd.extend(['--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name])
return call(cmd)
@ -431,24 +384,6 @@ def vg_create_thin_pool(md_full_name, data_full_name, create_options):
return call(cmd)
def vg_create_vdo_pool_lv_and_lv(vg_name, pool_name, lv_name, data_size,
virtual_size, create_options):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['-y', '--type', 'vdo', '-n', lv_name,
'-L', '%dB' % data_size, '-V', '%dB' % virtual_size,
"%s/%s" % (vg_name, pool_name)])
return call(cmd)
def vg_create_vdo_pool(pool_full_name, lv_name, virtual_size, create_options):
cmd = ['lvconvert']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--type', 'vdo-pool', '-n', lv_name, '--force', '-y',
'-V', '%dB' % virtual_size, pool_full_name])
return call(cmd)
def lv_remove(lv_path, remove_options):
cmd = ['lvremove']
cmd.extend(options_to_cli_args(remove_options))
@ -482,8 +417,8 @@ def lv_resize(lv_full_name, size_change, pv_dests,
def lv_lv_create(lv_full_name, create_options, name, size_bytes):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--virtualsize', '%dB' % size_bytes, '-T'])
cmd.extend(['--name', name, lv_full_name, '--yes'])
cmd.extend(['--virtualsize', str(size_bytes) + 'B', '-T'])
cmd.extend(['--name', name, lv_full_name])
return call(cmd)
@ -496,15 +431,6 @@ def lv_cache_lv(cache_pool_full_name, lv_full_name, cache_options):
return call(cmd)
def lv_writecache_lv(cache_lv_full_name, lv_full_name, cache_options):
# lvconvert --type writecache --cachevol VG/CacheLV VG/OriginLV
cmd = ['lvconvert']
cmd.extend(options_to_cli_args(cache_options))
cmd.extend(['-y', '--type', 'writecache', '--cachevol',
cache_lv_full_name, lv_full_name])
return call(cmd)
def lv_detach_cache(lv_full_name, detach_options, destroy_cache):
cmd = ['lvconvert']
if destroy_cache:
@ -520,28 +446,6 @@ def lv_detach_cache(lv_full_name, detach_options, destroy_cache):
return call(cmd)
def lv_vdo_compression(lv_path, enable, comp_options):
cmd = ['lvchange', '--compression']
if enable:
cmd.append('y')
else:
cmd.append('n')
cmd.extend(options_to_cli_args(comp_options))
cmd.append(lv_path)
return call(cmd)
def lv_vdo_deduplication(lv_path, enable, dedup_options):
cmd = ['lvchange', '--deduplication']
if enable:
cmd.append('y')
else:
cmd.append('n')
cmd.extend(options_to_cli_args(dedup_options))
cmd.append(lv_path)
return call(cmd)
def supports_json():
cmd = ['help']
rc, out, err = call(cmd)
@ -554,16 +458,6 @@ def supports_json():
return False
def supports_vdo():
cmd = ['segtypes']
rc, out, err = call(cmd)
if rc == 0:
if "vdo" in out:
log_debug("We have VDO support")
return True
return False
def lvm_full_report_json():
pv_columns = ['pv_name', 'pv_uuid', 'pv_fmt', 'pv_size', 'pv_free',
'pv_used', 'dev_size', 'pv_mda_size', 'pv_mda_free',
@ -591,22 +485,6 @@ def lvm_full_report_json():
lv_seg_columns = ['seg_pe_ranges', 'segtype', 'lv_uuid']
if cfg.vdo_support:
lv_columns.extend(
['vdo_operating_mode', 'vdo_compression_state', 'vdo_index_state',
'vdo_used_size', 'vdo_saving_percent']
)
lv_seg_columns.extend(
['vdo_compression', 'vdo_deduplication',
'vdo_use_metadata_hints', 'vdo_minimum_io_size',
'vdo_block_map_cache_size', 'vdo_block_map_era_length',
'vdo_use_sparse_index', 'vdo_index_memory_size',
'vdo_slab_size', 'vdo_ack_threads', 'vdo_bio_threads',
'vdo_bio_rotation', 'vdo_cpu_threads', 'vdo_hash_zone_threads',
'vdo_logical_threads', 'vdo_physical_threads',
'vdo_max_discard', 'vdo_write_policy', 'vdo_header_size'])
cmd = _dc('fullreport', [
'-a', # Need hidden too
'--configreport', 'pv', '-o', ','.join(pv_columns),
@ -618,8 +496,7 @@ def lvm_full_report_json():
])
rc, out, err = call(cmd)
# When we have an exported vg the exit code of lvs or fullreport will be 5
if rc == 0 or rc == 5:
if rc == 0:
# With the current implementation, if we are using the shell then we
# are using JSON and JSON is returned back to us as it was parsed to
# figure out if we completed OK or not
@ -627,13 +504,7 @@ def lvm_full_report_json():
assert(type(out) == dict)
return out
else:
try:
return json.loads(out)
except json.decoder.JSONDecodeError as joe:
log_error("JSONDecodeError %s, \n JSON=\n%s\n" %
(str(joe), out))
raise joe
return json.loads(out)
return None
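
The comment above notes that lvs/fullreport exits with 5 when a VG is exported, which is why one version of the check accepts both 0 and 5. A hedged, standalone sketch of that tolerant invocation (assumes an 'lvm' binary on PATH; the report columns are omitted for brevity):

import json
import subprocess

def fullreport_sketch():
    cp = subprocess.run(["lvm", "fullreport", "--reportformat", "json"],
                        capture_output=True, text=True)
    # Exit code 5 still carries usable output when a VG is exported.
    if cp.returncode in (0, 5):
        try:
            return json.loads(cp.stdout)
        except json.JSONDecodeError:
            return None
    return None
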
@ -683,7 +554,7 @@ def pv_resize(device, size_bytes, create_options):
cmd.extend(options_to_cli_args(create_options))
if size_bytes != 0:
cmd.extend(['--yes', '--setphysicalvolumesize', '%dB' % size_bytes])
cmd.extend(['--setphysicalvolumesize', str(size_bytes) + 'B'])
cmd.extend([device])
return call(cmd)
@ -748,10 +619,10 @@ def vg_reduce(vg_name, missing, pv_devices, reduce_options):
cmd = ['vgreduce']
cmd.extend(options_to_cli_args(reduce_options))
if len(pv_devices) == 0:
cmd.append('--all')
if missing:
cmd.append('--removemissing')
elif len(pv_devices) == 0:
cmd.append('--all')
cmd.append(vg_name)
cmd.extend(pv_devices)
@ -779,12 +650,12 @@ def vg_allocation_policy(vg_name, policy, policy_options):
def vg_max_pv(vg_name, number, max_options):
return _vg_value_set(vg_name, ['--maxphysicalvolumes', str(int(number))],
return _vg_value_set(vg_name, ['--maxphysicalvolumes', str(number)],
max_options)
def vg_max_lv(vg_name, number, max_options):
return _vg_value_set(vg_name, ['-l', str(int(number))], max_options)
return _vg_value_set(vg_name, ['-l', str(number)], max_options)
def vg_uuid_gen(vg_name, ignore, options):
@ -826,7 +697,6 @@ def activate_deactivate(op, name, activate, control_flags, options):
op += 'n'
cmd.append(op)
cmd.append("-y")
cmd.append(name)
return call(cmd)
@ -862,8 +732,8 @@ def lv_retrieve_with_segments():
'lv_attr', 'lv_tags', 'vg_uuid', 'lv_active', 'data_lv',
'metadata_lv', 'seg_pe_ranges', 'segtype', 'lv_parent',
'lv_role', 'lv_layout',
'snap_percent', 'metadata_percent', 'copy_percent',
'sync_percent', 'lv_metadata_size', 'move_pv', 'move_pv_uuid']
'snap_percent', 'metadata_percent', 'copy_percent',
'sync_percent', 'lv_metadata_size', 'move_pv', 'move_pv_uuid']
cmd = _dc('lvs', ['-a', '-o', ','.join(columns)])
rc, out, err = call(cmd)
@ -880,4 +750,4 @@ if __name__ == '__main__':
pv_data = pv_retrieve_with_segs()
for p in pv_data:
print(str(p))
log_debug(str(p))


@ -11,211 +11,20 @@ from .pv import load_pvs
from .vg import load_vgs
from .lv import load_lvs
from . import cfg
from .utils import MThreadRunner, log_debug, log_error
import threading
import queue
import time
import traceback
def _main_thread_load(refresh=True, emit_signal=True):
def load(refresh=True, emit_signal=True, cache_refresh=True, log=True):
num_total_changes = 0
to_remove = []
(changes, remove) = load_pvs(
refresh=refresh,
emit_signal=emit_signal,
cache_refresh=False)[1:]
num_total_changes += changes
to_remove.extend(remove)
(changes, remove) = load_vgs(
refresh=refresh,
emit_signal=emit_signal,
cache_refresh=False)[1:]
num_total_changes += changes
to_remove.extend(remove)
(lv_changes, remove) = load_lvs(
refresh=refresh,
emit_signal=emit_signal,
cache_refresh=False)[1:]
num_total_changes += lv_changes
to_remove.extend(remove)
# When the LVs change it can cause another change in the VGs which is
# missed if we don't scan through the VGs again. We could achieve this
# the other way and re-scan the LVs, but in general there are more LVs than
# VGs, thus this should be more efficient. This happens when an LV interface
# changes causing the dbus object representing it to be removed and
# recreated.
if refresh and lv_changes > 0:
(changes, remove) = load_vgs(
refresh=refresh,
emit_signal=emit_signal,
cache_refresh=False)[1:]
num_total_changes += changes
to_remove.extend(remove)
# Remove any objects that are no longer needed. We do this after we process
# all the objects to ensure that references still exist for objects that
# are processed after them.
to_remove.reverse()
for i in to_remove:
dbus_obj = cfg.om.get_object_by_path(i)
if dbus_obj:
cfg.om.remove_object(dbus_obj, True)
num_total_changes += 1
return num_total_changes
def load(refresh=True, emit_signal=True, cache_refresh=True, log=True,
need_main_thread=True):
# Go through and load all the PVs, VGs and LVs
if cache_refresh:
cfg.db.refresh(log)
if need_main_thread:
rc = MThreadRunner(_main_thread_load, refresh, emit_signal).done()
else:
rc = _main_thread_load(refresh, emit_signal)
num_total_changes += load_pvs(refresh=refresh, emit_signal=emit_signal,
cache_refresh=False)[1]
num_total_changes += load_vgs(refresh=refresh, emit_signal=emit_signal,
cache_refresh=False)[1]
num_total_changes += load_lvs(refresh=refresh, emit_signal=emit_signal,
cache_refresh=False)[1]
return rc
# Even though lvm can handle multiple changes concurrently it really doesn't
# make sense to make a 1-1 fetch of data for each change of lvm because when
# we fetch the data once all previous changes are reflected.
class StateUpdate(object):
class UpdateRequest(object):
def __init__(self, refresh, emit_signal, cache_refresh, log,
need_main_thread):
self.is_done = False
self.refresh = refresh
self.emit_signal = emit_signal
self.cache_refresh = cache_refresh
self.log = log
self.need_main_thread = need_main_thread
self.result = None
self.cond = threading.Condition(threading.Lock())
def done(self):
with self.cond:
if not self.is_done:
self.cond.wait()
return self.result
def set_result(self, result):
with self.cond:
self.result = result
self.is_done = True
self.cond.notify_all()
@staticmethod
def update_thread(obj):
exception_count = 0
queued_requests = []
while cfg.run.value != 0:
# noinspection PyBroadException
try:
refresh = True
emit_signal = True
cache_refresh = True
log = True
need_main_thread = True
with obj.lock:
wait = not obj.deferred
obj.deferred = False
if len(queued_requests) == 0 and wait:
queued_requests.append(obj.queue.get(True, 2))
# Ok we have one or the deferred queue has some,
# check if any others
try:
while True:
queued_requests.append(obj.queue.get(False))
except queue.Empty:
pass
if len(queued_requests) > 1:
log_debug("Processing %d updates!" % len(queued_requests),
'bg_black', 'fg_light_green')
# We have what we can, run the update with the needed options
for i in queued_requests:
if not i.refresh:
refresh = False
if not i.emit_signal:
emit_signal = False
if not i.cache_refresh:
cache_refresh = False
if not i.log:
log = False
if not i.need_main_thread:
need_main_thread = False
num_changes = load(refresh, emit_signal, cache_refresh, log,
need_main_thread)
# Update is done, let everyone know!
for i in queued_requests:
i.set_result(num_changes)
# Only clear out the requests after we have given them a result
# otherwise we can orphan the waiting threads and they never
# wake up if we get an exception
queued_requests = []
# We retrieved OK, clear exception count
exception_count = 0
except queue.Empty:
pass
except Exception as e:
st = traceback.format_exc()
log_error("update_thread exception: \n%s" % st)
cfg.blackbox.dump()
exception_count += 1
if exception_count >= 5:
for i in queued_requests:
i.set_result(e)
log_error("Too many errors in update_thread, exiting daemon")
cfg.exit_daemon()
else:
# Slow things down when encountering errors
time.sleep(1)
def __init__(self):
self.lock = threading.RLock()
self.queue = queue.Queue()
self.deferred = False
# Do initial load
load(refresh=False, emit_signal=False, need_main_thread=False)
self.thread = threading.Thread(target=StateUpdate.update_thread,
args=(self,),
name="StateUpdate.update_thread")
def load(self, refresh=True, emit_signal=True, cache_refresh=True,
log=True, need_main_thread=True):
# Place this request on the queue and wait for it to be completed
req = StateUpdate.UpdateRequest(refresh, emit_signal, cache_refresh,
log, need_main_thread)
self.queue.put(req)
return req.done()
def event(self):
with self.lock:
self.deferred = True
return num_total_changes
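
The comment above the StateUpdate class captures the rationale: there is no point fetching lvm state once per change notification, because a single fetch already reflects every change that happened before it. A stripped-down sketch of that request-coalescing pattern (class and method names are illustrative, not the daemon's API):

import queue
import threading

class CoalescingWorker(object):
    def __init__(self, do_refresh):
        self._q = queue.Queue()
        self._do_refresh = do_refresh
        threading.Thread(target=self._run, daemon=True).start()

    def request(self):
        # Every caller gets an Event plus a slot for the shared result.
        evt, box = threading.Event(), {}
        self._q.put((evt, box))
        evt.wait()
        return box["result"]

    def _run(self):
        while True:
            batch = [self._q.get()]           # block for the first request
            while True:                       # then drain whatever is queued
                try:
                    batch.append(self._q.get_nowait())
                except queue.Empty:
                    break
            result = self._do_refresh()       # one refresh serves the batch
            for evt, box in batch:
                box["result"] = result
                evt.set()
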


@ -8,55 +8,12 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .automatedproperties import AutomatedProperties
from .utils import job_obj_path_generate, mt_async_call
from .utils import job_obj_path_generate
from . import cfg
from .cfg import JOB_INTERFACE
import dbus
import threading
# noinspection PyUnresolvedReferences
from gi.repository import GLib
# Class that handles a client waiting for something to be complete. We either
# get a timeout or the operation is done.
class WaitingClient(object):
# A timeout occurred
@staticmethod
def _timeout(wc):
with wc.rlock:
if wc.in_use:
wc.in_use = False
# Remove ourselves from waiting client
wc.job_state.remove_waiting_client(wc)
wc.timer_id = -1
mt_async_call(wc.cb, wc.job_state.Complete)
wc.job_state = None
def __init__(self, job_state, tmo, cb, cbe):
self.rlock = threading.RLock()
self.job_state = job_state
self.cb = cb
self.cbe = cbe
self.in_use = True # Indicates if object is in play
self.timer_id = -1
if tmo > 0:
self.timer_id = GLib.timeout_add_seconds(
tmo, WaitingClient._timeout, self)
# The job finished before the timer popped and we are being notified that
# it's done
def notify(self):
with self.rlock:
if self.in_use:
self.in_use = False
# Clear timer
if self.timer_id != -1:
GLib.source_remove(self.timer_id)
self.timer_id = -1
mt_async_call(self.cb, self.job_state.Complete)
self.job_state = None
from . import background
# noinspection PyPep8Naming
@ -67,9 +24,9 @@ class JobState(object):
self._percent = 0
self._complete = False
self._request = request
self._cond = threading.Condition(self.rlock)
self._ec = 0
self._stderr = ''
self._waiting_clients = []
# This is an lvm command that is just taking too long and doesn't
# support background operation
@ -100,7 +57,7 @@ class JobState(object):
with self.rlock:
self._complete = value
self._percent = 100
self.notify_waiting_clients()
self._cond.notify_all()
@property
def GetError(self):
@ -114,10 +71,29 @@ class JobState(object):
else:
return (-1, 'Job is not complete!')
def set_result(self, ec, msg):
with self.rlock:
self.Complete = True
self._ec = ec
self._stderr = msg
def dtor(self):
with self.rlock:
self._request = None
def Wait(self, timeout):
try:
with self._cond:
# Check to see if we are done, before we wait
if not self.Complete:
if timeout != -1:
self._cond.wait(timeout)
else:
self._cond.wait()
return self.Complete
except RuntimeError:
return False
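
Wait above is a plain condition-variable wait: check Complete before blocking, wait with an optional timeout, and report whatever Complete is on wake-up, so a timeout and a finished job are distinguished by the return value rather than by an exception. The same idiom in isolation (illustrative names; None instead of -1 means wait forever):

import threading

class DoneFlag(object):
    def __init__(self):
        self._cond = threading.Condition(threading.Lock())
        self._done = False

    def set_done(self):
        with self._cond:
            self._done = True
            self._cond.notify_all()

    def wait(self, timeout=None):
        # True if the work finished, False if we merely timed out.
        with self._cond:
            if not self._done:
                self._cond.wait(timeout)
            return self._done
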
@property
def Result(self):
with self.rlock:
@ -125,36 +101,6 @@ class JobState(object):
return self._request.result()
return '/'
def add_waiting_client(self, client):
with self.rlock:
# Avoid race condition where it goes complete before we get added
# to the list of waiting clients
if self.Complete:
client.notify()
else:
self._waiting_clients.append(client)
def remove_waiting_client(self, client):
# If a waiting client timer pops before the job is done we will allow
# the client to remove themselves from the list. As we have a lock
# here and a lock in the waiting client too, and they can be obtained
# in different orders, a dead lock can occur.
# As this remove is really optional, we will try to acquire the lock
# and remove. If we are unsuccessful it's not fatal, we just delay
# the time when the objects can be garbage collected by python
if self.rlock.acquire(False):
try:
self._waiting_clients.remove(client)
finally:
self.rlock.release()
def notify_waiting_clients(self):
with self.rlock:
for c in self._waiting_clients:
c.notify()
self._waiting_clients = []
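
As the comment explains, remove_waiting_client above avoids a lock-ordering deadlock by attempting a non-blocking acquire and simply skipping the removal when the lock is busy; the entry is then just garbage-collected a little later. The idiom on its own (hypothetical helper):

import threading

_lock = threading.RLock()

def try_remove(items, item):
    # Best-effort removal: give up rather than block while another thread
    # may be holding this lock and waiting on one of ours.
    if _lock.acquire(False):
        try:
            if item in items:
                items.remove(item)
            return True
        finally:
            _lock.release()
    return False
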
# noinspection PyPep8Naming
class Job(AutomatedProperties):
@ -176,24 +122,25 @@ class Job(AutomatedProperties):
def Percent(self):
return dbus.Double(float(self.state.Percent))
@Percent.setter
def Percent(self, value):
self.state.Percent = value
@property
def Complete(self):
return dbus.Boolean(self.state.Complete)
@staticmethod
def _signal_complete(obj):
obj.PropertiesChanged(
JOB_INTERFACE, dict(Complete=dbus.Boolean(obj.state.Complete)), [])
@Complete.setter
def Complete(self, value):
self.state.Complete = value
mt_async_call(Job._signal_complete, self)
@property
def GetError(self):
return dbus.Struct(self.state.GetError, signature="(is)")
def set_result(self, ec, msg):
self.state.set_result(ec, msg)
@dbus.service.method(dbus_interface=JOB_INTERFACE)
def Remove(self):
if self.state.Complete:
@ -208,11 +155,7 @@ class Job(AutomatedProperties):
out_signature='b',
async_callbacks=('cb', 'cbe'))
def Wait(self, timeout, cb, cbe):
if timeout == 0 or self.state.Complete:
cb(dbus.Boolean(self.state.Complete))
else:
self.state.add_waiting_client(
WaitingClient(self.state, timeout, cb, cbe))
background.add_wait(self, timeout, cb, cbe)
@property
def Result(self):


@ -75,10 +75,11 @@ def common(retrieve, o_type, search_keys,
object_path = None
to_remove = []
if refresh:
to_remove = list(existing_paths.keys())
for k in list(existing_paths.keys()):
cfg.om.remove_object(cfg.om.get_object_by_path(k), True)
num_changes += 1
num_changes += len(rc)
return rc, num_changes, to_remove
return rc, num_changes


@ -10,22 +10,20 @@
from .automatedproperties import AutomatedProperties
from . import utils
from .utils import vg_obj_path_generate, log_error, _handle_execute
from .utils import vg_obj_path_generate
import dbus
from . import cmdhandler
from . import cfg
from .cfg import LV_INTERFACE, THIN_POOL_INTERFACE, SNAPSHOT_INTERFACE, \
LV_COMMON_INTERFACE, CACHE_POOL_INTERFACE, LV_CACHED, VDO_POOL_INTERFACE
LV_COMMON_INTERFACE, CACHE_POOL_INTERFACE, LV_CACHED
from .request import RequestEntry
from .utils import n, n32, d
from .utils import n, n32
from .loader import common
from .state import State
from . import background
from .utils import round_size, mt_remove_dbus_objects
from .utils import round_size
from .job import JobState
import traceback
# Try and build a key for a LV, so that we sort the LVs with least dependencies
# first. This may be error prone because of the flexibility LVM
@ -74,66 +72,23 @@ def lvs_state_retrieve(selection, cache_refresh=True):
lvs = sorted(cfg.db.fetch_lvs(selection), key=get_key)
for l in lvs:
if cfg.vdo_support:
rc.append(LvStateVdo(
l['lv_uuid'], l['lv_name'],
l['lv_path'], n(l['lv_size']),
l['vg_name'],
l['vg_uuid'], l['pool_lv_uuid'],
l['pool_lv'], l['origin_uuid'], l['origin'],
n32(l['data_percent']), l['lv_attr'],
l['lv_tags'], l['lv_active'], l['data_lv'],
l['metadata_lv'], l['segtype'], l['lv_role'],
l['lv_layout'],
n32(l['snap_percent']),
n32(l['metadata_percent']),
n32(l['copy_percent']),
n32(l['sync_percent']),
n(l['lv_metadata_size']),
l['move_pv'],
l['move_pv_uuid'],
l['vdo_operating_mode'],
l['vdo_compression_state'],
l['vdo_index_state'],
n(l['vdo_used_size']),
d(l['vdo_saving_percent']),
l['vdo_compression'],
l['vdo_deduplication'],
l['vdo_use_metadata_hints'],
n32(l['vdo_minimum_io_size']),
n(l['vdo_block_map_cache_size']),
n32(l['vdo_block_map_era_length']),
l['vdo_use_sparse_index'],
n(l['vdo_index_memory_size']),
n(l['vdo_slab_size']),
n32(l['vdo_ack_threads']),
n32(l['vdo_bio_threads']),
n32(l['vdo_bio_rotation']),
n32(l['vdo_cpu_threads']),
n32(l['vdo_hash_zone_threads']),
n32(l['vdo_logical_threads']),
n32(l['vdo_physical_threads']),
n32(l['vdo_max_discard']),
l['vdo_write_policy'],
n32(l['vdo_header_size'])))
else:
rc.append(LvState(
l['lv_uuid'], l['lv_name'],
l['lv_path'], n(l['lv_size']),
l['vg_name'],
l['vg_uuid'], l['pool_lv_uuid'],
l['pool_lv'], l['origin_uuid'], l['origin'],
n32(l['data_percent']), l['lv_attr'],
l['lv_tags'], l['lv_active'], l['data_lv'],
l['metadata_lv'], l['segtype'], l['lv_role'],
l['lv_layout'],
n32(l['snap_percent']),
n32(l['metadata_percent']),
n32(l['copy_percent']),
n32(l['sync_percent']),
n(l['lv_metadata_size']),
l['move_pv'],
l['move_pv_uuid']))
rc.append(LvState(
l['lv_uuid'], l['lv_name'],
l['lv_path'], n(l['lv_size']),
l['vg_name'],
l['vg_uuid'], l['pool_lv_uuid'],
l['pool_lv'], l['origin_uuid'], l['origin'],
n32(l['data_percent']), l['lv_attr'],
l['lv_tags'], l['lv_active'], l['data_lv'],
l['metadata_lv'], l['segtype'], l['lv_role'],
l['lv_layout'],
n32(l['snap_percent']),
n32(l['metadata_percent']),
n32(l['copy_percent']),
n32(l['sync_percent']),
n(l['lv_metadata_size']),
l['move_pv'],
l['move_pv_uuid']))
return rc
@ -237,8 +192,6 @@ class LvState(State):
def _object_type_create(self):
if self.Attr[0] == 't':
return LvThinPool
elif self.Attr[0] == 'd':
return LvVdoPool
elif self.Attr[0] == 'C':
if 'pool' in self.layout:
return LvCachePool
@ -265,34 +218,6 @@ class LvState(State):
return (klass, path_method)
class LvStateVdo(LvState):
def __init__(self, Uuid, Name, Path, SizeBytes,
vg_name, vg_uuid, pool_lv_uuid, PoolLv,
origin_uuid, OriginLv, DataPercent, Attr, Tags, active,
data_lv, metadata_lv, segtypes, role, layout, SnapPercent,
MetaDataPercent, CopyPercent, SyncPercent,
MetaDataSizeBytes, move_pv, move_pv_uuid,
vdo_operating_mode, vdo_compression_state, vdo_index_state,
vdo_used_size,vdo_saving_percent,vdo_compression,
vdo_deduplication,vdo_use_metadata_hints,
vdo_minimum_io_size,vdo_block_map_cache_size,
vdo_block_map_era_length,vdo_use_sparse_index,
vdo_index_memory_size,vdo_slab_size,vdo_ack_threads,
vdo_bio_threads,vdo_bio_rotation,vdo_cpu_threads,
vdo_hash_zone_threads,vdo_logical_threads,
vdo_physical_threads,vdo_max_discard,
vdo_write_policy,vdo_header_size):
super(LvStateVdo, self).__init__(Uuid, Name, Path, SizeBytes,
vg_name, vg_uuid, pool_lv_uuid, PoolLv,
origin_uuid, OriginLv, DataPercent, Attr, Tags, active,
data_lv, metadata_lv, segtypes, role, layout, SnapPercent,
MetaDataPercent, CopyPercent, SyncPercent,
MetaDataSizeBytes, move_pv, move_pv_uuid)
utils.init_class_from_arguments(self, "vdo_", snake_to_pascal=True)
# noinspection PyPep8Naming
@utils.dbus_property(LV_COMMON_INTERFACE, 'Uuid', 's')
@utils.dbus_property(LV_COMMON_INTERFACE, 'Name', 's')
@ -307,6 +232,7 @@ class LvStateVdo(LvState):
@utils.dbus_property(LV_COMMON_INTERFACE, 'Attr', 's')
@utils.dbus_property(LV_COMMON_INTERFACE, 'DataPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'SnapPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'DataPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'MetaDataPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'CopyPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'SyncPercent', 'u')
@ -346,36 +272,6 @@ class LvCommon(AutomatedProperties):
self.state = object_state
self._move_pv = self._get_move_pv()
@staticmethod
def handle_execute(rc, out, err):
_handle_execute(rc, out, err, LV_INTERFACE)
@staticmethod
def validate_dbus_object(lv_uuid, lv_name):
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
if not dbo:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(lv_uuid, lv_name))
return dbo
def attr_struct(self, index, type_map, default='undisclosed'):
try:
if self.state.Attr[index] not in type_map:
log_error("LV %s %s with lv_attr %s, lv_attr[%d] = "
"'%s' is not known" %
(self.Uuid, self.Name, self.Attr, index,
self.state.Attr[index]))
return dbus.Struct((self.state.Attr[index],
type_map.get(self.state.Attr[index], default)),
signature="(ss)")
except BaseException:
st = traceback.format_exc()
log_error("attr_struct: \n%s" % st)
return dbus.Struct(('?', 'Unavailable'), signature="(ss)")
@property
def VolumeType(self):
type_map = {'C': 'Cache', 'm': 'mirrored',
@ -388,16 +284,17 @@ class LvCommon(AutomatedProperties):
'l': 'mirror log device', 'c': 'under conversion',
'V': 'thin Volume', 't': 'thin pool', 'T': 'Thin pool data',
'e': 'raid or pool metadata or pool metadata spare',
'd': 'vdo pool', 'D': 'vdo pool data', 'g': 'integrity',
'-': 'Unspecified'}
return self.attr_struct(0, type_map)
return dbus.Struct((self.state.Attr[0], type_map[self.state.Attr[0]]),
signature="as")
@property
def Permissions(self):
type_map = {'w': 'writable', 'r': 'read-only',
'R': 'Read-only activation of non-read-only volume',
'-': 'Unspecified'}
return self.attr_struct(1, type_map)
return dbus.Struct((self.state.Attr[1], type_map[self.state.Attr[1]]),
signature="(ss)")
@property
def AllocationPolicy(self):
@ -406,7 +303,8 @@ class LvCommon(AutomatedProperties):
'i': 'inherited', 'I': 'inherited locked',
'l': 'cling', 'L': 'cling locked',
'n': 'normal', 'N': 'normal locked', '-': 'Unspecified'}
return self.attr_struct(2, type_map)
return dbus.Struct((self.state.Attr[2], type_map[self.state.Attr[2]]),
signature="(ss)")
@property
def FixedMinor(self):
@ -414,20 +312,15 @@ class LvCommon(AutomatedProperties):
@property
def State(self):
type_map = {'a': 'active',
's': 'suspended',
'I': 'Invalid snapshot',
type_map = {'a': 'active', 's': 'suspended', 'I': 'Invalid snapshot',
'S': 'invalid Suspended snapshot',
'm': 'snapshot merge failed',
'M': 'suspended snapshot (M)erge failed',
'd': 'mapped device present without tables',
'i': 'mapped device present with inactive table',
'h': 'historical',
'c': 'check needed suspended thin-pool',
'C': 'check needed',
'X': 'unknown',
'-': 'Unspecified'}
return self.attr_struct(4, type_map)
'X': 'unknown', '-': 'Unspecified'}
return dbus.Struct((self.state.Attr[4], type_map[self.state.Attr[4]]),
signature="(ss)")
@property
def TargetType(self):
@ -443,18 +336,11 @@ class LvCommon(AutomatedProperties):
@property
def Health(self):
type_map = {'p': 'partial',
'r': 'refresh needed',
'm': 'mismatches',
'w': 'writemostly',
'X': 'unknown',
'-': 'unspecified',
's': 'reshaping',
'F': 'failed',
'D': 'Data space',
'R': 'Remove',
'M': 'Metadata'}
return self.attr_struct(8, type_map)
type_map = {'p': 'partial', 'r': 'refresh',
'm': 'mismatches', 'w': 'writemostly',
'X': 'X unknown', '-': 'Unspecified'}
return dbus.Struct((self.state.Attr[8], type_map[self.state.Attr[8]]),
signature="(ss)")
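
The properties above each decode a single character of the lvs lv_attr string; the indices used in this code place the volume type at position 0, permissions at 1, allocation policy at 2, state at 4 and health at 8. A tiny illustrative decoder using an abbreviated subset of the same maps:

def decode_lv_attr(attr):
    vol_type = {'t': 'thin pool', 'V': 'thin Volume', 'm': 'mirrored',
                '-': 'Unspecified'}
    state = {'a': 'active', 's': 'suspended', '-': 'Unspecified'}
    return {
        'VolumeType': (attr[0], vol_type.get(attr[0], 'undisclosed')),
        'State': (attr[4], state.get(attr[4], 'undisclosed')),
    }

print(decode_lv_attr('twi-aotz--'))
# {'VolumeType': ('t', 'thin pool'), 'State': ('a', 'active')}
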
@property
def SkipActivation(self):
@ -522,9 +408,25 @@ class Lv(LvCommon):
@staticmethod
def _remove(lv_uuid, lv_name, remove_options):
# Make sure we have a dbus object representing it
LvCommon.validate_dbus_object(lv_uuid, lv_name)
# Remove the LV, if successful then remove from the model
LvCommon.handle_execute(*cmdhandler.lv_remove(lv_name, remove_options))
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
if dbo:
# Remove the LV, if successful then remove from the model
rc, out, err = cmdhandler.lv_remove(lv_name, remove_options)
if rc == 0:
cfg.om.remove_object(dbo, True)
cfg.load()
else:
# Need to work on error handling, need consistent
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(lv_uuid, lv_name))
return '/'
@dbus.service.method(
@ -542,10 +444,24 @@ class Lv(LvCommon):
@staticmethod
def _rename(lv_uuid, lv_name, new_name, rename_options):
# Make sure we have a dbus object representing it
LvCommon.validate_dbus_object(lv_uuid, lv_name)
# Rename the logical volume
LvCommon.handle_execute(*cmdhandler.lv_rename(lv_name, new_name,
rename_options))
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
if dbo:
# Rename the logical volume
rc, out, err = cmdhandler.lv_rename(lv_name, new_name,
rename_options)
if rc == 0:
cfg.load()
else:
# Need to work on error handling, need consistent
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(lv_uuid, lv_name))
return '/'
@dbus.service.method(
@ -579,25 +495,44 @@ class Lv(LvCommon):
pv_dests_and_ranges, move_options, job_state), cb, cbe, False,
job_state)
background.cmd_runner(r)
cfg.worker_q.put(r)
@staticmethod
def _snap_shot(lv_uuid, lv_name, name, optional_size,
snapshot_options):
# Make sure we have a dbus object representing it
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
# If you specify a size you get a 'thick' snapshot even if
# it is a thin lv
if not dbo.IsThinVolume:
if optional_size == 0:
space = dbo.SizeBytes // 80
remainder = space % 512
optional_size = space + 512 - remainder
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
LvCommon.handle_execute(*cmdhandler.vg_lv_snapshot(
lv_name, snapshot_options,name, optional_size))
full_name = "%s/%s" % (dbo.vg_name_lookup(), name)
return cfg.om.get_object_path_by_lvm_id(full_name)
if dbo:
# If you specify a size you get a 'thick' snapshot even if
# it is a thin lv
if not dbo.IsThinVolume:
if optional_size == 0:
space = dbo.SizeBytes / 80
remainder = space % 512
optional_size = space + 512 - remainder
rc, out, err = cmdhandler.vg_lv_snapshot(
lv_name, snapshot_options, name, optional_size)
if rc == 0:
return_path = '/'
full_name = "%s/%s" % (dbo.vg_name_lookup(), name)
lvs = load_lvs([full_name], emit_signal=True)[0]
for l in lvs:
return_path = l.dbus_object_path()
# Refresh self and all included PVs
cfg.load(cache_refresh=False)
return return_path
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(lv_uuid, lv_name))
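
When no size is requested and the origin is not a thin volume, the snapshot code above reserves roughly 1/80th of the origin's size and rounds that up to the next 512-byte boundary. Worked through for a hypothetical 100 GiB origin:

size_bytes = 100 * 1024 ** 3             # 107374182400
space = size_bytes // 80                 # 1342177280 (exactly 1.25 GiB)
remainder = space % 512                  # 0, already 512-byte aligned
optional_size = space + 512 - remainder  # 1342177792; the formula always
                                         # adds at least one extra 512-byte step
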
@dbus.service.method(
dbus_interface=LV_INTERFACE,
@ -620,23 +555,38 @@ class Lv(LvCommon):
resize_options):
# Make sure we have a dbus object representing it
pv_dests = []
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
# If we have PVs, verify them
if len(pv_dests_and_ranges):
for pr in pv_dests_and_ranges:
pv_dbus_obj = cfg.om.get_object_by_path(pr[0])
if not pv_dbus_obj:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'PV Destination (%s) not found' % pr[0])
if dbo:
# If we have PVs, verify them
if len(pv_dests_and_ranges):
for pr in pv_dests_and_ranges:
pv_dbus_obj = cfg.om.get_object_by_path(pr[0])
if not pv_dbus_obj:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'PV Destination (%s) not found' % pr[0])
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
size_change = new_size_bytes - dbo.SizeBytes
LvCommon.handle_execute(*cmdhandler.lv_resize(
dbo.lvm_id, size_change,pv_dests, resize_options))
return "/"
size_change = new_size_bytes - dbo.SizeBytes
rc, out, err = cmdhandler.lv_resize(dbo.lvm_id, size_change,
pv_dests, resize_options)
if rc == 0:
# Refresh what's changed
cfg.load()
return "/"
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(lv_uuid, lv_name))
@dbus.service.method(
dbus_interface=LV_INTERFACE,
@ -669,10 +619,23 @@ class Lv(LvCommon):
def _lv_activate_deactivate(uuid, lv_name, activate, control_flags,
options):
# Make sure we have a dbus object representing it
LvCommon.validate_dbus_object(uuid, lv_name)
LvCommon.handle_execute(*cmdhandler.activate_deactivate(
'lvchange', lv_name, activate, control_flags, options))
return '/'
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, lv_name)
if dbo:
rc, out, err = cmdhandler.activate_deactivate(
'lvchange', lv_name, activate, control_flags, options)
if rc == 0:
dbo.refresh()
return '/'
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(uuid, lv_name))
@dbus.service.method(
dbus_interface=LV_INTERFACE,
@ -704,10 +667,25 @@ class Lv(LvCommon):
@staticmethod
def _add_rm_tags(uuid, lv_name, tags_add, tags_del, tag_options):
# Make sure we have a dbus object representing it
LvCommon.validate_dbus_object(uuid, lv_name)
LvCommon.handle_execute(*cmdhandler.lv_tag(
lv_name, tags_add, tags_del, tag_options))
return '/'
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, lv_name)
if dbo:
rc, out, err = cmdhandler.lv_tag(
lv_name, tags_add, tags_del, tag_options)
if rc == 0:
dbo.refresh()
return '/'
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(uuid, lv_name))
@dbus.service.method(
dbus_interface=LV_INTERFACE,
@ -743,152 +721,6 @@ class Lv(LvCommon):
cb, cbe, return_tuple=False)
cfg.worker_q.put(r)
@staticmethod
def _writecache_lv(lv_uuid, lv_name, lv_object_path, cache_options):
# Make sure we have a dbus object representing it
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
# Make sure we have dbus object representing lv to cache
lv_to_cache = cfg.om.get_object_by_path(lv_object_path)
if lv_to_cache:
fcn = lv_to_cache.lv_full_name()
rc, out, err = cmdhandler.lv_writecache_lv(
dbo.lv_full_name(), fcn, cache_options)
if rc == 0:
# When we cache an LV, the cache pool and the lv that is getting
# cached need to be removed from the object manager and
# re-created as their interfaces have changed!
mt_remove_dbus_objects((dbo, lv_to_cache))
cfg.load()
lv_converted = cfg.om.get_object_path_by_lvm_id(fcn)
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE, 'LV to cache with object path %s not present!' %
lv_object_path)
return lv_converted
@dbus.service.method(
dbus_interface=LV_INTERFACE,
in_signature='oia{sv}',
out_signature='(oo)',
async_callbacks=('cb', 'cbe'))
def WriteCacheLv(self, lv_object, tmo, cache_options, cb, cbe):
r = RequestEntry(
tmo, Lv._writecache_lv,
(self.Uuid, self.lvm_id, lv_object,
cache_options), cb, cbe)
cfg.worker_q.put(r)
# noinspection PyPep8Naming
@utils.dbus_property(VDO_POOL_INTERFACE, 'OperatingMode', 's')
@utils.dbus_property(VDO_POOL_INTERFACE, 'CompressionState', 's')
@utils.dbus_property(VDO_POOL_INTERFACE, 'IndexState', 's')
@utils.dbus_property(VDO_POOL_INTERFACE, 'UsedSize', 't')
@utils.dbus_property(VDO_POOL_INTERFACE, 'SavingPercent', 'd')
@utils.dbus_property(VDO_POOL_INTERFACE, 'Compression', 's')
@utils.dbus_property(VDO_POOL_INTERFACE, 'Deduplication', 's')
@utils.dbus_property(VDO_POOL_INTERFACE, 'UseMetadataHints', 's')
@utils.dbus_property(VDO_POOL_INTERFACE, 'MinimumIoSize', 'u')
@utils.dbus_property(VDO_POOL_INTERFACE, 'BlockMapCacheSize', "t")
@utils.dbus_property(VDO_POOL_INTERFACE, 'BlockMapEraLength', 'u')
@utils.dbus_property(VDO_POOL_INTERFACE, 'UseSparseIndex', 's')
@utils.dbus_property(VDO_POOL_INTERFACE, 'IndexMemorySize', 't')
@utils.dbus_property(VDO_POOL_INTERFACE, 'SlabSize', 't')
@utils.dbus_property(VDO_POOL_INTERFACE, 'AckThreads', 'u')
@utils.dbus_property(VDO_POOL_INTERFACE, 'BioThreads', 'u')
@utils.dbus_property(VDO_POOL_INTERFACE, 'BioRotation', 'u')
@utils.dbus_property(VDO_POOL_INTERFACE, 'CpuThreads', 'u')
@utils.dbus_property(VDO_POOL_INTERFACE, 'HashZoneThreads', 'u')
@utils.dbus_property(VDO_POOL_INTERFACE, 'LogicalThreads', 'u')
@utils.dbus_property(VDO_POOL_INTERFACE, 'PhysicalThreads', 'u')
@utils.dbus_property(VDO_POOL_INTERFACE, 'MaxDiscard', 'u')
@utils.dbus_property(VDO_POOL_INTERFACE, 'WritePolicy', 's')
@utils.dbus_property(VDO_POOL_INTERFACE, 'HeaderSize', 'u')
class LvVdoPool(Lv):
_DataLv_meta = ("o", VDO_POOL_INTERFACE)
def __init__(self, object_path, object_state):
super(LvVdoPool, self).__init__(object_path, object_state)
self.set_interface(VDO_POOL_INTERFACE)
self._data_lv, _ = self._get_data_meta()
@property
def DataLv(self):
return dbus.ObjectPath(self._data_lv)
@staticmethod
def _enable_disable_compression(pool_uuid, pool_name, enable, comp_options):
# Make sure we have a dbus object representing it
LvCommon.validate_dbus_object(pool_uuid, pool_name)
# Rename the logical volume
LvCommon.handle_execute(*cmdhandler.lv_vdo_compression(
pool_name, enable, comp_options))
return '/'
@dbus.service.method(
dbus_interface=VDO_POOL_INTERFACE,
in_signature='ia{sv}',
out_signature='o',
async_callbacks=('cb', 'cbe'))
def EnableCompression(self, tmo, comp_options, cb, cbe):
r = RequestEntry(
tmo, LvVdoPool._enable_disable_compression,
(self.Uuid, self.lvm_id, True, comp_options),
cb, cbe, False)
cfg.worker_q.put(r)
@dbus.service.method(
dbus_interface=VDO_POOL_INTERFACE,
in_signature='ia{sv}',
out_signature='o',
async_callbacks=('cb', 'cbe'))
def DisableCompression(self, tmo, comp_options, cb, cbe):
r = RequestEntry(
tmo, LvVdoPool._enable_disable_compression,
(self.Uuid, self.lvm_id, False, comp_options),
cb, cbe, False)
cfg.worker_q.put(r)
@staticmethod
def _enable_disable_deduplication(pool_uuid, pool_name, enable, dedup_options):
# Make sure we have a dbus object representing it
LvCommon.validate_dbus_object(pool_uuid, pool_name)
# Rename the logical volume
LvCommon.handle_execute(*cmdhandler.lv_vdo_deduplication(
pool_name, enable, dedup_options))
return '/'
@dbus.service.method(
dbus_interface=VDO_POOL_INTERFACE,
in_signature='ia{sv}',
out_signature='o',
async_callbacks=('cb', 'cbe'))
def EnableDeduplication(self, tmo, dedup_options, cb, cbe):
r = RequestEntry(
tmo, LvVdoPool._enable_disable_deduplication,
(self.Uuid, self.lvm_id, True, dedup_options),
cb, cbe, False)
cfg.worker_q.put(r)
@dbus.service.method(
dbus_interface=VDO_POOL_INTERFACE,
in_signature='ia{sv}',
out_signature='o',
async_callbacks=('cb', 'cbe'))
def DisableDeduplication(self, tmo, dedup_options, cb, cbe):
r = RequestEntry(
tmo, LvVdoPool._enable_disable_deduplication,
(self.Uuid, self.lvm_id, False, dedup_options),
cb, cbe, False)
cfg.worker_q.put(r)
# noinspection PyPep8Naming
class LvThinPool(Lv):
@ -911,11 +743,28 @@ class LvThinPool(Lv):
@staticmethod
def _lv_create(lv_uuid, lv_name, name, size_bytes, create_options):
# Make sure we have a dbus object representing it
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
LvCommon.handle_execute(*cmdhandler.lv_lv_create(
lv_name, create_options, name, size_bytes))
full_name = "%s/%s" % (dbo.vg_name_lookup(), name)
return cfg.om.get_object_path_by_lvm_id(full_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
lv_created = '/'
if dbo:
rc, out, err = cmdhandler.lv_lv_create(
lv_name, create_options, name, size_bytes)
if rc == 0:
full_name = "%s/%s" % (dbo.vg_name_lookup(), name)
lvs = load_lvs([full_name], emit_signal=True)[0]
for l in lvs:
lv_created = l.dbus_object_path()
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(lv_uuid, lv_name))
return lv_created
@dbus.service.method(
dbus_interface=THIN_POOL_INTERFACE,
@ -952,13 +801,14 @@ class LvCachePool(Lv):
@staticmethod
def _cache_lv(lv_uuid, lv_name, lv_object_path, cache_options):
# Make sure we have a dbus object representing cache pool
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
# Make sure we have dbus object representing lv to cache
lv_to_cache = cfg.om.get_object_by_path(lv_object_path)
if lv_to_cache:
if dbo and lv_to_cache:
fcn = lv_to_cache.lv_full_name()
rc, out, err = cmdhandler.lv_cache_lv(
dbo.lv_full_name(), fcn, cache_options)
@ -966,18 +816,27 @@ class LvCachePool(Lv):
# When we cache an LV, the cache pool and the lv that is getting
# cached need to be removed from the object manager and
# re-created as their interfaces have changed!
mt_remove_dbus_objects((dbo, lv_to_cache))
cfg.om.remove_object(dbo, emit_signal=True)
cfg.om.remove_object(lv_to_cache, emit_signal=True)
cfg.load()
lv_converted = cfg.om.get_object_path_by_lvm_id(fcn)
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE, 'LV to cache with object path %s not present!' %
lv_object_path)
msg = ""
if not dbo:
msg += 'CachePool LV with uuid %s and name %s not present!' % \
(lv_uuid, lv_name)
if not lv_to_cache:
msg += 'LV to cache with object path %s not present!' % \
(lv_object_path)
raise dbus.exceptions.DBusException(LV_INTERFACE, msg)
return lv_converted
@dbus.service.method(
@ -1008,25 +867,32 @@ class LvCacheLv(Lv):
@staticmethod
def _detach_lv(lv_uuid, lv_name, detach_options, destroy_cache):
# Make sure we have a dbus object representing cache pool
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
# Get current cache name
cache_pool = cfg.om.get_object_by_path(dbo.CachePool)
if dbo:
rc, out, err = cmdhandler.lv_detach_cache(
dbo.lv_full_name(), detach_options, destroy_cache)
if rc == 0:
# The cache pool gets removed as hidden and put back to
# visible, so let's delete
mt_remove_dbus_objects((cache_pool, dbo))
cfg.load()
# Get current cache name
cache_pool = cfg.om.get_object_by_path(dbo.CachePool)
uncached_lv_path = cfg.om.get_object_path_by_lvm_id(lv_name)
rc, out, err = cmdhandler.lv_detach_cache(
dbo.lv_full_name(), detach_options, destroy_cache)
if rc == 0:
# The cache pool gets removed as hidden and put back to
# visible, so let's delete
cfg.om.remove_object(cache_pool, emit_signal=True)
cfg.om.remove_object(dbo, emit_signal=True)
cfg.load()
uncached_lv_path = cfg.om.get_object_path_by_lvm_id(lv_name)
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
'LV with uuid %s and name %s not present!' %
(lv_uuid, lv_name))
return uncached_lv_path
@dbus.service.method(
@ -1060,4 +926,4 @@ class LvSnapShot(Lv):
(SNAPSHOT_INTERFACE, self.Uuid, self.lvm_id,
merge_options, job_state), cb, cbe, False,
job_state)
background.cmd_runner(r)
cfg.worker_q.put(r)


@ -1,4 +1,4 @@
#!@PYTHON3@
#!/usr/bin/env python3
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
@ -13,6 +13,7 @@
import subprocess
import shlex
from fcntl import fcntl, F_GETFL, F_SETFL
import os
import traceback
import sys
@ -28,8 +29,7 @@ except ImportError:
from lvmdbusd.cfg import LVM_CMD
from lvmdbusd.utils import log_debug, log_error, add_no_notify, make_non_block,\
read_decoded
from lvmdbusd.utils import log_debug, log_error
SHELL_PROMPT = "lvm> "
@ -42,74 +42,59 @@ def _quote_arg(arg):
class LVMShellProxy(object):
# Read until we get prompt back and a result
# @param: no_output Caller expects no output to report FD
# Returns stdout, report, stderr (report is JSON!)
def _read_until_prompt(self, no_output=False):
def _read_until_prompt(self):
stdout = ""
report = ""
stderr = ""
keep_reading = True
extra_passes = 3
report_json = {}
prev_report_len = 0
# Try reading from all FDs to prevent one from filling up and causing
# a hang. Keep reading until we get the prompt back and the report
# FD contains valid JSON.
while keep_reading:
# a hang. We also assume that the lvm prompt will not come back
# until all of the output from stderr and the report descriptor
# has already been received.
while not stdout.endswith(SHELL_PROMPT):
try:
rd_fd = [
self.lvm_shell.stdout.fileno(),
self.report_stream.fileno(),
self.report_r,
self.lvm_shell.stderr.fileno()]
ready = select.select(rd_fd, [], [], 2)
for r in ready[0]:
if r == self.lvm_shell.stdout.fileno():
stdout += read_decoded(self.lvm_shell.stdout)
elif r == self.report_stream.fileno():
report += read_decoded(self.report_stream)
while True:
tmp = self.lvm_shell.stdout.read()
if tmp:
stdout += tmp.decode("utf-8")
else:
break
elif r == self.report_r:
while True:
tmp = os.read(self.report_r, 16384)
if tmp:
report += tmp.decode("utf-8")
if len(tmp) != 16384:
break
else:
break
elif r == self.lvm_shell.stderr.fileno():
stderr += read_decoded(self.lvm_shell.stderr)
while True:
tmp = self.lvm_shell.stderr.read()
if tmp:
stderr += tmp.decode("utf-8")
else:
break
# Check to see if the lvm process died on us
if self.lvm_shell.poll():
raise Exception(self.lvm_shell.returncode, "%s" % stderr)
if stdout.endswith(SHELL_PROMPT):
if no_output:
keep_reading = False
else:
cur_report_len = len(report)
if cur_report_len != 0:
# Only bother to parse if we have more data
if prev_report_len != cur_report_len:
prev_report_len = cur_report_len
# Parse the JSON if it's good we are done,
# if not we will try to read some more.
try:
report_json = json.loads(report)
keep_reading = False
except ValueError:
pass
if keep_reading:
extra_passes -= 1
if extra_passes <= 0:
if len(report):
raise ValueError("Invalid json: %s" %
report)
else:
raise ValueError(
"lvm returned no JSON output!")
except IOError as ioe:
log_debug(str(ioe))
pass
return stdout, report_json, stderr
return stdout, report, stderr
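
Both versions of _read_until_prompt multiplex several non-blocking descriptors with select() and keep draining them until the shell prompt shows up on stdout, so no single pipe can fill and stall the child. A cut-down, generic sketch of that read-until-sentinel idiom (the descriptor list and sentinel are placeholders):

import os
import select

def read_until_sentinel(fds, sentinel, timeout=2.0):
    # fds: non-empty list of non-blocking file descriptors; the sentinel
    # (e.g. a shell prompt) is expected on fds[0].
    buffers = {fd: b"" for fd in fds}
    watched = list(fds)
    while watched and sentinel not in buffers[fds[0]]:
        ready, _, _ = select.select(watched, [], [], timeout)
        for fd in ready:
            chunk = os.read(fd, 16384)
            if chunk:
                buffers[fd] += chunk
            else:                        # EOF: stop watching this descriptor
                watched.remove(fd)
    return buffers
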
def _write_cmd(self, cmd):
cmd_bytes = bytes(cmd, "utf-8")
@ -129,10 +114,7 @@ class LVMShellProxy(object):
except FileExistsError:
pass
# We have to open non-blocking as the other side isn't open until
# we actually fork the process.
self.report_fd = os.open(tmp_file, os.O_NONBLOCK)
self.report_stream = os.fdopen(self.report_fd, 'rb', 0)
self.report_r = os.open(tmp_file, os.O_NONBLOCK)
# Setup the environment for using our own socket for reporting
local_env = copy.deepcopy(os.environ)
@ -143,6 +125,9 @@ class LVMShellProxy(object):
# when utilizing the lvm shell.
local_env["LVM_LOG_FILE_MAX_LINES"] = "0"
flags = fcntl(self.report_r, F_GETFL)
fcntl(self.report_r, F_SETFL, flags | os.O_NONBLOCK)
# run the lvm shell
self.lvm_shell = subprocess.Popen(
[LVM_CMD + " 32>%s" % tmp_file],
@ -150,18 +135,20 @@ class LVMShellProxy(object):
stderr=subprocess.PIPE, close_fds=True, shell=True)
try:
make_non_block(self.lvm_shell.stdout)
make_non_block(self.lvm_shell.stderr)
flags = fcntl(self.lvm_shell.stdout, F_GETFL)
fcntl(self.lvm_shell.stdout, F_SETFL, flags | os.O_NONBLOCK)
flags = fcntl(self.lvm_shell.stderr, F_GETFL)
fcntl(self.lvm_shell.stderr, F_SETFL, flags | os.O_NONBLOCK)
# wait for the first prompt
errors = self._read_until_prompt(no_output=True)[2]
errors = self._read_until_prompt()[2]
if errors and len(errors):
raise RuntimeError(errors)
except:
raise
finally:
# These will get deleted when the FD count goes to zero so we
# can be sure to clean up correctly no matter how we finish
# These will get deleted when the FD count goes to zero so we can be
# sure to clean up correctly no matter how we finish
os.unlink(tmp_file)
os.rmdir(tmp_dir)
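
The constructor above opens the report file non-blocking before the child exists (as the comment notes, the other side is not open yet) and then lets the shell attach descriptor 32 of the lvm process to it via the " 32>%s" redirection. A small self-contained demo of the same idea, using a named pipe and echo instead of lvm:

import os
import subprocess
import tempfile

tmp_dir = tempfile.mkdtemp(prefix="fd32_demo_")
fifo = os.path.join(tmp_dir, "report_fifo")
os.mkfifo(fifo, 0o600)

# Opening the read side O_NONBLOCK succeeds even with no writer yet
# (O_RDONLY is flag 0, so it is implied).
reader = os.open(fifo, os.O_NONBLOCK)
# The child's shell opens fd 32 on the pipe, then echo writes through it.
child = subprocess.Popen(["echo demo-report 32>%s >&32" % fifo], shell=True)
child.wait()
print(os.read(reader, 4096))            # b'demo-report\n'

os.close(reader)
os.unlink(fifo)
os.rmdir(tmp_dir)
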
@ -170,32 +157,39 @@ class LVMShellProxy(object):
self._write_cmd('lastlog\n')
# read everything from the STDOUT to the next prompt
stdout, report_json, stderr = self._read_until_prompt()
if 'log' in report_json:
error_msg = ""
# Walk the entire log array and build an error string
for log_entry in report_json['log']:
if log_entry['log_type'] == "error":
if error_msg:
error_msg += ', ' + log_entry['log_message']
else:
error_msg = log_entry['log_message']
stdout, report, stderr = self._read_until_prompt()
return error_msg
try:
log = json.loads(report)
return 'No error reason provided! (missing "log" section)'
if 'log' in log:
error_msg = ""
# Walk the entire log array and build an error string
for log_entry in log['log']:
if log_entry['log_type'] == "error":
if error_msg:
error_msg += ', ' + log_entry['log_message']
else:
error_msg = log_entry['log_message']
return error_msg
return 'No error reason provided! (missing "log" section)'
except ValueError:
log_error("Invalid JSON returned from LVM")
log_error("BEGIN>>\n%s\n<<END" % report)
return "Invalid JSON returned from LVM when retrieving exit code"
def call_lvm(self, argv, debug=False):
rc = 1
error_msg = ""
json_result = ""
if self.lvm_shell.poll():
raise Exception(
self.lvm_shell.returncode,
"Underlying lvm shell process is not present!")
argv = add_no_notify(argv)
# create the command string
cmd = " ".join(_quote_arg(arg) for arg in argv)
cmd += "\n"
@ -204,24 +198,23 @@ class LVMShellProxy(object):
self._write_cmd(cmd)
# read everything from the STDOUT to the next prompt
stdout, report_json, stderr = self._read_until_prompt()
stdout, report, stderr = self._read_until_prompt()
# Parse the report to see what happened
if 'log' in report_json:
ret_code = int(report_json['log'][-1:][0]['log_ret_code'])
# If we have an exported vg we get a log_ret_code == 5 when
# we do a 'fullreport'
if (ret_code == 1) or (ret_code == 5 and argv[0] == 'fullreport'):
rc = 0
else:
error_msg = self.get_error_msg()
if report and len(report):
json_result = json.loads(report)
if 'log' in json_result:
if json_result['log'][-1:][0]['log_ret_code'] == '1':
rc = 0
else:
error_msg = self.get_error_msg()
if debug or rc != 0:
log_error(('CMD: %s' % cmd))
log_error(("EC = %d" % rc))
log_error(("ERROR_MSG=\n %s\n" % error_msg))
return rc, report_json, error_msg
return rc, json_result, error_msg
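
call_lvm above decides success from the last entry of the "log" array in lvm's JSON report (a log_ret_code of 1 means success; one version also tolerates 5 for 'fullreport' against an exported VG). A tiny standalone check against a hand-written, abridged report:

import json

sample = json.loads("""
{
  "report": [],
  "log": [
    {"log_type": "status", "log_message": "command ok", "log_ret_code": "1"}
  ]
}
""")

last = sample["log"][-1]
rc = 0 if int(last["log_ret_code"]) == 1 else 1
print(rc)   # 0
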
def exit_shell(self):
try:
@ -258,3 +251,5 @@ if __name__ == "__main__":
pass
except Exception:
traceback.print_exc(file=sys.stdout)
finally:
print()


@ -1,4 +1,4 @@
#!@PYTHON3@
#!/usr/bin/env python3
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
@ -20,7 +20,7 @@ from lvmdbusd.utils import log_debug, log_error
class DataStore(object):
def __init__(self, usejson=True, vdo_support=False):
def __init__(self, usejson=True):
self.pvs = {}
self.vgs = {}
self.lvs = {}
@ -43,8 +43,6 @@ class DataStore(object):
else:
self.json = usejson
self.vdo_support = vdo_support
@staticmethod
def _insert_record(table, key, record, allowed_multiple):
if key in table:
@ -70,20 +68,6 @@ class DataStore(object):
else:
table[key] = record
@staticmethod
def _pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup):
for p in c_pvs.values():
# Capture which PVs are associated with which VG
if p['vg_uuid'] not in c_pvs_in_vgs:
c_pvs_in_vgs[p['vg_uuid']] = []
if p['vg_name']:
c_pvs_in_vgs[p['vg_uuid']].append(
(p['pv_name'], p['pv_uuid']))
# Lookup for translating between /dev/<name> and pv uuid
c_lookup[p['pv_name']] = p['pv_uuid']
@staticmethod
def _parse_pvs(_pvs):
pvs = sorted(_pvs, key=lambda pk: pk['pv_name'])
@ -97,7 +81,18 @@ class DataStore(object):
c_pvs, p['pv_uuid'], p,
['pvseg_start', 'pvseg_size', 'segtype'])
DataStore._pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup)
for p in c_pvs.values():
# Capture which PVs are associated with which VG
if p['vg_uuid'] not in c_pvs_in_vgs:
c_pvs_in_vgs[p['vg_uuid']] = []
if p['vg_name']:
c_pvs_in_vgs[p['vg_uuid']].append(
(p['pv_name'], p['pv_uuid']))
# Lookup for translating between /dev/<name> and pv uuid
c_lookup[p['pv_name']] = p['pv_uuid']
return c_pvs, c_lookup, c_pvs_in_vgs
@staticmethod
@ -137,28 +132,29 @@ class DataStore(object):
i['pvseg_size'] = i['pv_pe_count']
i['segtype'] = 'free'
DataStore._pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup)
for p in c_pvs.values():
# Capture which PVs are associated with which VG
if p['vg_uuid'] not in c_pvs_in_vgs:
c_pvs_in_vgs[p['vg_uuid']] = []
if p['vg_name']:
c_pvs_in_vgs[p['vg_uuid']].append(
(p['pv_name'], p['pv_uuid']))
# Lookup for translating between /dev/<name> and pv uuid
c_lookup[p['pv_name']] = p['pv_uuid']
return c_pvs, c_lookup, c_pvs_in_vgs
@staticmethod
def _parse_vgs(_vgs):
vgs = sorted(_vgs, key=lambda vk: vk['vg_uuid'])
vgs = sorted(_vgs, key=lambda vk: vk['vg_name'])
c_vgs = OrderedDict()
c_lookup = {}
for i in vgs:
vg_name = i['vg_name']
# Lvm allows duplicate vg names. When this occurs, each subsequent
# matching VG name will be called vg_name:vg_uuid. Note: ':' is an
# invalid character for lvm VG names
if vg_name in c_lookup:
vg_name = "%s:%s" % (vg_name, i['vg_uuid'])
i['vg_name'] = vg_name
c_lookup[vg_name] = i['vg_uuid']
c_lookup[i['vg_name']] = i['vg_uuid']
DataStore._insert_record(c_vgs, i['vg_uuid'], i, [])
return c_vgs, c_lookup
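
One version of _parse_vgs above disambiguates duplicate VG names by renaming later occurrences to name:vg_uuid; as the comment notes, ':' cannot appear in a real VG name, so the synthetic names cannot collide. A minimal sketch of that rename-on-collision rule:

def disambiguate(vgs):
    # vgs: list of (vg_name, vg_uuid); later duplicates become "name:uuid".
    lookup = {}
    for name, uuid in vgs:
        if name in lookup:
            name = "%s:%s" % (name, uuid)
        lookup[name] = uuid
    return lookup

print(disambiguate([("data", "uuid-1"), ("data", "uuid-2")]))
# {'data': 'uuid-1', 'data:uuid-2': 'uuid-2'}
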
@ -173,22 +169,13 @@ class DataStore(object):
tmp_vg.extend(r['vg'])
# Sort for consistent output, however this is optional
vgs = sorted(tmp_vg, key=lambda vk: vk['vg_uuid'])
vgs = sorted(tmp_vg, key=lambda vk: vk['vg_name'])
c_vgs = OrderedDict()
c_lookup = {}
for i in vgs:
vg_name = i['vg_name']
# Lvm allows duplicate vg names. When this occurs, each subsequent
# matching VG name will be called vg_name:vg_uuid. Note: ':' is an
# invalid character for lvm VG names
if vg_name in c_lookup:
vg_name = "%s:%s" % (vg_name, i['vg_uuid'])
i['vg_name'] = vg_name
c_lookup[vg_name] = i['vg_uuid']
c_lookup[i['vg_name']] = i['vg_uuid']
c_vgs[i['vg_uuid']] = i
return c_vgs, c_lookup
@ -243,7 +230,8 @@ class DataStore(object):
return DataStore._parse_lvs_common(c_lvs, c_lv_full_lookup)
def _parse_lvs_json(self, _all):
@staticmethod
def _parse_lvs_json(_all):
c_lvs = OrderedDict()
c_lv_full_lookup = {}
@ -263,13 +251,8 @@ class DataStore(object):
if 'seg' in r:
for s in r['seg']:
r = c_lvs[s['lv_uuid']]
r.setdefault('seg_pe_ranges', []).\
append(s['seg_pe_ranges'])
r.setdefault('seg_pe_ranges', []).append(s['seg_pe_ranges'])
r.setdefault('segtype', []).append(s['segtype'])
if self.vdo_support:
for seg_key, seg_val in s.items():
if seg_key.startswith("vdo_"):
r[seg_key] = seg_val
return DataStore._parse_lvs_common(c_lvs, c_lv_full_lookup)
@ -545,10 +528,6 @@ if __name__ == "__main__":
for v in ds.vgs.values():
pp.pprint(v)
print("VG name to UUID")
for k, v in ds.vg_name_to_uuid.items():
print("%s: %s" % (k, v))
print("LVS")
for v in ds.lvs.values():
pp.pprint(v)


@ -1,4 +1,4 @@
#!@PYTHON3@
#!/usr/bin/env python3
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#


@ -10,7 +10,7 @@
from . import cfg
from . import objectmanager
from . import utils
from .cfg import BUS_NAME, BASE_INTERFACE, BASE_OBJ_PATH, MANAGER_OBJ_PATH
from .cfg import BASE_INTERFACE, BASE_OBJ_PATH, MANAGER_OBJ_PATH
import threading
from . import cmdhandler
import time
@ -20,8 +20,9 @@ import dbus.mainloop.glib
from . import lvmdb
# noinspection PyUnresolvedReferences
from gi.repository import GLib
from .fetch import StateUpdate
from .fetch import load
from .manager import Manager
from .background import background_reaper
import traceback
import queue
from . import udevwatch
@ -29,8 +30,7 @@ from .utils import log_debug, log_error
import argparse
import os
import sys
from .cmdhandler import LvmFlightRecorder, supports_vdo
from .request import RequestEntry
from .refresh import handle_external_event, event_complete
class Lvm(objectmanager.ObjectManager):
@ -38,16 +38,54 @@ class Lvm(objectmanager.ObjectManager):
super(Lvm, self).__init__(object_path, BASE_INTERFACE)
def _discard_pending_refreshes():
# We just handled a refresh; any refreshes still in the queue can be
# removed because, by definition, they are older than the one we just did.
# As we limit the number of refreshes getting into the queue
# we should only ever have one to remove.
requests = []
while not cfg.worker_q.empty():
try:
r = cfg.worker_q.get(block=False)
if r.method != handle_external_event:
requests.append(r)
else:
# Make sure we make this event complete even though it didn't
# run, otherwise no other events will get processed
event_complete()
break
except queue.Empty:
break
# Any requests we removed, but did not discard need to be re-queued
for r in requests:
cfg.worker_q.put(r)
def process_request():
while cfg.run.value != 0:
# noinspection PyBroadException
try:
req = cfg.worker_q.get(True, 5)
start = cfg.db.num_refreshes
log_debug(
"Method start: %s with args %s (callback = %s)" %
(str(req.method), str(req.arguments), str(req.cb)))
"Running method: %s with args %s" %
(str(req.method), str(req.arguments)))
req.run_cmd()
log_debug("Method complete: %s" % str(req.method))
end = cfg.db.num_refreshes
num_refreshes = end - start
if num_refreshes > 0:
_discard_pending_refreshes()
if num_refreshes > 1:
log_debug(
"Inspect method %s for too many refreshes" %
(str(req.method)))
log_debug("Complete ")
except queue.Empty:
pass
except Exception:
@ -55,32 +93,6 @@ def process_request():
utils.log_error("process_request exception: \n%s" % st)
def check_bb_size(value):
v = int(value)
if v < 0:
raise argparse.ArgumentTypeError(
"positive integers only ('%s' invalid)" % value)
return v
def install_signal_handlers():
# Because of the glib main loop stuff the python signal handler code is
# apparently not usable and we need to use the glib calls instead
signal_add = None
if hasattr(GLib, 'unix_signal_add'):
signal_add = GLib.unix_signal_add
elif hasattr(GLib, 'unix_signal_add_full'):
signal_add = GLib.unix_signal_add_full
if signal_add:
signal_add(GLib.PRIORITY_HIGH, signal.SIGHUP, utils.handler, signal.SIGHUP)
signal_add(GLib.PRIORITY_HIGH, signal.SIGINT, utils.handler, signal.SIGINT)
signal_add(GLib.PRIORITY_HIGH, signal.SIGUSR1, utils.handler, signal.SIGUSR1)
else:
log_error("GLib.unix_signal_[add|add_full] are NOT available!")
def main():
start = time.time()
# Add simple command line handling
@ -103,12 +115,6 @@ def main():
help="Use the lvm shell, not fork & exec lvm",
default=False,
dest='use_lvm_shell')
parser.add_argument(
"--blackboxsize",
help="Size of the black box flight recorder, 0 to disable",
default=10,
type=check_bb_size,
dest='bb_size')
use_session = os.getenv('LVMDBUSD_USE_SESSION', False)
@ -116,64 +122,53 @@ def main():
os.environ["LC_ALL"] = "C"
cfg.args = parser.parse_args()
cfg.create_request_entry = RequestEntry
# We create a flight recorder in cmdhandler too, but we replace it here
# as the user may be specifying a different size. The default one in
# cmdhandler is for when we are running other code with a different main.
cfg.blackbox = LvmFlightRecorder(cfg.args.bb_size)
if cfg.args.use_lvm_shell and not cfg.args.use_json:
log_error("You cannot specify --lvmshell and --nojson")
sys.exit(1)
# We will dynamically add the interfaces which support VDO if VDO
# support exists.
cfg.vdo_support = supports_vdo()
if cfg.vdo_support and not cfg.args.use_json:
log_error("You cannot specify --nojson when lvm has VDO support")
sys.exit(1)
cmdhandler.set_execution(cfg.args.use_lvm_shell)
# List of threads that we start up
thread_list = []
install_signal_handlers()
# Install signal handlers
for s in [signal.SIGHUP, signal.SIGINT]:
try:
signal.signal(s, utils.handler)
except RuntimeError:
pass
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
dbus.mainloop.glib.threads_init()
cmdhandler.set_execution(cfg.args.use_lvm_shell)
if use_session:
cfg.bus = dbus.SessionBus()
else:
cfg.bus = dbus.SystemBus()
# The base_name variable needs to stay referenced for the daemon to keep its D-Bus name.
# noinspection PyUnusedLocal
base_name = dbus.service.BusName(BUS_NAME, cfg.bus)
base_name = dbus.service.BusName(BASE_INTERFACE, cfg.bus)
cfg.om = Lvm(BASE_OBJ_PATH)
cfg.om.register_object(Manager(MANAGER_OBJ_PATH))
cfg.db = lvmdb.DataStore(cfg.args.use_json, cfg.vdo_support)
cfg.load = load
# Use a thread to process requests so that we do not hang the dbus library
# thread that is handling the dbus interface
cfg.db = lvmdb.DataStore(cfg.args.use_json)
# Start up thread to monitor pv moves
thread_list.append(
threading.Thread(target=process_request, name='process_request'))
threading.Thread(target=background_reaper, name="pv_move_reaper"))
# Have a single thread handle updating lvm and the dbus model so we
# don't have multiple threads doing this at the same time
updater = StateUpdate()
thread_list.append(updater.thread)
cfg.load = updater.load
# Using a thread to process requests.
thread_list.append(threading.Thread(target=process_request))
cfg.load(refresh=False, emit_signal=False)
cfg.loop = GLib.MainLoop()
for thread in thread_list:
thread.daemon = True
thread.start()
for process in thread_list:
process.daemon = True
process.start()
# Add udev watching
if cfg.args.use_udev:
@ -195,10 +190,8 @@ def main():
cfg.loop.run()
udevwatch.remove()
for thread in thread_list:
thread.join()
for process in thread_list:
process.join()
except KeyboardInterrupt:
# If we are unable to register signal handler, we will end up here when
# the service gets a ^C or a kill -2 <parent pid>
utils.handler(signal.SIGINT)
utils.handler(signal.SIGINT, None)
return 0


@ -6,6 +6,7 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .automatedproperties import AutomatedProperties
from . import utils
@ -13,7 +14,9 @@ from .cfg import MANAGER_INTERFACE
import dbus
from . import cfg
from . import cmdhandler
from .fetch import load_pvs, load_vgs
from .request import RequestEntry
from .refresh import event_add
from . import udevwatch
@ -27,17 +30,7 @@ class Manager(AutomatedProperties):
@property
def Version(self):
return dbus.String('1.1.0')
@staticmethod
def handle_execute(rc, out, err):
if rc == 0:
cfg.load()
else:
# Need to work on error handling; it needs to be consistent
raise dbus.exceptions.DBusException(
MANAGER_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
return dbus.String('1.0.0')
@staticmethod
def _pv_create(device, create_options):
@ -47,11 +40,20 @@ class Manager(AutomatedProperties):
pv = cfg.om.get_object_path_by_uuid_lvm_id(device, device)
if pv:
raise dbus.exceptions.DBusException(
MANAGER_INTERFACE, "PV %s Already exists!" % device)
MANAGER_INTERFACE, "PV Already exists!")
created_pv = []
rc, out, err = cmdhandler.pv_create(create_options, [device])
Manager.handle_execute(rc, out, err)
return cfg.om.get_object_path_by_lvm_id(device)
if rc == 0:
pvs = load_pvs([device], emit_signal=True)[0]
for p in pvs:
created_pv = p.dbus_object_path()
else:
raise dbus.exceptions.DBusException(
MANAGER_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
return created_pv
@dbus.service.method(
dbus_interface=MANAGER_INTERFACE,
@ -78,8 +80,20 @@ class Manager(AutomatedProperties):
MANAGER_INTERFACE, 'object path = %s not found' % p)
rc, out, err = cmdhandler.vg_create(create_options, pv_devices, name)
Manager.handle_execute(rc, out, err)
return cfg.om.get_object_path_by_lvm_id(name)
created_vg = "/"
if rc == 0:
vgs = load_vgs([name], emit_signal=True)[0]
for v in vgs:
created_vg = v.dbus_object_path()
# Update the PVS
load_pvs(refresh=True, emit_signal=True, cache_refresh=False)
else:
raise dbus.exceptions.DBusException(
MANAGER_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
return created_vg
@dbus.service.method(
dbus_interface=MANAGER_INTERFACE,
@ -107,10 +121,10 @@ class Manager(AutomatedProperties):
rc = cfg.load(log=False)
if rc != 0:
utils.log_debug('Manager.Refresh - exit %d %d' % (rc, lc),
utils.log_debug('Manager.Refresh - exit %d' % (rc),
'bg_black', 'fg_light_red')
else:
utils.log_debug('Manager.Refresh - exit %d %d' % (rc, lc))
utils.log_debug('Manager.Refresh - exit %d' % (rc))
return rc + lc
@dbus.service.method(
@ -131,28 +145,11 @@ class Manager(AutomatedProperties):
r = RequestEntry(-1, Manager._refresh, (), cb, cbe, False)
cfg.worker_q.put(r)
@dbus.service.method(
dbus_interface=MANAGER_INTERFACE)
def FlightRecorderDump(self):
"""
Dump the flight recorder to syslog
"""
cfg.blackbox.dump()
@staticmethod
def _lookup_by_lvm_id(key):
p = cfg.om.get_object_path_by_uuid_lvm_id(key, key)
if not p:
p = '/'
utils.log_debug('LookUpByLvmId: key = %s, result = %s' % (key, p))
return p
@dbus.service.method(
dbus_interface=MANAGER_INTERFACE,
in_signature='s',
out_signature='o',
async_callbacks=('cb', 'cbe'))
def LookUpByLvmId(self, key, cb, cbe):
out_signature='o')
def LookUpByLvmId(self, key):
"""
Given a lvm id in one of the forms:
@ -164,54 +161,37 @@ class Manager(AutomatedProperties):
return the object path in O(1) time.
:param key: The lookup value
:param cb: dbus python call back parameter, not client visible
:param cbe: dbus python error call back parameter, not client visible
:return: Return the object path. If object not found you will get '/'
"""
r = RequestEntry(-1, Manager._lookup_by_lvm_id, (key,), cb, cbe, False)
cfg.worker_q.put(r)
@staticmethod
def _use_lvm_shell(yes_no):
return dbus.Boolean(cmdhandler.set_execution(yes_no))
p = cfg.om.get_object_path_by_uuid_lvm_id(key, key)
if p:
return p
return '/'
@dbus.service.method(
dbus_interface=MANAGER_INTERFACE,
in_signature='b', out_signature='b',
async_callbacks=('cb', 'cbe'))
def UseLvmShell(self, yes_no, cb, cbe):
in_signature='b', out_signature='b')
def UseLvmShell(self, yes_no):
"""
Allow the client to enable/disable lvm shell, used for testing
:param yes_no:
:param cb: dbus python call back parameter, not client visible
:param cbe: dbus python error call back parameter, not client visible
:return: Boolean
:return: Nothing
"""
r = RequestEntry(-1, Manager._use_lvm_shell, (yes_no,), cb, cbe, False)
cfg.worker_q.put(r)
@staticmethod
def _external_event(command):
utils.log_debug("Processing _external_event= %s" % command,
'bg_black', 'fg_orange')
cfg.load()
return dbus.Boolean(cmdhandler.set_execution(yes_no))
@dbus.service.method(
dbus_interface=MANAGER_INTERFACE,
in_signature='s', out_signature='i')
def ExternalEvent(self, command):
utils.log_debug("ExternalEvent %s" % command)
# If a user didn't explicitly specify udev, we will turn it off now.
if not cfg.args.use_udev:
if udevwatch.remove():
utils.log_debug("ExternalEvent received, disabling "
"udev monitoring")
# We are dependent on external events now to stay current!
cfg.got_external_event = True
r = RequestEntry(
-1, Manager._external_event, (command,), None, None, False)
cfg.worker_q.put(r)
cfg.ee = True
event_add((command,))
return dbus.Int32(0)
@staticmethod
@ -221,8 +201,15 @@ class Manager(AutomatedProperties):
activate, cache, device_path,
major_minor, scan_options)
Manager.handle_execute(rc, out, err)
return '/'
if rc == 0:
# This could potentially change the state quite a bit, so let's
# update everything to be safe
cfg.load()
return '/'
else:
raise dbus.exceptions.DBusException(
MANAGER_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
@dbus.service.method(
dbus_interface=MANAGER_INTERFACE,


@ -32,12 +32,14 @@ class ObjectManager(AutomatedProperties):
self._id_to_object_path = {}
self.rlock = threading.RLock()
@staticmethod
def _get_managed_objects(obj):
with obj.rlock:
@dbus.service.method(
dbus_interface="org.freedesktop.DBus.ObjectManager",
out_signature='a{oa{sa{sv}}}')
def GetManagedObjects(self):
with self.rlock:
rc = {}
try:
for k, v in list(obj._objects.items()):
for k, v in list(self._objects.items()):
path, props = v[0].emit_data()
rc[path] = props
except Exception:
@ -45,14 +47,6 @@ class ObjectManager(AutomatedProperties):
sys.exit(1)
return rc
@dbus.service.method(
dbus_interface="org.freedesktop.DBus.ObjectManager",
out_signature='a{oa{sa{sv}}}', async_callbacks=('cb', 'cbe'))
def GetManagedObjects(self, cb, cbe):
r = cfg.create_request_entry(-1, ObjectManager._get_managed_objects,
(self, ), cb, cbe, False)
cfg.worker_q.put(r)
def locked(self):
"""
If some external code needs to run across a number of different
@ -189,8 +183,8 @@ class ObjectManager(AutomatedProperties):
path = dbus_object.dbus_object_path()
interfaces = dbus_object.interface()
# print('UN-Registering object path %s for %s' %
# (path, dbus_object.lvm_id))
# print 'UN-Registering object path %s for %s' % \
# (path, dbus_object.lvm_id)
self._lookup_remove(path)
@ -223,9 +217,8 @@ class ObjectManager(AutomatedProperties):
:param lvm_id: The lvm identifier
"""
with self.rlock:
lookup_rc = self._id_lookup(lvm_id)
if lookup_rc:
return self.get_object_by_path(lookup_rc)
if lvm_id in self._id_to_object_path:
return self.get_object_by_path(self._id_to_object_path[lvm_id])
return None
def get_object_path_by_lvm_id(self, lvm_id):
@ -235,24 +228,43 @@ class ObjectManager(AutomatedProperties):
:return: Object path or '/' if not found
"""
with self.rlock:
lookup_rc = self._id_lookup(lvm_id)
if lookup_rc:
return lookup_rc
if lvm_id in self._id_to_object_path:
return self._id_to_object_path[lvm_id]
return '/'
def _id_verify(self, path, uuid, lvm_id):
def _uuid_verify(self, path, uuid, lvm_id):
"""
Ensure our lookups are correct
Ensure uuid is present for a successful lvm_id lookup
NOTE: Internal call, assumes under object manager lock
:param path: Path to object we looked up
:param uuid: uuid lookup
:param lvm_id: lvm_id lookup
:param uuid: lvm uuid to verify
:param lvm_id: lvm_id used to find object
:return: None
"""
# There is no durable non-changeable name in lvm
# This gets called when we have found an object based on lvm_id; ensure
# the uuid is correct too, as identifiers can change. There is no durable,
# non-changeable name in lvm
if lvm_id != uuid:
obj = self.get_object_by_path(path)
self._lookup_add(obj, path, lvm_id, uuid)
if uuid and uuid not in self._id_to_object_path:
obj = self.get_object_by_path(path)
self._lookup_add(obj, path, lvm_id, uuid)
def _lvm_id_verify(self, path, uuid, lvm_id):
"""
Ensure lvm_id is present for a successful uuid lookup
NOTE: Internal call, assumes under object manager lock
:param path: Path to object we looked up
:param uuid: uuid used to find object
:param lvm_id: lvm_id to verify
:return: None
"""
# This gets called when we have found an object based on uuid; ensure
# the lvm_id is correct too, as identifiers can change. There is no durable,
# non-changeable name in lvm
if lvm_id != uuid:
if lvm_id and lvm_id not in self._id_to_object_path:
obj = self.get_object_by_path(path)
self._lookup_add(obj, path, lvm_id, uuid)
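
A simplified sketch of the two-key index these verify helpers keep consistent (illustrative names and a throwaway dict; the real object manager also tracks the objects themselves and works under its lock):

_id_to_object_path = {}

def _lookup_add_sketch(path, lvm_id, uuid):
    # Index the same object path under both of its identifiers.
    _id_to_object_path[lvm_id] = path
    _id_to_object_path[uuid] = path

_lookup_add_sketch('/com/redhat/lvmdbus1/Vg/0', 'vg00', 'vg-uuid-0001')
# After "vgrename vg00 vg00new" a lookup by uuid still succeeds; the verify
# helper then re-adds the entry so the new name maps to the same path.
_lookup_add_sketch('/com/redhat/lvmdbus1/Vg/0', 'vg00new', 'vg-uuid-0001')
assert _id_to_object_path['vg00new'] == _id_to_object_path['vg-uuid-0001']
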
def _id_lookup(self, the_id):
path = None
@ -319,22 +331,22 @@ class ObjectManager(AutomatedProperties):
# Let's check for the uuid first
path = self._id_lookup(uuid)
if path:
# Ensure table lookups are correct
self._id_verify(path, uuid, lvm_id)
# Verify the lvm_id is sane
self._lvm_id_verify(path, uuid, lvm_id)
else:
# Unable to find by UUID, lets lookup by lvm_id
path = self._id_lookup(lvm_id)
if path:
# Ensure table lookups are correct
self._id_verify(path, uuid, lvm_id)
# Verify the uuid is sane
self._uuid_verify(path, uuid, lvm_id)
else:
# We have exhausted all lookups, let's create if we can
if path_create:
path = path_create()
self._lookup_add(None, path, lvm_id, uuid)
# print('get_object_path_by_lvm_id(%s, %s, %s): return %s' %
# (uuid, lvm_id, str(path_create), path))
# print('get_object_path_by_lvm_id(%s, %s, %s, %s: return %s' %
# (uuid, lvm_id, str(path_create), str(gen_new), path))
return path


@ -14,7 +14,7 @@ import dbus
from .cfg import PV_INTERFACE
from . import cmdhandler
from .utils import vg_obj_path_generate, n, pv_obj_path_generate, \
lv_object_path_method, _handle_execute
lv_object_path_method
from .loader import common
from .request import RequestEntry
from .state import State
@ -79,9 +79,7 @@ class PvState(State):
self.lv = self._lv_object_list(vg_name)
# It's possible to have a vg_name and no uuid with the main example
# being when the vg_name == '[unknown]'
if vg_uuid and vg_name:
if vg_name:
self.vg_path = cfg.om.get_object_path_by_uuid_lvm_id(
vg_uuid, vg_name, vg_obj_path_generate)
else:
@ -137,23 +135,23 @@ class Pv(AutomatedProperties):
def _remove(pv_uuid, pv_name, remove_options):
# Remove the PV, if successful then remove from the model
# Make sure we have a dbus object representing it
Pv.validate_dbus_object(pv_uuid, pv_name)
Pv.handle_execute(*cmdhandler.pv_remove(pv_name, remove_options))
return '/'
@staticmethod
def handle_execute(rc, out, err):
return _handle_execute(rc, out, err, PV_INTERFACE)
@staticmethod
def validate_dbus_object(pv_uuid, pv_name):
dbo = cfg.om.get_object_by_uuid_lvm_id(pv_uuid, pv_name)
if not dbo:
if dbo:
rc, out, err = cmdhandler.pv_remove(pv_name, remove_options)
if rc == 0:
cfg.om.remove_object(dbo, True)
else:
# Need to work on error handling; it needs to be consistent
raise dbus.exceptions.DBusException(
PV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
PV_INTERFACE,
'PV with uuid %s and name %s not present!' %
(pv_uuid, pv_name))
return dbo
return '/'
@dbus.service.method(
dbus_interface=PV_INTERFACE,
@ -170,9 +168,22 @@ class Pv(AutomatedProperties):
@staticmethod
def _resize(pv_uuid, pv_name, new_size_bytes, resize_options):
# Make sure we have a dbus object representing it
Pv.validate_dbus_object(pv_uuid, pv_name)
Pv.handle_execute(*cmdhandler.pv_resize(pv_name, new_size_bytes,
resize_options))
dbo = cfg.om.get_object_by_uuid_lvm_id(pv_uuid, pv_name)
if dbo:
rc, out, err = cmdhandler.pv_resize(pv_name, new_size_bytes,
resize_options)
if rc == 0:
dbo.refresh()
else:
raise dbus.exceptions.DBusException(
PV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
PV_INTERFACE,
'PV with uuid %s and name %s not present!' %
(pv_uuid, pv_name))
return '/'
@dbus.service.method(
@ -190,9 +201,21 @@ class Pv(AutomatedProperties):
@staticmethod
def _allocation_enabled(pv_uuid, pv_name, yes_no, allocation_options):
# Make sure we have a dbus object representing it
Pv.validate_dbus_object(pv_uuid, pv_name)
Pv.handle_execute(*cmdhandler.pv_allocatable(pv_name, yes_no,
allocation_options))
dbo = cfg.om.get_object_by_uuid_lvm_id(pv_uuid, pv_name)
if dbo:
rc, out, err = cmdhandler.pv_allocatable(
pv_name, yes_no, allocation_options)
if rc == 0:
cfg.load()
else:
raise dbus.exceptions.DBusException(
PV_INTERFACE, 'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
PV_INTERFACE,
'PV with uuid %s and name %s not present!' %
(pv_uuid, pv_name))
return '/'
@dbus.service.method(


@ -0,0 +1,45 @@
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Try to minimize the number of refreshes we do.
import threading
from .request import RequestEntry
from . import cfg
from . import utils
_rlock = threading.RLock()
_count = 0
def handle_external_event(command):
utils.log_debug("External event: '%s'" % command)
event_complete()
cfg.load()
def event_add(params):
global _rlock
global _count
with _rlock:
if _count == 0:
_count += 1
r = RequestEntry(
-1, handle_external_event,
params, None, None, False)
cfg.worker_q.put(r)
def event_complete():
global _rlock
global _count
with _rlock:
if _count > 0:
_count -= 1
return _count
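
The net effect of this gate is that any number of external events arriving while one refresh is still pending collapse into a single queued request. A standalone sketch under that assumption (the plain queue and tuple below stand in for cfg.worker_q and RequestEntry):

import queue
import threading

_rlock = threading.RLock()
_count = 0
worker_q = queue.Queue()            # stands in for cfg.worker_q

def event_add(params):
    global _count
    with _rlock:
        if _count == 0:
            _count += 1
            worker_q.put(params)    # the daemon queues a RequestEntry here

def event_complete():
    global _count
    with _rlock:
        if _count > 0:
            _count -= 1
        return _count

for _ in range(5):
    event_add(('udev',))            # burst of events while a refresh is pending

print(worker_q.qsize())             # -> 1: only one refresh request was queued
event_complete()                    # the worker calls this before refreshing
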


@ -13,12 +13,13 @@ from gi.repository import GLib
from .job import Job
from . import cfg
import traceback
from .utils import log_error, mt_async_call
from .utils import log_error
class RequestEntry(object):
def __init__(self, tmo, method, arguments, cb, cb_error,
return_tuple=True, job_state=None):
self.tmo = tmo
self.method = method
self.arguments = arguments
self.cb = cb
@ -34,32 +35,25 @@ class RequestEntry(object):
self._return_tuple = return_tuple
self._job_state = job_state
if tmo < 0:
if self.tmo < 0:
# Client is willing to block forever
pass
elif tmo == 0:
self._return_job()
else:
# Note: using 990 instead of 1000 for second to ms conversion to
# account for overhead. Goal is to return just before the
# timeout amount has expired. Better to be a little early than
# late.
self.timer_id = GLib.timeout_add(
tmo * 990, RequestEntry._request_timeout, self)
self.timer_id = GLib.timeout_add_seconds(
tmo, RequestEntry._request_timeout, self)
@staticmethod
def _request_timeout(r):
"""
Method which gets called when the timer runs out!
:param r: RequestEntry which timed out
:return: Result of timer_expired
:return: Nothing
"""
return r.timer_expired()
r.timer_expired()
def _return_job(self):
# Return job is only called when we create a request object or when
# we pop a timer. In both cases we are running in the correct context
# and do not need to schedule the call back in main context.
self._job = Job(self, self._job_state)
cfg.om.register_object(self._job, True)
if self._return_tuple:
@ -116,9 +110,9 @@ class RequestEntry(object):
if error_rc == 0:
if self.cb:
if self._return_tuple:
mt_async_call(self.cb, (result, '/'))
self.cb((result, '/'))
else:
mt_async_call(self.cb, result)
self.cb(result)
else:
if self.cb_error:
if not error_exception:
@ -129,9 +123,10 @@ class RequestEntry(object):
else:
error_exception = Exception(error_msg)
mt_async_call(self.cb_error, error_exception)
self.cb_error(error_exception)
else:
# We have a job and it's complete, indicate that it's done.
# TODO: We need to signal the job is done too.
self._job.Complete = True
self._job = None


@ -9,42 +9,12 @@
import pyudev
import threading
from .refresh import event_add
from . import cfg
from .request import RequestEntry
from . import utils
observer = None
observer_lock = threading.RLock()
_udev_lock = threading.RLock()
_udev_count = 0
def udev_add():
global _udev_count
with _udev_lock:
if _udev_count == 0:
_udev_count += 1
# Place this on the queue so any other operations will sequence
# behind it
r = RequestEntry(
-1, _udev_event, (), None, None, False)
cfg.worker_q.put(r)
def udev_complete():
global _udev_count
with _udev_lock:
if _udev_count > 0:
_udev_count -= 1
def _udev_event():
utils.log_debug("Processing udev event")
udev_complete()
cfg.load()
# noinspection PyUnusedLocal
def filter_event(action, device):
@ -52,8 +22,8 @@ def filter_event(action, device):
# when appropriate.
refresh = False
if 'ID_FS_TYPE' in device:
fs_type_new = device['ID_FS_TYPE']
if '.ID_FS_TYPE_NEW' in device:
fs_type_new = device['.ID_FS_TYPE_NEW']
if 'LVM' in fs_type_new:
refresh = True
@ -68,7 +38,7 @@ def filter_event(action, device):
refresh = True
if refresh:
udev_add()
event_add(('udev',))
def add():


@ -14,28 +14,14 @@ import ctypes
import os
import string
import datetime
from fcntl import fcntl, F_GETFL, F_SETFL
import dbus
from lvmdbusd import cfg
# noinspection PyUnresolvedReferences
from gi.repository import GLib
import threading
import traceback
import signal
STDOUT_TTY = os.isatty(sys.stdout.fileno())
def _handle_execute(rc, out, err, interface):
if rc == 0:
cfg.load()
else:
# Need to work on error handling; it needs to be consistent
raise dbus.exceptions.DBusException(
interface, 'Exit code %s, stderr = %s' % (str(rc), err))
def rtype(dbus_type):
"""
Decorator making sure that the decorated function returns a value of
@ -67,20 +53,8 @@ def n32(v):
return int(float(v))
@rtype(dbus.Double)
def d(v):
if not v:
return 0.0
return float(v)
def _snake_to_pascal(s):
return ''.join(x.title() for x in s.split('_'))
# noinspection PyProtectedMember
def init_class_from_arguments(
obj_instance, begin_suffix=None, snake_to_pascal=False):
def init_class_from_arguments(obj_instance):
for k, v in list(sys._getframe(1).f_locals.items()):
if k != 'self':
nt = k
@ -91,17 +65,8 @@ def init_class_from_arguments(
cur = getattr(obj_instance, nt, v)
# print 'Init class %s = %s' % (nt, str(v))
if not (cur and len(str(cur)) and (v is None or len(str(v))) == 0)\
and (begin_suffix is None or nt.startswith(begin_suffix)):
if begin_suffix and nt.startswith(begin_suffix):
name = nt[len(begin_suffix):]
if snake_to_pascal:
name = _snake_to_pascal(name)
setattr(obj_instance, name, v)
else:
setattr(obj_instance, nt, v)
if not (cur and len(str(cur)) and (v is None or len(str(v))) == 0):
setattr(obj_instance, nt, v)
def get_properties(f):
@ -313,47 +278,12 @@ def log_error(msg, *attributes):
_common_log(msg, *attributes)
def dump_threads_stackframe():
ident_to_name = {}
for thread_object in threading.enumerate():
ident_to_name[thread_object.ident] = thread_object
stacks = []
for thread_ident, frame in sys._current_frames().items():
stack = traceback.format_list(traceback.extract_stack(frame))
# There is a possibility that a thread gets created after we have
# enumerated all threads, so this lookup table may be incomplete;
# account for this
if thread_ident in ident_to_name:
thread_name = ident_to_name[thread_ident].name
else:
thread_name = "unknown"
stacks.append("Thread: %s" % (thread_name))
stacks.append("".join(stack))
log_error("Dumping thread stack frames!\n" + "\n".join(stacks))
# noinspection PyUnusedLocal
def handler(signum):
try:
if signum == signal.SIGUSR1:
dump_threads_stackframe()
else:
cfg.run.value = 0
log_debug('Exiting daemon with signal %d' % signum)
if cfg.loop is not None:
cfg.loop.quit()
except:
st = traceback.format_exc()
log_error("signal handler: exception (logged, not reported!) \n %s" % st)
# It's important we report that we handled the exception for the exception
# handler to continue to work, especially for signal 10 (SIGUSR1)
return True
def handler(signum, frame):
cfg.run.value = 0
log_debug('Signal handler called with signal %d' % signum)
if cfg.loop is not None:
cfg.loop.quit()
def pv_obj_path_generate():
@ -369,8 +299,6 @@ def lv_object_path_method(name, meta):
return _hidden_lv_obj_path_generate
elif meta[0][0] == 't':
return _thin_pool_obj_path_generate
elif meta[0][0] == 'd':
return _vdo_pool_object_path_generate
elif meta[0][0] == 'C' and 'pool' in meta[1]:
return _cache_pool_obj_path_generate
@ -388,10 +316,6 @@ def _thin_pool_obj_path_generate():
return cfg.THIN_POOL_PATH + "/%d" % next(cfg.thin_id)
def _vdo_pool_object_path_generate():
return cfg.VDO_POOL_PATH + "/%d" % next(cfg.vdo_id)
def _cache_pool_obj_path_generate():
return cfg.CACHE_POOL_PATH + "/%d" % next(cfg.cache_pool_id)
@ -483,7 +407,7 @@ _ALLOWABLE_CH_SET = set(_ALLOWABLE_CH)
_ALLOWABLE_VG_LV_CH = string.ascii_letters + string.digits + '.-_+'
_ALLOWABLE_VG_LV_CH_SET = set(_ALLOWABLE_VG_LV_CH)
_LV_NAME_RESERVED = ("_cdata", "_cmeta", "_corig", "_mimage", "_mlog",
"_pmspare", "_rimage", "_rmeta", "_tdata", "_tmeta", "_vorigin", "_vdata")
"_pmspare", "_rimage", "_rmeta", "_tdata", "_tmeta", "_vorigin")
# Tags can have the characters, based on the code
# a-zA-Z0-9._-+/=!:&#
@ -570,128 +494,3 @@ def validate_tag(interface, tag):
raise dbus.exceptions.DBusException(
interface, 'tag (%s) contains invalid character, allowable set(%s)'
% (tag, _ALLOWABLE_TAG_CH))
def add_no_notify(cmdline):
"""
Given a command line to execute, check whether `--config` is present. If it
is, append global/notify_dbus=0 to its value; otherwise append a new
--config option to the end of the list.
:param: cmdline: The command line to inspect
:type: cmdline: list
:return: cmdline with notify_dbus config option present
:rtype: list
"""
# Only after we have seen an external event do we disable lvm from sending
# us one when we call lvm
if cfg.got_external_event:
if 'help' in cmdline:
return cmdline
if '--config' in cmdline:
for i, arg in enumerate(cmdline):
if arg == '--config':
if len(cmdline) <= i+1:
raise dbus.exceptions.DBusException("Missing value for --config option.")
cmdline[i+1] += " global/notify_dbus=0"
break
else:
cmdline.extend(['--config', 'global/notify_dbus=0'])
return cmdline
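
For illustration, the transformation performed above once an external event has been seen; the helper below is a simplified restatement without the cfg/dbus dependencies or the got_external_event gate, and the lvcreate/lvs command lines are made up:

def add_no_notify_sketch(cmdline):
    # Simplified restatement of add_no_notify() above, for illustration only.
    if 'help' in cmdline:
        return cmdline
    if '--config' in cmdline:
        i = cmdline.index('--config')
        if len(cmdline) <= i + 1:
            raise ValueError("Missing value for --config option.")
        cmdline[i + 1] += " global/notify_dbus=0"
    else:
        cmdline.extend(['--config', 'global/notify_dbus=0'])
    return cmdline

print(add_no_notify_sketch(['lvcreate', '-L', '1G', 'vg00']))
# ['lvcreate', '-L', '1G', 'vg00', '--config', 'global/notify_dbus=0']
print(add_no_notify_sketch(['lvs', '--config', 'global/use_lvmetad=0']))
# ['lvs', '--config', 'global/use_lvmetad=0 global/notify_dbus=0']
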
# The methods below which start with mt_* are used to execute the desired code
# on the main thread of execution, to alleviate any issues the dbus-python
# library has with multi-threaded access.  Essentially, we are trying to
# ensure all dbus library interaction is done from the same thread!
def _async_handler(call_back, parameters):
params_str = ", ".join(str(x) for x in parameters)
log_debug('Main thread execution, callback = %s, parameters = (%s)' %
(str(call_back), params_str))
try:
if parameters:
call_back(*parameters)
else:
call_back()
except:
st = traceback.format_exc()
log_error("mt_async_call: exception (logged, not reported!) \n %s" % st)
# Execute the function on the main thread with the provided parameters, do
# not return *any* value or wait for the execution to complete!
def mt_async_call(function_call_back, *parameters):
GLib.idle_add(_async_handler, function_call_back, parameters)
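
A small usage sketch of the fire-and-forget call above, assuming the GLib main loop started in main.py is running; the callback and object path below are made up for the example:

def _report_done(job_path):
    # Runs later, on the thread driving the GLib main loop, where it is safe
    # to touch dbus-python state.
    log_debug("job complete: %s" % job_path)

# Called from a worker thread; returns immediately and discards any result.
mt_async_call(_report_done, '/com/redhat/lvmdbus1/Job/0')
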
# Run the supplied function and arguments on the main thread and wait for them
# to complete while allowing the ability to get the return value too.
#
# Example:
# result = MThreadRunner(foo, arg1, arg2).done()
#
class MThreadRunner(object):
@staticmethod
def runner(obj):
# noinspection PyProtectedMember
obj._run()
with obj.cond:
obj.function_complete = True
obj.cond.notify_all()
def __init__(self, function, *args):
self.f = function
self.rc = None
self.exception = None
self.args = args
self.function_complete = False
self.cond = threading.Condition(threading.Lock())
def done(self):
GLib.idle_add(MThreadRunner.runner, self)
with self.cond:
if not self.function_complete:
self.cond.wait()
if self.exception:
raise self.exception
return self.rc
def _run(self):
try:
if self.args:
self.rc = self.f(*self.args)
else:
self.rc = self.f()
except BaseException as be:
self.exception = be
st = traceback.format_exc()
log_error("MThreadRunner: exception \n %s" % st)
log_error("Exception will be raised in calling thread!")
def _remove_objects(dbus_objects_rm):
for o in dbus_objects_rm:
cfg.om.remove_object(o, emit_signal=True)
# Remove dbus objects from main thread
def mt_remove_dbus_objects(objs):
MThreadRunner(_remove_objects, objs).done()
# Make stream non-blocking
def make_non_block(stream):
flags = fcntl(stream, F_GETFL)
fcntl(stream, F_SETFL, flags | os.O_NONBLOCK)
def read_decoded(stream):
tmp = stream.read()
if tmp:
return tmp.decode("utf-8")
return ''


@ -10,17 +10,16 @@
from .automatedproperties import AutomatedProperties
from . import utils
from .utils import pv_obj_path_generate, vg_obj_path_generate, n, \
_handle_execute
from .utils import pv_obj_path_generate, vg_obj_path_generate, n
import dbus
from . import cfg
from .cfg import VG_INTERFACE, VG_VDO_INTERFACE
from .cfg import VG_INTERFACE
from . import cmdhandler
from .request import RequestEntry
from .loader import common
from .state import State
from . import background
from .utils import round_size, mt_remove_dbus_objects
from .utils import round_size
from .job import JobState
@ -47,29 +46,24 @@ def vgs_state_retrieve(selection, cache_refresh=True):
def load_vgs(vg_specific=None, object_path=None, refresh=False,
emit_signal=False, cache_refresh=True):
return common(vgs_state_retrieve, (Vg, VgVdo, ), vg_specific, object_path, refresh,
return common(vgs_state_retrieve, (Vg,), vg_specific, object_path, refresh,
emit_signal, cache_refresh)
# noinspection PyPep8Naming,PyUnresolvedReferences,PyUnusedLocal
class VgState(State):
@property
def internal_name(self):
return self.Name
@property
def lvm_id(self):
return self.internal_name
return self.Name
def identifiers(self):
return (self.Uuid, self.internal_name)
return (self.Uuid, self.Name)
def _lv_paths_build(self):
rc = []
for lv in cfg.db.lvs_in_vg(self.Uuid):
(lv_name, meta, lv_uuid) = lv
full_name = "%s/%s" % (self.internal_name, lv_name)
full_name = "%s/%s" % (self.Name, lv_name)
gen = utils.lv_object_path_method(lv_name, meta)
@ -98,12 +92,8 @@ class VgState(State):
def create_dbus_object(self, path):
if not path:
path = cfg.om.get_object_path_by_uuid_lvm_id(
self.Uuid, self.internal_name, vg_obj_path_generate)
if cfg.vdo_support:
return VgVdo(path, self)
else:
return Vg(path, self)
self.Uuid, self.Name, vg_obj_path_generate)
return Vg(path, self)
# noinspection PyMethodMayBeStatic
def creation_signature(self):
@ -112,6 +102,7 @@ class VgState(State):
# noinspection PyPep8Naming
@utils.dbus_property(VG_INTERFACE, 'Uuid', 's')
@utils.dbus_property(VG_INTERFACE, 'Name', 's')
@utils.dbus_property(VG_INTERFACE, 'Fmt', 's')
@utils.dbus_property(VG_INTERFACE, 'SizeBytes', 't', 0)
@utils.dbus_property(VG_INTERFACE, 'FreeBytes', 't', 0)
@ -144,7 +135,6 @@ class Vg(AutomatedProperties):
_AllocNormal_meta = ('b', VG_INTERFACE)
_AllocAnywhere_meta = ('b', VG_INTERFACE)
_Clustered_meta = ('b', VG_INTERFACE)
_Name_meta = ('s', VG_INTERFACE)
# noinspection PyUnusedLocal,PyPep8Naming
def __init__(self, object_path, object_state):
@ -155,28 +145,29 @@ class Vg(AutomatedProperties):
@staticmethod
def fetch_new_lv(vg_name, lv_name):
cfg.load()
return cfg.om.get_object_path_by_lvm_id("%s/%s" % (vg_name, lv_name))
@staticmethod
def handle_execute(rc, out, err):
return _handle_execute(rc, out, err, VG_INTERFACE)
@staticmethod
def validate_dbus_object(vg_uuid, vg_name):
dbo = cfg.om.get_object_by_uuid_lvm_id(vg_uuid, vg_name)
if not dbo:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(vg_uuid, vg_name))
return dbo
@staticmethod
def _rename(uuid, vg_name, new_name, rename_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
Vg.handle_execute(*cmdhandler.vg_rename(
uuid, new_name, rename_options))
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
rc, out, err = cmdhandler.vg_rename(vg_name, new_name,
rename_options)
if rc == 0:
cfg.load()
else:
# Need to work on error handling; it needs to be consistent
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return '/'
@dbus.service.method(
@ -193,9 +184,31 @@ class Vg(AutomatedProperties):
@staticmethod
def _remove(uuid, vg_name, remove_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
# Remove the VG, if successful then remove from the model
Vg.handle_execute(*cmdhandler.vg_remove(vg_name, remove_options))
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
# Remove the VG, if successful then remove from the model
rc, out, err = cmdhandler.vg_remove(vg_name, remove_options)
if rc == 0:
# Remove the VG
cfg.om.remove_object(dbo, True)
# If an LV has hidden LVs, things can get quite involved,
# especially if it's the last thin pool to get removed, so
# let's refresh all
cfg.load()
else:
# Need to work on error handling; it needs to be consistent
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return '/'
@dbus.service.method(
@ -210,14 +223,32 @@ class Vg(AutomatedProperties):
@staticmethod
def _change(uuid, vg_name, change_options):
Vg.validate_dbus_object(uuid, vg_name)
Vg.handle_execute(*cmdhandler.vg_change(change_options, vg_name))
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
rc, out, err = cmdhandler.vg_change(change_options, vg_name)
# To use an example with d-feet (Method input)
# {"activate": __import__('gi.repository.GLib', globals(),
# locals(), ['Variant']).Variant("s", "n")}
if rc == 0:
cfg.load()
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return '/'
# TODO: This should be broken into a number of different methods
# instead of having one method that takes a hash for parameters. Some of
# the changes that vgchange makes work on the entire system, not just a
# specific vg, thus that should be in the Manager interface.
# specfic vg, thus that should be in the Manager interface.
@dbus.service.method(
dbus_interface=VG_INTERFACE,
in_signature='ia{sv}',
@ -232,23 +263,34 @@ class Vg(AutomatedProperties):
@staticmethod
def _reduce(uuid, vg_name, missing, pv_object_paths, reduce_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
pv_devices = []
if dbo:
pv_devices = []
# If pv_object_paths is not empty, then get the device paths
if pv_object_paths and len(pv_object_paths) > 0:
for pv_op in pv_object_paths:
pv = cfg.om.get_object_by_path(pv_op)
if pv:
pv_devices.append(pv.lvm_id)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'PV Object path not found = %s!' % pv_op)
# If pv_object_paths is not empty, then get the device paths
if pv_object_paths and len(pv_object_paths) > 0:
for pv_op in pv_object_paths:
pv = cfg.om.get_object_by_path(pv_op)
if pv:
pv_devices.append(pv.lvm_id)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'PV Object path not found = %s!' % pv_op)
Vg.handle_execute(*cmdhandler.vg_reduce(
vg_name, missing, pv_devices, reduce_options))
rc, out, err = cmdhandler.vg_reduce(vg_name, missing, pv_devices,
reduce_options)
if rc == 0:
cfg.load()
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE, 'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return '/'
@dbus.service.method(
@ -265,25 +307,36 @@ class Vg(AutomatedProperties):
@staticmethod
def _extend(uuid, vg_name, pv_object_paths, extend_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
extend_devices = []
if dbo:
extend_devices = []
for i in pv_object_paths:
pv = cfg.om.get_object_by_path(i)
if pv:
extend_devices.append(pv.lvm_id)
for i in pv_object_paths:
pv = cfg.om.get_object_by_path(i)
if pv:
extend_devices.append(pv.lvm_id)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE, 'PV Object path not found = %s!' % i)
if len(extend_devices):
rc, out, err = cmdhandler.vg_extend(vg_name, extend_devices,
extend_options)
if rc == 0:
cfg.load()
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE, 'PV Object path not found = %s!' % i)
if len(extend_devices):
Vg.handle_execute(*cmdhandler.vg_extend(
vg_name, extend_devices, extend_options))
VG_INTERFACE, 'No pv_object_paths provided!')
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE, 'No pv_object_paths provided!')
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return '/'
@dbus.service.method(
@ -320,22 +373,33 @@ class Vg(AutomatedProperties):
create_options):
# Make sure we have a dbus object representing it
pv_dests = []
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
Vg.validate_dbus_object(uuid, vg_name)
if dbo:
if len(pv_dests_and_ranges):
for pr in pv_dests_and_ranges:
pv_dbus_obj = cfg.om.get_object_by_path(pr[0])
if not pv_dbus_obj:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'PV Destination (%s) not found' % pr[0])
if len(pv_dests_and_ranges):
for pr in pv_dests_and_ranges:
pv_dbus_obj = cfg.om.get_object_by_path(pr[0])
if not pv_dbus_obj:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'PV Destination (%s) not found' % pr[0])
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
rc, out, err = cmdhandler.vg_lv_create(
vg_name, create_options, name, size_bytes, pv_dests)
Vg.handle_execute(*cmdhandler.vg_lv_create(
vg_name, create_options, name, size_bytes, pv_dests))
return Vg.fetch_new_lv(vg_name, name)
if rc == 0:
return Vg.fetch_new_lv(vg_name, name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@ -371,10 +435,25 @@ class Vg(AutomatedProperties):
def _lv_create_linear(uuid, vg_name, name, size_bytes,
thin_pool, create_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
Vg.handle_execute(*cmdhandler.vg_lv_create_linear(
vg_name, create_options, name, size_bytes, thin_pool))
return Vg.fetch_new_lv(vg_name, name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
rc, out, err = cmdhandler.vg_lv_create_linear(
vg_name, create_options, name, size_bytes, thin_pool)
if rc == 0:
created_lv = Vg.fetch_new_lv(vg_name, name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return created_lv
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@ -394,11 +473,24 @@ class Vg(AutomatedProperties):
def _lv_create_striped(uuid, vg_name, name, size_bytes, num_stripes,
stripe_size_kb, thin_pool, create_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
Vg.handle_execute(*cmdhandler.vg_lv_create_striped(
vg_name, create_options, name, size_bytes,
num_stripes, stripe_size_kb, thin_pool))
return Vg.fetch_new_lv(vg_name, name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
rc, out, err = cmdhandler.vg_lv_create_striped(
vg_name, create_options, name, size_bytes,
num_stripes, stripe_size_kb, thin_pool)
if rc == 0:
created_lv = Vg.fetch_new_lv(vg_name, name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE, 'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return created_lv
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@ -421,10 +513,25 @@ class Vg(AutomatedProperties):
def _lv_create_mirror(uuid, vg_name, name, size_bytes,
num_copies, create_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
Vg.handle_execute(*cmdhandler.vg_lv_create_mirror(
vg_name, create_options, name, size_bytes, num_copies))
return Vg.fetch_new_lv(vg_name, name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
rc, out, err = cmdhandler.vg_lv_create_mirror(
vg_name, create_options, name, size_bytes, num_copies)
if rc == 0:
created_lv = Vg.fetch_new_lv(vg_name, name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return created_lv
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@ -445,11 +552,26 @@ class Vg(AutomatedProperties):
def _lv_create_raid(uuid, vg_name, name, raid_type, size_bytes,
num_stripes, stripe_size_kb, create_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
Vg.handle_execute(*cmdhandler.vg_lv_create_raid(
vg_name, create_options, name, raid_type, size_bytes,
num_stripes, stripe_size_kb))
return Vg.fetch_new_lv(vg_name, name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
rc, out, err = cmdhandler.vg_lv_create_raid(
vg_name, create_options, name, raid_type, size_bytes,
num_stripes, stripe_size_kb)
if rc == 0:
created_lv = Vg.fetch_new_lv(vg_name, name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return created_lv
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@ -470,27 +592,35 @@ class Vg(AutomatedProperties):
def _create_pool(uuid, vg_name, meta_data_lv, data_lv,
create_options, create_method):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
# Retrieve the full names for the metadata and data lv
md = cfg.om.get_object_by_path(meta_data_lv)
data = cfg.om.get_object_by_path(data_lv)
if md and data:
if dbo and md and data:
new_name = data.Name
rc, out, err = create_method(
md.lv_full_name(), data.lv_full_name(), create_options)
if rc == 0:
mt_remove_dbus_objects((md, data))
cfg.om.remove_object(md, emit_signal=True)
cfg.om.remove_object(data, emit_signal=True)
Vg.handle_execute(rc, out, err)
cache_pool_lv = Vg.fetch_new_lv(vg_name, new_name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
msg = ""
if not dbo:
msg += 'VG with uuid %s and name %s not present!' % \
(uuid, vg_name)
if not md:
msg += 'Meta data LV with object path %s not present!' % \
(meta_data_lv)
@ -501,7 +631,7 @@ class Vg(AutomatedProperties):
raise dbus.exceptions.DBusException(VG_INTERFACE, msg)
return Vg.fetch_new_lv(vg_name, new_name)
return cache_pool_lv
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@ -535,20 +665,33 @@ class Vg(AutomatedProperties):
pv_devices = []
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
# Check for existence of pv object paths
for p in pv_object_paths:
pv = cfg.om.get_object_by_path(p)
if pv:
pv_devices.append(pv.Name)
if dbo:
# Check for existence of pv object paths
for p in pv_object_paths:
pv = cfg.om.get_object_by_path(p)
if pv:
pv_devices.append(pv.Name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE, 'PV object path = %s not found' % p)
rc, out, err = cmdhandler.pv_tag(
pv_devices, tags_add, tags_del, tag_options)
if rc == 0:
cfg.load()
return '/'
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE, 'PV object path = %s not found' % p)
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
Vg.handle_execute(*cmdhandler.pv_tag(
pv_devices, tags_add, tags_del, tag_options))
return '/'
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@ -586,11 +729,25 @@ class Vg(AutomatedProperties):
@staticmethod
def _vg_add_rm_tags(uuid, vg_name, tags_add, tags_del, tag_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
Vg.handle_execute(*cmdhandler.vg_tag(
vg_name, tags_add, tags_del, tag_options))
return '/'
if dbo:
rc, out, err = cmdhandler.vg_tag(
vg_name, tags_add, tags_del, tag_options)
if rc == 0:
dbo.refresh()
return '/'
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@ -627,9 +784,23 @@ class Vg(AutomatedProperties):
@staticmethod
def _vg_change_set(uuid, vg_name, method, value, options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
Vg.handle_execute(*method(vg_name, value, options))
return '/'
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
rc, out, err = method(vg_name, value, options)
if rc == 0:
dbo.refresh()
return '/'
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@ -687,10 +858,23 @@ class Vg(AutomatedProperties):
def _vg_activate_deactivate(uuid, vg_name, activate, control_flags,
options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
Vg.handle_execute(*cmdhandler.activate_deactivate(
'vgchange', vg_name, activate, control_flags, options))
return '/'
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
rc, out, err = cmdhandler.activate_deactivate(
'vgchange', vg_name, activate, control_flags, options)
if rc == 0:
cfg.load()
return '/'
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@ -716,12 +900,6 @@ class Vg(AutomatedProperties):
cb, cbe, return_tuple=False)
cfg.worker_q.put(r)
@property
def Name(self):
if ':' in self.state.Name:
return self.state.Name.split(':')[0]
return self.state.Name
@property
def Tags(self):
return utils.parse_tags(self.state.tags)
@ -777,71 +955,3 @@ class Vg(AutomatedProperties):
@property
def Clustered(self):
return self._attribute(5, 'c')
class VgVdo(Vg):
# noinspection PyUnusedLocal,PyPep8Naming
def __init__(self, object_path, object_state):
super(VgVdo, self).__init__(object_path, vgs_state_retrieve)
self.set_interface(VG_VDO_INTERFACE)
self._object_path = object_path
self.state = object_state
@staticmethod
def _lv_vdo_pool_create_with_lv(uuid, vg_name, pool_name, lv_name,
data_size, virtual_size, create_options):
Vg.validate_dbus_object(uuid, vg_name)
Vg.handle_execute(*cmdhandler.vg_create_vdo_pool_lv_and_lv(
vg_name, pool_name, lv_name, data_size, virtual_size,
create_options))
return Vg.fetch_new_lv(vg_name, pool_name)
@dbus.service.method(
dbus_interface=VG_VDO_INTERFACE,
in_signature='ssttia{sv}',
out_signature='(oo)',
async_callbacks=('cb', 'cbe'))
def CreateVdoPoolandLv(self, pool_name, lv_name, data_size, virtual_size,
tmo, create_options, cb, cbe):
utils.validate_lv_name(VG_VDO_INTERFACE, self.Name, pool_name)
utils.validate_lv_name(VG_VDO_INTERFACE, self.Name, lv_name)
r = RequestEntry(tmo, VgVdo._lv_vdo_pool_create_with_lv,
(self.state.Uuid, self.state.lvm_id,
pool_name, lv_name, round_size(data_size),
round_size(virtual_size),
create_options), cb, cbe)
cfg.worker_q.put(r)
@staticmethod
def _vdo_pool_create(uuid, vg_name, pool_lv, name, virtual_size, create_options):
Vg.validate_dbus_object(uuid, vg_name)
# Retrieve the full name of the pool lv
pool = cfg.om.get_object_by_path(pool_lv)
if not pool:
msg = 'LV with object path %s not present!' % \
(pool_lv)
raise dbus.exceptions.DBusException(VG_VDO_INTERFACE, msg)
Vg.handle_execute(*cmdhandler.vg_create_vdo_pool(
pool.lv_full_name(), name, virtual_size,
create_options))
return Vg.fetch_new_lv(vg_name, pool.Name)
@dbus.service.method(
dbus_interface=VG_VDO_INTERFACE,
in_signature='ostia{sv}',
out_signature='(oo)',
async_callbacks=('cb', 'cbe'))
def CreateVdoPool(self, pool_lv, name, virtual_size,
tmo, create_options, cb, cbe):
utils.validate_lv_name(VG_VDO_INTERFACE, self.Name, name)
r = RequestEntry(tmo, VgVdo._vdo_pool_create,
(self.state.Uuid, self.state.lvm_id,
pool_lv, name,
round_size(virtual_size),
create_options), cb, cbe)
cfg.worker_q.put(r)

daemons/lvmetad/.gitignore (new file, 2 lines)

@ -0,0 +1,2 @@
lvmetad
lvmetactl


@ -0,0 +1,65 @@
#
# Copyright (C) 2011-2012 Red Hat, Inc.
#
# This file is part of LVM2.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU Lesser General Public License v.2.1.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
SOURCES = lvmetad-core.c
SOURCES2 = testclient.c
TARGETS = lvmetad lvmetactl
.PHONY: install_lvmetad
CFLOW_LIST = $(SOURCES)
CFLOW_LIST_TARGET = $(LIB_NAME).cflow
CFLOW_TARGET = lvmetad
include $(top_builddir)/make.tmpl
INCLUDES += -I$(top_srcdir)/libdaemon/server
LVMLIBS = -ldaemonserver $(LVMINTERNAL_LIBS) -ldevmapper
LIBS += $(PTHREAD_LIBS)
LDFLAGS += -L$(top_builddir)/libdaemon/server $(EXTRA_EXEC_LDFLAGS)
CLDFLAGS += -L$(top_builddir)/libdaemon/server
CFLAGS += $(EXTRA_EXEC_CFLAGS)
lvmetad: $(OBJECTS) $(top_builddir)/libdaemon/client/libdaemonclient.a \
$(top_builddir)/libdaemon/server/libdaemonserver.a
$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJECTS) $(LVMLIBS) $(LIBS)
lvmetactl: lvmetactl.o $(top_builddir)/libdaemon/client/libdaemonclient.a \
$(top_builddir)/libdaemon/server/libdaemonserver.a
$(CC) $(CFLAGS) $(LDFLAGS) -o $@ lvmetactl.o $(LVMLIBS)
CLEAN_TARGETS += lvmetactl.o
# TODO: No idea. No idea how to test either.
#ifneq ("$(CFLOW_CMD)", "")
#CFLOW_SOURCES = $(addprefix $(srcdir)/, $(SOURCES))
#-include $(top_builddir)/libdm/libdevmapper.cflow
#-include $(top_builddir)/lib/liblvm-internal.cflow
#-include $(top_builddir)/lib/liblvm2cmd.cflow
#-include $(top_builddir)/daemons/dmeventd/$(LIB_NAME).cflow
#-include $(top_builddir)/daemons/dmeventd/plugins/mirror/$(LIB_NAME)-lvm2mirror.cflow
#endif
install_lvmetad: lvmetad
$(INSTALL_PROGRAM) -D $< $(sbindir)/$(<F)
install_lvm2: install_lvmetad
install: install_lvm2

daemons/lvmetad/lvmetactl.c (new file, 249 lines)

@ -0,0 +1,249 @@
/*
* Copyright (C) 2014 Red Hat, Inc.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*/
#include "tool.h"
#include "lvmetad-client.h"
daemon_handle h;
static void print_reply(daemon_reply reply)
{
const char *a = daemon_reply_str(reply, "response", NULL);
const char *b = daemon_reply_str(reply, "status", NULL);
const char *c = daemon_reply_str(reply, "reason", NULL);
printf("response \"%s\" status \"%s\" reason \"%s\"\n",
a ? a : "", b ? b : "", c ? c : "");
}
int main(int argc, char **argv)
{
daemon_reply reply;
char *cmd;
char *uuid;
char *name;
int val;
int ver;
if (argc < 2) {
printf("lvmetactl dump\n");
printf("lvmetactl pv_list\n");
printf("lvmetactl vg_list\n");
printf("lvmetactl get_global_info\n");
printf("lvmetactl vg_lookup_name <name>\n");
printf("lvmetactl vg_lookup_uuid <uuid>\n");
printf("lvmetactl pv_lookup_uuid <uuid>\n");
printf("lvmetactl set_global_invalid 0|1\n");
printf("lvmetactl set_global_disable 0|1\n");
printf("lvmetactl set_vg_version <uuid> <name> <version>\n");
printf("lvmetactl vg_lock_type <uuid>\n");
return -1;
}
cmd = argv[1];
h = lvmetad_open(NULL);
if (!strcmp(cmd, "dump")) {
reply = daemon_send_simple(h, "dump",
"token = %s", "skip",
"pid = " FMTd64, (int64_t)getpid(),
"cmd = %s", "lvmetactl",
NULL);
printf("%s\n", reply.buffer.mem);
} else if (!strcmp(cmd, "pv_list")) {
reply = daemon_send_simple(h, "pv_list",
"token = %s", "skip",
"pid = " FMTd64, (int64_t)getpid(),
"cmd = %s", "lvmetactl",
NULL);
printf("%s\n", reply.buffer.mem);
} else if (!strcmp(cmd, "vg_list")) {
reply = daemon_send_simple(h, "vg_list",
"token = %s", "skip",
"pid = " FMTd64, (int64_t)getpid(),
"cmd = %s", "lvmetactl",
NULL);
printf("%s\n", reply.buffer.mem);
} else if (!strcmp(cmd, "get_global_info")) {
reply = daemon_send_simple(h, "get_global_info",
"token = %s", "skip",
"pid = " FMTd64, (int64_t)getpid(),
"cmd = %s", "lvmetactl",
NULL);
printf("%s\n", reply.buffer.mem);
} else if (!strcmp(cmd, "set_global_invalid")) {
if (argc < 3) {
printf("set_global_invalid 0|1\n");
return -1;
}
val = atoi(argv[2]);
reply = daemon_send_simple(h, "set_global_info",
"global_invalid = " FMTd64, (int64_t) val,
"token = %s", "skip",
"pid = " FMTd64, (int64_t)getpid(),
"cmd = %s", "lvmetactl",
NULL);
print_reply(reply);
} else if (!strcmp(cmd, "set_global_disable")) {
if (argc < 3) {
printf("set_global_disable 0|1\n");
return -1;
}
val = atoi(argv[2]);
reply = daemon_send_simple(h, "set_global_info",
"global_disable = " FMTd64, (int64_t) val,
"disable_reason = %s", LVMETAD_DISABLE_REASON_DIRECT,
"token = %s", "skip",
"pid = " FMTd64, (int64_t)getpid(),
"cmd = %s", "lvmetactl",
NULL);
print_reply(reply);
} else if (!strcmp(cmd, "set_vg_version")) {
if (argc < 5) {
printf("set_vg_version <uuid> <name> <ver>\n");
return -1;
}
uuid = argv[2];
name = argv[3];
ver = atoi(argv[4]);
if ((strlen(uuid) == 1) && (uuid[0] == '-'))
uuid = NULL;
if ((strlen(name) == 1) && (name[0] == '-'))
name = NULL;
if (uuid && name) {
reply = daemon_send_simple(h, "set_vg_info",
"uuid = %s", uuid,
"name = %s", name,
"version = " FMTd64, (int64_t) ver,
"token = %s", "skip",
"pid = " FMTd64, (int64_t)getpid(),
"cmd = %s", "lvmetactl",
NULL);
} else if (uuid) {
reply = daemon_send_simple(h, "set_vg_info",
"uuid = %s", uuid,
"version = " FMTd64, (int64_t) ver,
"token = %s", "skip",
"pid = " FMTd64, (int64_t)getpid(),
"cmd = %s", "lvmetactl",
NULL);
} else if (name) {
reply = daemon_send_simple(h, "set_vg_info",
"name = %s", name,
"version = " FMTd64, (int64_t) ver,
"token = %s", "skip",
"pid = " FMTd64, (int64_t)getpid(),
"cmd = %s", "lvmetactl",
NULL);
} else {
printf("name or uuid required\n");
return -1;
}
print_reply(reply);
} else if (!strcmp(cmd, "vg_lookup_name")) {
if (argc < 3) {
printf("vg_lookup_name <name>\n");
return -1;
}
name = argv[2];
reply = daemon_send_simple(h, "vg_lookup",
"name = %s", name,
"token = %s", "skip",
"pid = " FMTd64, (int64_t)getpid(),
"cmd = %s", "lvmetactl",
NULL);
printf("%s\n", reply.buffer.mem);
} else if (!strcmp(cmd, "vg_lookup_uuid")) {
if (argc < 3) {
printf("vg_lookup_uuid <uuid>\n");
return -1;
}
uuid = argv[2];
reply = daemon_send_simple(h, "vg_lookup",
"uuid = %s", uuid,
"token = %s", "skip",
"pid = " FMTd64, (int64_t)getpid(),
"cmd = %s", "lvmetactl",
NULL);
printf("%s\n", reply.buffer.mem);
} else if (!strcmp(cmd, "vg_lock_type")) {
struct dm_config_node *metadata;
const char *lock_type;
if (argc < 3) {
printf("vg_lock_type <uuid>\n");
return -1;
}
uuid = argv[2];
reply = daemon_send_simple(h, "vg_lookup",
"uuid = %s", uuid,
"token = %s", "skip",
"pid = " FMTd64, (int64_t)getpid(),
"cmd = %s", "lvmetactl",
NULL);
/* printf("%s\n", reply.buffer.mem); */
metadata = dm_config_find_node(reply.cft->root, "metadata");
if (!metadata) {
printf("no metadata\n");
goto out;
}
lock_type = dm_config_find_str(metadata, "metadata/lock_type", NULL);
if (!lock_type) {
printf("no lock_type\n");
goto out;
}
printf("lock_type %s\n", lock_type);
} else if (!strcmp(cmd, "pv_lookup_uuid")) {
if (argc < 3) {
printf("pv_lookup_uuid <uuid>\n");
return -1;
}
uuid = argv[2];
reply = daemon_send_simple(h, "pv_lookup",
"uuid = %s", uuid,
"token = %s", "skip",
"pid = " FMTd64, (int64_t)getpid(),
"cmd = %s", "lvmetactl",
NULL);
printf("%s\n", reply.buffer.mem);
} else {
printf("unknown command\n");
goto out_close;
}
out:
daemon_reply_destroy(reply);
out_close:
daemon_close(h);
return 0;
}
