Mirror of git://sourceware.org/git/lvm2.git

Compare commits


1 commit

Author:  David Teigland
SHA1:    e58d38a322
Message: commands: new method for defining commands
Date:    2016-09-02 14:24:42 -05:00
1053 changed files with 44214 additions and 100036 deletions

.gitignore

@@ -1,7 +1,6 @@
*.5
*.7
*.8
*.8_gen
*.a
*.d
*.o
@@ -28,108 +27,6 @@ make.tmpl
/config.log
/config.status
/configure.scan
/cscope.*
/html/
/reports/
/cscope.out
/tags
/tmp/
tools/man-generator
tools/man-generator.c
test/lib/lvchange
test/lib/lvconvert
test/lib/lvcreate
test/lib/lvdisplay
test/lib/lvextend
test/lib/lvmconfig
test/lib/lvmdiskscan
test/lib/lvmsadc
test/lib/lvmsar
test/lib/lvreduce
test/lib/lvremove
test/lib/lvrename
test/lib/lvresize
test/lib/lvs
test/lib/lvscan
test/lib/pvchange
test/lib/pvck
test/lib/pvcreate
test/lib/pvdisplay
test/lib/pvmove
test/lib/pvremove
test/lib/pvresize
test/lib/pvs
test/lib/pvscan
test/lib/vgcfgbackup
test/lib/vgcfgrestore
test/lib/vgchange
test/lib/vgck
test/lib/vgconvert
test/lib/vgcreate
test/lib/vgdisplay
test/lib/vgexport
test/lib/vgextend
test/lib/vgimport
test/lib/vgimportclone
test/lib/vgmerge
test/lib/vgmknodes
test/lib/vgreduce
test/lib/vgremove
test/lib/vgrename
test/lib/vgs
test/lib/vgscan
test/lib/vgsplit
test/api/lvtest.t
test/api/pe_start.t
test/api/percent.t
test/api/python_lvm_unit.py
test/api/test
test/api/thin_percent.t
test/api/vglist.t
test/api/vgtest.t
test/lib/aux
test/lib/check
test/lib/clvmd
test/lib/dm-version-expected
test/lib/dmeventd
test/lib/dmsetup
test/lib/dmstats
test/lib/fail
test/lib/flavour-ndev-cluster
test/lib/flavour-ndev-cluster-lvmpolld
test/lib/flavour-ndev-lvmetad
test/lib/flavour-ndev-lvmetad-lvmpolld
test/lib/flavour-ndev-lvmpolld
test/lib/flavour-ndev-vanilla
test/lib/flavour-udev-cluster
test/lib/flavour-udev-cluster-lvmpolld
test/lib/flavour-udev-lvmetad
test/lib/flavour-udev-lvmetad-lvmpolld
test/lib/flavour-udev-lvmlockd-dlm
test/lib/flavour-udev-lvmlockd-sanlock
test/lib/flavour-udev-lvmlockd-test
test/lib/flavour-udev-lvmpolld
test/lib/flavour-udev-vanilla
test/lib/fsadm
test/lib/get
test/lib/inittest
test/lib/invalid
test/lib/lvm
test/lib/lvm-wrapper
test/lib/lvmchange
test/lib/lvmdbusd.profile
test/lib/lvmetad
test/lib/lvmpolld
test/lib/not
test/lib/paths
test/lib/paths-common
test/lib/runner
test/lib/should
test/lib/test
test/lib/thin-performance.profile
test/lib/utils
test/lib/version-expected
test/unit/dmraid_t.c
test/unit/unit-test


@@ -1,25 +0,0 @@
BSD 2-Clause License
Copyright (c) 2014, Red Hat, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Makefile.in

@@ -1,6 +1,6 @@
#
# Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
# Copyright (C) 2004-2018 Red Hat, Inc. All rights reserved.
# Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
@@ -18,7 +18,7 @@ top_builddir = @top_builddir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
SUBDIRS = conf daemons include lib libdaemon libdm man scripts device_mapper tools
SUBDIRS = conf daemons include lib libdaemon libdm man scripts tools
ifeq ("@UDEV_RULES@", "yes")
SUBDIRS += udev
@@ -43,7 +43,8 @@ endif
ifeq ($(MAKECMDGOALS),distclean)
SUBDIRS = conf include man test scripts \
libdaemon lib tools daemons libdm \
udev po liblvm python device_mapper
udev po liblvm python \
unit-tests/datastruct unit-tests/mm unit-tests/regex
tools.distclean: test.distclean
endif
DISTCLEAN_DIRS += lcov_reports*
@@ -51,21 +52,14 @@ DISTCLEAN_TARGETS += config.cache config.log config.status make.tmpl
include make.tmpl
include $(top_srcdir)/base/Makefile
libdm: include $(top_builddir)/base/libbase.a
libdaemon: include $(top_builddir)/base/libbase.a
lib: libdm libdaemon $(top_builddir)/base/libbase.a
liblvm: lib $(top_builddir)/base/libbase.a
daemons: lib libdaemon tools $(top_builddir)/base/libbase.a
tools: lib libdaemon device-mapper $(top_builddir)/base/libbase.a
libdm: include
libdaemon: include
lib: libdm libdaemon
liblvm: lib
daemons: lib libdaemon tools
tools: lib libdaemon device-mapper
po: tools daemons
man: tools
all_man: tools
scripts: liblvm libdm
test: tools daemons $(top_builddir)/base/libbase.a
unit-test: lib $(top_builddir)/base/libbase.a
run-unit-test: unit-test
lib.device-mapper: include.device-mapper
libdm.device-mapper: include.device-mapper
@@ -101,10 +95,10 @@ endif
DISTCLEAN_TARGETS += cscope.out
CLEAN_DIRS += autom4te.cache
check check_system check_cluster check_local check_lvmetad check_lvmpolld check_lvmlockd_test check_lvmlockd_dlm check_lvmlockd_sanlock unit-test run-unit-test: test
check check_system check_cluster check_local check_lvmetad check_lvmpolld check_lvmlockd_test check_lvmlockd_dlm check_lvmlockd_sanlock unit: all
$(MAKE) -C test $(@)
conf.generate man.generate: tools
conf.generate: tools
# how to use parenthesis in makefiles
leftparen:=(
@@ -126,17 +120,16 @@ rpm: dist
$(LN_S) -f $(abs_top_srcdir)/spec/build.inc $(rpmbuilddir)/SOURCES
$(LN_S) -f $(abs_top_srcdir)/spec/macros.inc $(rpmbuilddir)/SOURCES
$(LN_S) -f $(abs_top_srcdir)/spec/packages.inc $(rpmbuilddir)/SOURCES
DM_VER=$$(cut -d' ' -f1 $(top_srcdir)/VERSION_DM | cut -d- -f1);\
GIT_VER=$$(cd $(top_srcdir); git describe | cut -s -d- --output-delimiter=. -f2,3);\
DM_VER=$$(cut -d- -f1 $(top_srcdir)/VERSION_DM);\
GIT_VER=$$(cd $(top_srcdir); git describe | cut -d- --output-delimiter=. -f2,3 || echo 0);\
sed -e "s,\(device_mapper_version\) [0-9.]*$$,\1 $$DM_VER," \
-e "s,^\(Version:[^0-9%]*\)[0-9.]*$$,\1 $(LVM_VER)," \
-e "s,^\(Release:[^0-9%]*\)[0-9.]\+,\1 $${GIT_VER:-"0"}," \
-e "s,^\(Release:[^0-9%]*\)[0-9.]\+,\1 $$GIT_VER," \
$(top_srcdir)/spec/source.inc >$(rpmbuilddir)/SOURCES/source.inc
rpmbuild -v --define "_topdir $(rpmbuilddir)" -ba $(top_srcdir)/spec/lvm2.spec
generate: conf.generate man.generate
generate: conf.generate
$(MAKE) -C conf generate
$(MAKE) -C man generate
all_man:
$(MAKE) -C man all_man
@@ -150,7 +143,7 @@ install_system_dirs:
$(INSTALL_ROOT_DIR) $(DESTDIR)$(DEFAULT_RUN_DIR)
$(INSTALL_ROOT_DATA) /dev/null $(DESTDIR)$(DEFAULT_CACHE_DIR)/.cache
install_initscripts:
install_initscripts:
$(MAKE) -C scripts install_initscripts
install_systemd_generators:
@@ -173,7 +166,6 @@ install_tmpfiles_configuration:
LCOV_TRACES = libdm.info lib.info liblvm.info tools.info \
libdaemon/client.info libdaemon/server.info \
test/unit.info \
daemons/clvmd.info \
daemons/dmeventd.info \
daemons/lvmetad.info \
@@ -205,17 +197,42 @@ $(LCOV_TRACES):
ifneq ("$(GENHTML)", "")
lcov: $(LCOV_TRACES)
$(RM) -rf $(LCOV_REPORTS_DIR)
$(RM) -r $(LCOV_REPORTS_DIR)
$(MKDIR_P) $(LCOV_REPORTS_DIR)
$(LCOV) --capture --directory $(top_builddir) --ignore-errors source \
--output-file $(LCOV_REPORTS_DIR)/out.info
-test ! -s $(LCOV_REPORTS_DIR)/out.info || \
$(GENHTML) -o $(LCOV_REPORTS_DIR) --ignore-errors source \
$(LCOV_REPORTS_DIR)/out.info
for i in $(LCOV_TRACES); do \
test -s $$i -a $$(wc -w <$$i) -ge 100 && lc="$$lc $$i"; \
done; \
test -z "$$lc" || $(GENHTML) -p @abs_top_builddir@ \
-o $(LCOV_REPORTS_DIR) $$lc
endif
endif
ifeq ("$(TESTING)", "yes")
# testing and report generation
RUBY=ruby1.9 -Ireport-generators/lib -Ireport-generators/test
.PHONY: unit-test ruby-test test-programs
# FIXME: put dependencies on libdm and liblvm
# FIXME: Should be handled by Makefiles in subdirs, not here at top level.
test-programs:
cd unit-tests/regex && $(MAKE)
cd unit-tests/datastruct && $(MAKE)
cd unit-tests/mm && $(MAKE)
unit-test: test-programs
$(RUBY) report-generators/unit_test.rb $(shell find . -name TESTS)
$(RUBY) report-generators/title_page.rb
memcheck: test-programs
$(RUBY) report-generators/memcheck.rb $(shell find . -name TESTS)
$(RUBY) report-generators/title_page.rb
ruby-test:
$(RUBY) report-generators/test/ts.rb
endif
ifneq ($(shell which ctags),)
.PHONY: tags
tags:

README

@@ -6,16 +6,11 @@ Installation instructions are in INSTALL.
There is no warranty - see COPYING and COPYING.LIB.
Tarballs are available from:
ftp://sourceware.org/pub/lvm2/
https://github.com/lvmteam/lvm2/releases
ftp://sources.redhat.com/pub/lvm2/
The source code is stored in git:
https://sourceware.org/git/?p=lvm2.git
git clone git://sourceware.org/git/lvm2.git
mirrored to:
https://github.com/lvmteam/lvm2
git clone https://github.com/lvmteam/lvm2.git
git clone git@github.com:lvmteam/lvm2.git
http://git.fedorahosted.org/git/lvm2.git
git clone git://git.fedorahosted.org/git/lvm2.git
Mailing list for general discussion related to LVM2:
linux-lvm@redhat.com
@@ -33,17 +28,6 @@ and multipath-tools:
dm-devel@redhat.com
Subscribe from https://www.redhat.com/mailman/listinfo/dm-devel
Website:
https://sourceware.org/lvm2/
The source code repository used until 7th June 2012 is accessible here:
http://sources.redhat.com/cgi-bin/cvsweb.cgi/LVM2/?cvsroot=lvm2.
Report upstream bugs at:
https://bugzilla.redhat.com/enter_bug.cgi?product=LVM%20and%20device-mapper
or open issues at:
https://github.com/lvmteam/lvm2/issues
The source code repository used until 7th June 2012 is accessible using CVS:
cvs -d :pserver:cvs@sourceware.org:/cvs/lvm2 login cvs
cvs -d :pserver:cvs@sourceware.org:/cvs/lvm2 checkout LVM2
The password is cvs.

TESTING

@@ -1,62 +0,0 @@
LVM2 Test Suite
===============
The codebase contains many tests in the test subdirectory.
Before running tests
--------------------
Keep in mind the testsuite MUST be run as the root user.
It is recommended not to use LVM on the test machine, especially when running
tests with udev (`make check_system`).
You MUST disable (or mask) any LVM daemons:
- lvmetad
- dmeventd
- lvmpolld
- lvmdbusd
- lvmlockd
- clvmd
- cmirrord
For running cluster tests, we are using singlenode locking. Pass
`--with-clvmd=singlenode` to configure.
NOTE: This is useful only for testing, and should not be used in production
code.
To run D-Bus daemon tests, an existing D-Bus session is required.
Running tests
-------------
As root run:
make check
To run only tests matching a string:
make check T=test
To skip tests matching a string:
make check S=test
There are other targets, and many environment variables can be used to tweak
the testsuite; for a full list and description, run `make -C test help`.
Installing testsuite
--------------------
It is possible to install the testsuite and run it against an installed LVM.
Run the following:
make -C test install
Then the lvm2-testsuite binary can be executed to test the installed binaries.
See `lvm2-testsuite --help` for options. The same environment variables can be
used as with `make check`.
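
As a quick illustration (run as root; the substrings used with T= and S= below
are only examples, not required test names), the pieces above combine like this:

    make check T=lvcreate      # run only tests whose names match 'lvcreate'
    make check S=lvmetad       # skip tests whose names match 'lvmetad'
    make -C test install       # install the testsuite
    lvm2-testsuite --help      # list options for testing installed binaries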

VERSION

@@ -1 +1 @@
2.02.189(2)-git (2021-05-07)
2.02.165(2)-git (2016-08-15)

VERSION_DM

@@ -1 +1 @@
1.02.174-git (2021-05-07)
1.02.134-git (2016-08-15)

WHATS_NEW

@@ -1,559 +1,5 @@
Version 2.02.189 -
================================
Version 2.02.188 - 07th May 2021
================================
Fix problem with unbound variable usage within fsadm.
Avoid removing LVs on error path of lvconvert during creation volumes.
Fix crashing lvdisplay when thin volume was waiting for merge.
Support option --errorwhenfull when converting volume to thin-pool.
Improve thin-performance profile support conversion to thin-pool.
Support resize of cached volumes.
Allocation prints better error when metadata cannot fit on a single PV.
Pvmove can better resolve full thin-pool tree move.
Limit pool metadata spare to 16GiB.
Improves conversion and allocation of pool metadata.
Support thin pool metadata 15.88GiB, adds 64MiB, thin_pool_crop_metadata=0.
Enhance lvdisplay to report raid available/partial.
Enhance error handling for fsadm and handle correct fsck result.
Stop logging rename errors from persistent filter.
Dmeventd lvm plugin ignores higher reserved_stack lvm.conf values.
Support using BLKZEROOUT for clearing devices.
Support interruption when wiping LVs.
Add configure --enable-editline support as an alternative to readline.
Zero pool metadata on allocation (disable with allocation/zero_metadata=0).
Failure in zeroing or wiping will fail command (bypass with -Zn, -Wn).
Fix support for lvconvert --repair used by foreign apps (i.e. Docker).
Support interruption for bcache waiting.
Fix bcache when device has too many failing writes.
Fix bcache waiting for IO completion with failing disks.
Configure use own python path name order to prefer using python3.
Enhance reporting and error handling when creating thin volumes.
Use revert_lv() on reload error path after vg_revert().
Improve estimation of needed extents when creating thin-pool.
Use extra 1% when resizing thin-pool metadata LV with --use-policy.
Enhance --use-policy percentage rounding.
Switch code base to use flexible array syntax.
Preserve uint32_t for seqno handling.
Switch from mmap to plain read when loading regular files.
Fix running out of free buffers for async writing for larger writes.
Fix conversion to raid from striped lagging type.
Fix conversion to 'mirrored' mirror log with larger regionsize.
Fix support for lvconvert --repair used by foreign apps (i.e. Docker).
Version 2.02.187 - 24th March 2020
==================================
Avoid running cache input arg validation when creating vdo pool.
Prevent raid reshaping of stacked volumes.
Ensure minimum required region size on striped RaidLV creation.
Fix resize of thin-pool with data and metadata of different segtype.
Fix splitting mirror leg in cluster.
Fix activation order when removing merged snapshot.
Add support for DM_DEVICE_GET_TARGET_VERSION into device_mapper.
Add lvextend-raid.sh to check on RaidLV extensions synchronization.
Fix lvmetad shutdown and avoid lengthy timeouts when rebooting system.
Prevent creating VGs with PVs with different logical block sizes.
Pvmove runs in exclusively activating mode for exclusively active LVs.
Activate thin-pool layered volume as 'read-only' device.
Ignore crypto devices with UUID signature CRYPT-SUBDEV.
Enhance validation for thin and cache pool conversion and swapping.
Fixed activation on boot - lvm2 no longer activates incomplete VGs.
Version 2.02.186 - 27th August 2019
Version 2.02.165 -
===================================
Improve internal removal of cached devices.
Synchronize with udev when dropping snapshot.
Add missing device synchronization point before removing pvmove node.
Correctly set read_ahead for LVs when pvmove is finished.
Fix metadata writes from corrupting with large physical block size.
Report no_discard_passdown for cache LVs with lvs -o+kernel_discards.
Prevent shared active mirror LVs with lvmlockd.
Version 2.02.185 - 13th May 2019
================================
Fix change of monitoring in clustered volumes.
Improve -lXXX%VG modifier which improves cache segment estimation.
Add synchronization with udev before removing cached devices.
Fix missing growth of _pmspare volume when extending _tmeta volume.
Automatically grow thin metadata, when thin data gets too big.
Add support for vgsplit with cached devices.
Fix signal delivery checking race in libdaemon (lvmetad).
Add missing Before=shutdown.target to LVM2 services to fix shutdown ordering.
Version 2.02.184 - 22nd March 2019
==================================
Fix (de)activation of RaidLVs with visible SubLVs
Change scan_lvs default to 0 so LVs are not scanned for PVs.
Add scan_lvs config setting to control if lvm scans LVs for PVs.
Fix missing proper initialization of pv_list struct when adding pv.
Version 2.02.183 - 07th December 2018
=====================================
Avoid disabling lvmetad when repair does nothing.
Fix component detection for md version 0.90.
Use sync io if async io_setup fails, or use_aio=0 is set in config.
Avoid opening devices to get block size by using existing open fd.
Version 2.02.182 - 30th October 2018
====================================
Fix possible write race between last metadata block and the first extent.
Fix filtering of md 1.0 devices so they are not seen as duplicate PVs.
Fix lvconvert striped/raid0/raid0_meta -> raid6 regression.
Add After=rbdmap.service to {lvm2-activation-net,blk-availability}.service.
Fix pvs with lvmetad to avoid too many open files from filter reads.
Fix pvscan --cache to avoid too many open files from filter reads.
Reduce max concurrent aios to avoid EMFILE with many devices.
Fix lvconvert conversion attempts to linear.
Fix lvconvert raid0/raid0_meta -> striped regression.
Fix lvconvert --splitmirror for mirror type (2.02.178).
Do not pair cache policy and cache metadata format.
Fix mirrors honoring read_only_volume_list.
Version 2.02.181 - 01 August 2018
=================================
Reject conversions on raid1 LVs with split tracked SubLVs.
Reject conversions on raid1 split tracked SubLVs.
Fix dmstats list failing when no regions exist.
Reject conversions of LVs under snapshot.
Limit suggested options on incorrect option for lvconvert subcommand.
Version 2.02.180 - 19th July 2018
=================================
Never send any discard ioctl with test mode.
Fix thin-pool alloc which needs same PV for data and metadata.
Extend list of non-memlocked areas with newly linked libs.
Enhance vgcfgrestore to check for active LVs in restored VG.
lvconvert: provide possible layouts between linear and striped/raid
Fix unmonitoring of merging snapshots.
Add missing -l description in fsadm man page.
Cache can use metadata format 2 with cleaner policy.
Avoid showing internal error in lvs output or pvmoved LVs.
Fix check if resized PV can also fit metadata area.
Reopen devices RDWR only before writing to avoid udev issues.
Change pvresize output confusing when no resize took place.
Fix lvmetad hanging on shutdown.
Fix mem leak in clvmd and more coverity issues.
Version 2.02.179 - 18th June 2018
=================================
Allow forced vgchange to lock type none on clustered VG.
Add the report field "shared".
Enable automatic metadata consistency repair on a shared VG.
Fix pvremove force on a PV with a shared VG.
Fixed vgimportclone of a PV with a shared VG.
Enable previously disallowed thin/cache commands in shared VGs.
Enable metadata-related changes on LVs active with shared lock.
Do not continue trying to use a device that cannot be opened.
Fix problems opening a device that fails and returns.
Use versionsort to fix archive file expiry beyond 100000 files.
Version 2.02.178 - 13th June 2018
=================================
Version 2.02.178-rc1 - 24th May 2018
====================================
Add libaio dependency for build.
Remove lvm1 and pool format handling and add filter to ignore them.
Move some filter checks to after disks are read.
Rework disk scanning and when it is used.
Add new io layer and shift code to using it.
Fix lvconvert's return code on degraded -m raid1 conversion.
--enable-testing switch for ./configure has been removed.
--with-snapshots switch for ./configure has been removed.
--with-mirrors switch for ./configure has been removed.
--with-raid switch for ./configure has been removed.
--with-thin switch for ./configure has been removed.
--with-cache switch for ./configure has been removed.
Include new unit-test framework and unit tests.
Extend validation of region_size for mirror segment.
Reload whole device stack when reinitializing mirror log.
Mirrors without monitoring are WARNING and not blocking on error.
Detect too big region_size with clustered mirrors.
Fix evaluation of maximal region size for mirror log.
Enhance mirror log size estimation and use smaller size when possible.
Fix incorrect mirror log size calculation on 32bit arch.
Enhance preloading tree creating.
Fix regression on acceptance of any LV on lvconvert.
Restore usability of thin LV to be again external origin for another thin.
Keep systemd vars on change event in 69-dm-lvm-metad.rules for systemd reload.
Write systemd and non-systemd rule in 69-dm-lvm-metad.rules, GOTO active one.
Add test for activation/volume_list (Sub)LV remnants.
Disallow usage of cache format 2 with mq cache policy.
Again accept striped LV as COW LV with lvconvert -s (2.02.169).
Fix raid target version testing for supported features.
Allow activation of pools when thin/cache_check tool is missing.
Remove RaidLV on creation failure when rmeta devices can't be activated.
Add prioritized_section() to restore cookie boundaries (2.02.177).
Enhance error messages when read error happens.
Enhance mirror log initialization for old mirror target.
Skip private crypto and stratis devices.
Skip frozen raid devices from scanning.
Activate RAID SubLVs on read_only_volume_list readwrite.
Offer convenience type raid5_n converting to raid10.
Automatically avoid reading invalid snapshots during device scan.
Ensure COW device is writable even for read-only thick snapshots.
Support activation of component LVs in read-only mode.
Extend internal library to recognize and work with component LV.
Skip duplicate check for active LV when prompting for its removal.
Activate correct lock holding LV when it is cached.
Do not modify archived metadata when removing striped raid.
Fix memleak on error path when obtaining lv_raid_data_offset.
Fix compatibility size test of extended external origin.
Add external_origin visiting in for_each_sub_lv().
Ensure cluster commands drop their device cache before locking VG.
Do not report LV as remotely active when it's locally exclusive in cluster.
Add deprecate messages for usage of mirrors with mirrorlog.
Separate reporting of monitoring status and error status.
Improve validation of created strings in vgimportclone.
Add missing initialisation of mem pool in systemd generator.
Do not reopen output streams for multithreaded users of liblvm.
Configure ensures /usr/bin dir is checked for dmpd tools.
Restore pvmove support for wide-clustered active volumes (2.02.177).
Avoid non-exclusive activation of exclusive segment types.
Fix trimming sibling PVs when doing a pvmove of raid subLVs.
Preserve exclusive activation during thin snapshot merge.
Avoid exceeding array bounds in allocation tag processing.
Add --lockopt to common options and add option to skip selected locks.
Version 2.02.177 - 18th December 2017
=====================================
When writing text metadata content, use complete 4096 byte blocks.
Change text format metadata alignment from 512 to 4096 bytes.
When writing metadata, consistently skip mdas marked as failed.
Refactor and adjust text format metadata alignment calculation.
Fix python3 path in lvmdbusd to use value detected by configure.
Reduce checks for active LVs in vgchange before background polling.
Ensure _node_send_message always uses clean status of thin pool.
Fix lvmlockd to use pool lock when accessing _tmeta volume.
Report expected sanlock_convert errors only when retries fail.
Avoid blocking in sanlock_convert on SH to EX lock conversion.
Deactivate missing raid LV legs (_rimage_X-missing_Y_Z) on deactivation.
Skip read-modify-write when entire block is replaced.
Categorise I/O with reason annotations in debug messages.
Allow extending of raid LVs created with --nosync after a failed repair.
Command will lock memory only when suspending volumes.
Merge segments when pvmove is finished.
Remove label_verify that has never been used.
Ensure very large numbers used as arguments are not casted to lower values.
Enhance reading and validation of options stripes and stripes_size.
Fix printing of default stripe size when user is not using stripes.
Activation code for pvmove automatically discovers holding LVs for resume.
Make a pvmove LV locking holder.
Do not change critical section counter on resume path without real resume.
Enhance activation code to automatically suspend pvmove participants.
Prevent conversion of thin volumes to snapshot origin when lvmlockd is used.
Correct the steps to change lock type in lvmlockd man page.
Retry lock acquisition on recognized sanlock errors.
Fix lock manager error codes in lvmlockd.
Remove unnecessary single read from lvmdiskscan.
Check raid reshape flags in vg_validate().
Add support for pvmove of cache and snapshot origins.
Avoid using precommitted metadata for suspending pvmove tree.
Enhance pvmove locking.
Deactivate activated LVs on error path when pvmove activation fails.
Add "io" to log/debug_classes for logging low-level I/O.
Eliminate redundant nested VG metadata in VG struct.
Avoid importing persistent filter in vgscan/pvscan/vgrename.
Fix memleak of string buffer when vgcfgbackup runs in secure mode.
Do not print error when clvmd cannot find running clvmd.
Prevent start of new merge of snapshot if origin is already being merged.
Fix offered type for raid6_n_6 to raid5 conversion (raid5_n).
Deactivate sub LVs when removing unused cache-pool.
Do not take backup with suspended devices.
Avoid RAID4 activation on incompatible kernels under all circumstances.
Reject conversion request to striped/raid0 on 2-legged raid4/5.
Version 2.02.176 - 3rd November 2017
====================================
Keep Install section only in lvm2-{lvmetad,lvmpolld}.socket systemd unit.
Fix segfault in lvm_pv_remove in liblvm. (2.02.173)
Do not allow storing VG metadata with LV without any segment.
Fix printed message when thin snapshot was already merged.
Remove created spare LV when creation of thin-pool failed.
Avoid reading ignored metadata when mda gets used again.
Fix detection of moved PVs in vgsplit. (2.02.175)
Ignore --stripes/--stripesize on RAID takeover
Improve used paths for generated systemd units and init shells.
Disallow creation of snapshot of mirror/raid subLV (was never supported).
Fix regression in more advanced vgname extraction in lvconvert (2.02.169).
Allow lvcreate to be used for caching of _tdata LV.
Avoid internal error when resizing cache type _tdata LV (not yet supported).
Show original converted names when lvconverting LV to pool volume.
Move lib code used only by liblvm into metadata-liblvm.c.
Distinguish between device not found and excluded by filter.
Monitor external origin LVs.
Remove the replicator code, including configure --with-replicators.
Allow lvcreate --type mirror to work with 100%FREE.
Improve selection of resource name for complex volume activation lock.
Avoid cutting first character of resource name for activation lock.
Support for encrypted devices in fsadm.
Improve thin pool overprovisioning and repair warning messages.
Fix incorrect adjustment of region size on striped RaidLVs.
Version 2.02.175 - 6th October 2017
===================================
Use --help with blockdev when checking for --getsize64 support in fsadm.
Dump lvmdbusd debug information with SIGUSR1.
Fix metadata corruption in vgsplit and vgmerge intermediate states.
Add PV_MOVED_VG PV status flag to mark PVs moving between VGs.
Fix lvmdbus hang and recognise unknown VG correctly.
Improve error messages when command rules fail.
Require LV name with pvmove in a shared VG.
Allow shared active mirror LVs with lvmlockd, dlm, and cmirrord.
Support lvconvert --repair with cache and cachepool volumes.
lvconvert --repair respects --poolmetadataspare option.
Mark that we don't plan to develop liblvm2app and python bindings any further.
Fix thin pool creation in shared VG. (2.02.173)
Version 2.02.174 - 13th September 2017
======================================
Prevent raid1 split with trackchanges in a shared VG.
Avoid double unlocking of client & lockspace mutexes in lvmlockd.
Fix leaking of file descriptor for non-blocking filebased locking.
Fix check for 2nd mda at end of disk fits if using pvcreate --restorefile.
Use maximum metadataarea size that fits with pvcreate --restorefile.
Always clear cached bootloaderarea when wiping label e.g. in pvcreate.
Disallow --bootloaderareasize with pvcreate --restorefile.
Fix lvmlockd check for running lock managers during lock adoption.
Add --withgeneralpreamble and --withlocalpreamble to lvmconfig.
Improve makefiles' linking.
Fix some paths in generated makefiles to respected configured settings.
Add warning when creating thin-pool with zeroing and chunk size >= 512KiB.
Introduce exit code 4 EINIT_FAILED to replace -1 when initialisation fails.
Add synchronization points with udev during reshape of raid LVs.
Version 2.02.173 - 20th July 2017
=================================
Add synchronization points with udev during conversion of raid LVs.
Improve --size args validation and report more detailed error message.
Initialize debugging mutex before any debug message in clvmd.
Log error instead of warn when noticing connection problem with lvmetad.
Fix memory leak in lvmetad when working with duplicates.
Remove restrictions on reshaping open and clustered raid devices.
Add incompatible data_offset to raid metadata to fix reshape activation.
Accept 'lvm -h' and 'lvm --help' as well as 'lvm help' for help.
Suppress error message from accept() on clean lvmetad shutdown.
Tidy clvmd client list processing and fix segfaults.
Protect clvmd debug log messages with mutex and add client id.
Fix shellcheck reported issues for script files.
Version 2.02.172 - 28th June 2017
=================================
Add missing NULL to argv array when splitting cmdline arguments.
Add display_percent helper function for printing percent values.
lvconvert --repair handles failing raid legs (present but marked 'D'ead).
Do not lvdisplay --maps unset settings of cache pool.
Fix lvdisplay --maps for cache pool without policy settings.
Support aborting of flushing cache LV.
Reenable conversion of data and metadata thin-pool volumes to raid.
Improve raid status reporting with lvs.
No longer necessary to '--force' a repair for RAID1.
Linear to RAID1 upconverts now use "recover" sync action, not "resync".
Improve lvcreate --cachepool arg validation.
Limit maximum size of thin-pool for specific chunk size.
Print a warning about in-use PVs with no VG using them.
Disable automatic clearing of PVs that look like in-use orphans.
Cache format2 flag is now using segment name type field.
Support storing status flags via segtype name field.
Stop using '--yes' mode when fsadm runs without terminal.
Extend validation of filesystems resized by fsadm.
Enhance lvconvert automatic settings of possible (raid) LV types.
Allow lvchange to change properties on a thin pool data sub LV.
Fix lvcreate extent percentage calculation for mirrors.
Don't reinstate still-missing devices when correcting inconsistent metadata.
Properly handle subshell return codes in fsadm.
Disallow cachepool creation with policy cleaner and mode writeback.
Version 2.02.171 - 3rd May 2017
===============================
Fix memory warnings by using mempools for command definition processing.
Fix running commands from a script file.
Add pvcreate prompt when device size doesn't match setphysicalvolumesize.
lvconvert - preserve region size on raid1 image count changes
Adjust pvresize/pvcreate messages and prompt if underlying dev size differs.
raid - sanely handle insufficient space on takeover.
Fix configure --enable-notify-dbus status message.
Change configure option name prefix from --enable-lockd to --enable-lvmlockd.
lvcreate - raise mirror/raid default regionsize to 2MiB
Add missing configurable prefix to configuration file installation directory.
Version 2.02.170 - 13th April 2017
==================================
Introduce global/fsadm_executable to make fsadm path configurable.
Look for limited thin pool metadata size when using 16G metadata.
Add lvconvert pool creation rule disallowing options with poolmetadata.
Fix lvconvert when the same LV is incorrectly reused in options.
Fix lvconvert VG name validation in option values.
Fix missing lvmlockd LV locks in lvchange and lvconvert.
Fix dmeventd setup for lvchange --poll.
Fix use of --poll and --monitor with lvchange and vgchange.
Disallow lvconvert of hidden LV to a pool.
Ignore --partial option when not used for activation.
Allow --activationmode option with lvchange --refresh.
Better message on lvconvert --regionsize
Allow valid lvconvert --regionsize change
Add raid10 alias raid10_near
Handle insufficient PVs on lvconvert takeover
Fix SIGINT blocking to prevent corrupted metadata
Fix systemd unit existence check for lvmconf --services --startstopservices.
Check and use PATH_MAX buffers when creating vgrename device paths.
Version 2.02.169 - 28th March 2017
==================================
Automatically decide whether '-' in a man page is a hyphen or a minus sign.
Add build-time configuration command line to 'lvm version' output.
Handle known table line parameter order change in specific raid target vsns.
Conditionally reject raid convert to striped/raid0* after reshape.
Ensure raid6 upconversion restrictions.
Adjust mirror & raid dmeventd plugins for new lvconvert --repair behaviour.
Disable lvmetad when lvconvert --repair is run.
Remove obsolete lvmchange binary - convert to built-in command.
Show more information for cached volumes in lvdisplay [-m].
Add option for lvcreate/lvconvert --cachemetadataformat auto|1|2.
Support cache segment with configurable metadata format.
Add allocation/cache_metadata_format profilable settings.
Use function cache_set_params() for both lvcreate and lvconvert.
Skip rounding on cache chunk size boundary when creating cache LV.
Improve cache_set_params support for chunk_size selection.
Fix metadata profile allocation/cache_[mode|policy] setting.
Fix missing support for using allocation/cache_pool_chunk_size setting.
Upstream git moved to https://sourceware.org/git/?p=lvm2
Support conversion of raid type, stripesize and number of disks
Reject writemostly/writebehind in lvchange during resynchronization.
Deactivate active origin first before removal for improved workflow.
Fix regression of accepting both --type and -m with lvresize. (2.02.158)
Add lvconvert --swapmetadata, new specific way to swap pool metadata LVs.
Add lvconvert --startpoll, new specific way to start polling conversions.
Add lvconvert --mergethin, new specific way to merge thin snapshots.
Add lvconvert --mergemirrors, new specific way to merge split mirrors.
Add lvconvert --mergesnapshot, new specific way to combine cow LVs.
Split up lvconvert code based on command definitions.
Split up lvchange code based on command definitions.
Generate help output and man pages from command definitions.
Verify all command line items against command definition.
Match every command run to one command definition.
Specify every allowed command definition/syntax in command-lines.in.
Add extra memory page when limiting pthread stack size in clvmd.
Support striped/raid0* <-> raid10_near conversions.
Support shrinking of RaidLVs.
Support region size changes on existing RaidLVs.
Avoid parallel usage of cpg_mcast_joined() in clvmd with corosync.
Support raid6_{ls,rs,la,ra}_6 segment types and conversions from/to it.
Support raid6_n_6 segment type and conversions from/to it.
Support raid5_n segment type and conversions from/to it.
Support new internal command _dmeventd_thin_command.
Introduce new dmeventd/thin_command configurable setting.
Use new default units 'r' for displaying sizes.
Also unmount mount point on top of MD device if using blkdeactivate -u.
Restore check preventing resize of cache type volumes (2.02.158).
Add missing udev sync when flushing dirty cache content.
vgchange -p accepts only uint32 numbers.
Report thin LV date for merged LV when the merge is in progress.
Detect if snapshot merge really started before polling for progress.
Checking LV for merging origin requires also it has merged snapshot.
Extend validation of metadata processing.
Enable usage of cached volumes as snapshot origin LV.
Fix displayed lv name when splitting snapshot (2.02.146).
Warn about command not making metadata backup just once per command.
Enable usage of cached volume as thin volume's external origin.
Support cache volume activation with -real layer.
Improve search of lock-holder for external origin and thin-pool.
Support status checking of cache volume used in layer.
Avoid shifting by one number of blocks when clearing dirty cache volume.
Extend metadata validation of external origin LV use count.
Fix dm table when the last user of active external origin is removed.
Improve reported lvs status for active external origin volume.
Fix table load for split RAID LV and require explicit activation.
Always activate split RAID LV exclusively locally.
Do not use LV RAID status bit for segment status.
Check segtype directly instead of checking RAID in segment status.
Reuse existing code for raid image removal.
Fix pvmove leaving -pvmove0 error device in clustered VG.
Avoid adding extra '_' at end of raid extracted images or metadata.
Optimize another _rmeta clearing code.
Fix deactivation of raid orphan devices for clustered VG.
Fix lvconvert raid1 to mirror table reload order.
Add internal function for separate mirror log preparation.
Fix segfault in lvmetad from missing NULL in daemon_reply_simple.
Simplify internal _info_run() and use _setup_task_run() for mknod.
Better API for internal function _setup_task_run.
Avoid using lv_has_target_type() call within lv_info_with_seg_status.
Simplify internal lv_info_with_seg_status API.
Decide which status is needed in one place for lv_info_with_seg_status.
Fix matching of LV segment when checking for it info status.
Report log_warn when status cannot be parsed.
Test segment type before accessing segment members when checking status.
Implement compatible target function for stripe segment.
Use status info to report merge failed and snapshot invalid lvs fields.
Version 2.02.168 - 30th November 2016
=====================================
Display correct sync_percent on large RaidLVs
lvmdbusd --blackboxsize <n> added, used to override default size of 16
Allow a transiently failed RaidLV to be refreshed
Use lv_update_and_reload() inside mirror code where it applies.
Preserve mirrored status for temporary layered mirrors.
Use transient raid check before repairing raid volume.
Implement transient status check for raid volumes.
Only log msg as debug if lvm2-lvmdbusd unit missing for D-Bus notification.
Avoid duplicated underscore in name of extracted LV image.
Missing stripe filler can now also be 'zero'.
lvconvert --repair accepts --interval and --background option.
More efficiently prepare _rmeta devices when creating a new raid LV.
Version 2.02.167 - 5th November 2016
====================================
Use log_error in regex and sysfs filter to describe reason of failure.
Fix blkdeactivate to deactivate dev stack if dev on top already unmounted.
Prevent non-synced raid1 repair unless --force
Prevent raid4 creation/conversion on non-supporting kernels
Add direct striped -> raid4 conversion
Fix raid4 parity image pair position on conversions from striped/raid0*
Fix a few unconverted return code values for some lvconvert error path.
Disable lvconvert of thin pool to raid while active.
Disable systemd service start rate limiting for lvm2-pvscan@.service.
Version 2.02.166 - 26th September 2016
======================================
Fix lvm2-activation-generator to read all LVM2 config sources. (2.02.155)
Fix lvchange-rebuild-raid.sh to cope with older target versions.
Use dm_config_parse_without_dup_node_check() to speedup metadata reading.
Fix lvconvert --repair regression
Fix reported origin lv field for cache volumes. (2.02.133)
Always specify snapshot cow LV for monitoring not internal LV. (2.02.165)
Fix lvchange --discard|--zero for active thin-pool.
Enforce 4MiB or 25% metadata free space for thin pool operations.
Fix lock-holder device for thin pool with inactive thin volumes.
Use --alloc normal for mirror logs even if the mimages were stricter.
Use O_DIRECT to gather metadata in lvmdump.
Ignore creation_time when checking for matching metadata for lvmetad.
Fix possible NULL pointer dereference when checking for monitoring.
Add lvmreport(7) man page.
Don't install lvmraid(7) man page when raid excluded. (2.02.165)
Report 0% as dirty (copy%) for cache without any used block.
Fix lvm2api reporting of cache data and metadata percent.
Restore reporting of metadata usage for cache volumes (2.02.155).
Support raid scrubbing on cache origin LV.
Version 2.02.165 - 7th September 2016
=====================================
Add lvmraid(7) man page.
Use udev db to check for mpath components before running pvscan for lvmetad.
Use lsblk -s and lsblk -O in lvmdump only if these options are supported.
Fix number of stripes shown in lvcreate raid10 message when too many.
Change lvmdbusd to use new lvm shell facilities.
Do not monitor cache-pool metadata when LV is just being cleared.
Add allocation/cache_pool_max_chunks to prevent misuse of cache target.
Give error not segfault in lvconvert --splitmirrors when PV lies outside LV.
Fix typo in report/columns_as_rows config option name recognition (2.02.99).
Avoid PV tags when checking allocation against parallel PVs.
Disallow mirror conversions of raid10 volumes.
Fix dmeventd unmonitoring when segment type (and dso) changes.
Don't allow lvconvert --repair on raid0 devices or attempt to monitor them.
No longer adjust incorrect number of raid stripes supplied to lvcreate.
Move lcm and gcd to lib/misc.
Fix vgsplit of external origins. (2.02.162)
Prohibit creation of RAID LVs unless VG extent size is at least the page size.
Suppress some unnecessary --stripesize parameter warnings.
Fix 'pvmove -n name ...' to prohibit collocation of RAID SubLVs

WHATS_NEW_DM

@@ -1,177 +1,5 @@
Version 1.02.174 -
================================
Version 1.02.172 - 07th May 2021
================================
Add dm_tree_node_add_thin_pool_target_v1 with crop_metadata support.
Add support for VDO in blkdeactivate script.
Try to remove all created devices on dm preload tree error path.
Fix dm_list iterators with gcc 10 optimization (-ftree-pta).
Dmeventd handles timer without looping on short intervals.
Version 1.02.170 - 24th March 2020
==================================
Add support for DM_DEVICE_GET_TARGET_VERSION.
Version 1.02.164 - 27th August 2019
Version 1.02.134 -
===================================
Add debug of dmsetup udevcomplete with hexa print DM_COOKIE_COMPLETED.
Fix versioning of dm_stats_create_region and dm_stats_create_region.
Parsing of cache status understands no_discard_passdown.
Version 1.02.158 - 13th May 2019
================================
Version 1.02.156 - 22nd March 2019
==================================
Ensure migration_threshold for cache is at least 8 chunks.
Enhance ioctl flattening and add parameters only when needed.
Add DM_DEVICE_ARM_POLL for API completeness matching kernel.
Version 1.02.154 - 07th December 2018
=====================================
Do not add parameters for RESUME with DM_DEVICE_CREATE dm task.
Fix dmstats report printing no output.
Version 1.02.152 - 30th October 2018
====================================
Add hot fix to avoid locking collision when monitoring thin-pools.
Version 1.02.150 - 01 August 2018
=================================
Add vdo plugin for monitoring VDO devices.
Version 1.02.149 - 19th July 2018
=================================
Version 1.02.148 - 18th June 2018
=================================
Version 1.02.147 - 13th June 2018
=================================
Version 1.02.147-rc1 - 24th May 2018
====================================
Reuse uname() result for mirror target.
Recognize also mounted btrfs through dm_device_has_mounted_fs().
Add missing log_error() into dm_stats_populate() returning 0.
Avoid calling dm_stats_populate() for DM devices without any stats regions.
Support DM_DEBUG_WITH_LINE_NUMBERS envvar for debug msg with source:line.
Configured command for thin pool threshold handling gets whole environment.
Fix tests for failing dm_snprintf() in stats code.
Parsing mirror status accepts 'userspace' keyword in status.
Introduce dm_malloc_aligned for page alignment of buffers.
Version 1.02.146 - 18th December 2017
=====================================
Activation tree of thin pool skips duplicated check of pool status.
Remove code supporting replicator target.
Do not ignore failure of _info_by_dev().
Propagate delayed resume for pvmove subvolumes.
Suppress integrity encryption keys in 'table' output unless --showkeys supplied.
Version 1.02.145 - 3rd November 2017
====================================
Keep Install section only in dm-event.socket systemd unit.
Issue a specific error with dmsetup status if device is unknown.
Fix RT_LIBS reference in generated libdevmapper.pc for pkg-config
Version 1.02.144 - 6th October 2017
===================================
Schedule exit when received SIGTERM in dmeventd.
Also try to unmount /boot on blkdeactivate -u if on top of supported device.
Use blkdeactivate -r wait in blk-availability systemd service/initscript.
Add blkdeactivate -r wait option to wait for MD resync/recovery/reshape.
Fix blkdeactivate regression with failing DM/MD devs deactivation (1.02.142).
Fix typo in blkdeactivate's '--{dm,lvm,mpath}options' option name.
Correct return value testing when getting reserved values for reporting.
Take -S with dmsetup suspend/resume/clear/wipe_table/remove/deps/status/table.
Version 1.02.143 - 13th September 2017
======================================
Restore umask when creation of node fails.
Add --concise to dmsetup create for many devices with tables in one command.
Accept minor number without major in library when it knows dm major number.
Introduce single-line concise table output format: dmsetup table --concise
Version 1.02.142 - 20th July 2017
=================================
Create /dev/disk/by-part{uuid,label} and gpt-auto-root symlinks with udev.
Version 1.02.141 - 28th June 2017
=================================
Fix reusing of dm_task structure for status reading (used by dmeventd).
Add dm_percent_to_round_float for adjusted percentage rounding.
Reset array with dead rimage devices once raid gets in sync.
Drop unneeded --config option from raid dmeventd plugin.
dm_get_status_raid() handles better some inconsistent md statuses.
Accept truncated files in calls to dm_stats_update_regions_from_fd().
Restore Warning by 5% increment when thin-pool is over 80% (1.02.138).
Version 1.02.140 - 3rd May 2017
===============================
Add missing configure --enable-dmfilemapd status message and fix --disable.
Version 1.02.139 - 13th April 2017
==================================
Fix assignment in _target_version() when dm task can't run.
Flush stdout on each iteration when using --count or --interval.
Show detailed error message when execvp fails while starting dmfilemapd.
Fix segmentation fault when dmfilemapd is run with no arguments.
Numerous minor dmfilemapd fixes from coverity.
Version 1.02.138 - 28th March 2017
==================================
Support additional raid5/6 configurations.
Provide dm_tree_node_add_cache_target@base compatible symbol.
Support DM_CACHE_FEATURE_METADATA2, new cache metadata format 2.
Improve code to handle mode mask for cache nodes.
Cache status check for passthrough also requires trailing space.
Add extra memory page when limiting pthread stack size in dmeventd.
Avoids immediate resume when preloaded device is smaller.
Do not suppress kernel key description in dmsetup table output for dm-crypt.
Support configurable command executed from dmeventd thin plugin.
Support new R|r human readable units output format.
Thin dmeventd plugin reacts faster on lvextend failure path with umount.
Add dm_stats_bind_from_fd() to bind a stats handle from a file descriptor.
Do not try call callback when reverting activation on error path.
Fix file mapping for extents with physically adjacent extents in dmstats.
Validate vsnprintf result in runtime translate of dm_log (1.02.136).
Separate filemap extent allocation from region table in dmstats.
Fix segmentation fault when filemap region creation fails in dmstats.
Fix performance of region cleanup for failed filemap creation in dmstats.
Fix very slow region deletion with many regions in dmstats.
Version 1.02.137 - 30th November 2016
=====================================
Document raid status values.
Always exit dmsetup with success when asked to display help/version.
Version 1.02.136 - 5th November 2016
====================================
Log failure of raid device with log_error level.
Use dm_log_with_errno and translate runtime to dm_log only when needed.
Make log messages from dm and lvm library different from dmeventd.
Notice and Info messages are again logged from dmeventd and its plugins.
Dmeventd now also respects DM_ABORT_ON_INTERNAL_ERRORS as libdm based tool.
Report as non default dm logging also when logging with errno was changed.
Use log_level() macro to consistently decode message log level in dmeventd.
Still produce output when dmsetup dependency tree building finds dev missing.
Check and report pthread_sigmask() failure in dmeventd.
Check mem alloc fail in _canonicalize_field_ids().
Use unsigned math when checking more then 31 legs of raid.
Fix 'dmstats delete' with dmsetup older than v1.02.129
Fix stats walk segfault with dmsetup older than v1.02.129
Version 1.02.135 - 26th September 2016
======================================
Fix man entry for dmsetup status.
Introduce new dm_config_parse_without_dup_node_check().
Don't omit last entry in dmstats list --group.
Version 1.02.134 - 7th September 2016
=====================================
Improve explanation of udev fallback in libdevmapper.h.
Version 1.02.133 - 10th August 2016
===================================

acinclude.m4

@@ -61,174 +61,3 @@ AC_DEFUN([AC_TRY_LDFLAGS],
ifelse([$4], [], [:], [$4])
fi
])
# ===========================================================================
# http://www.gnu.org/software/autoconf-archive/ax_gcc_builtin.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_GCC_BUILTIN(BUILTIN)
#
# DESCRIPTION
#
# This macro checks if the compiler supports one of GCC's built-in
# functions; many other compilers also provide those same built-ins.
#
# The BUILTIN parameter is the name of the built-in function.
#
# If BUILTIN is supported define HAVE_<BUILTIN>. Keep in mind that since
# builtins usually start with two underscores they will be copied over
# into the HAVE_<BUILTIN> definition (e.g. HAVE___BUILTIN_EXPECT for
# __builtin_expect()).
#
# The macro caches its result in the ax_cv_have_<BUILTIN> variable (e.g.
# ax_cv_have___builtin_expect).
#
# The macro currently supports the following built-in functions:
#
# __builtin_assume_aligned
# __builtin_bswap16
# __builtin_bswap32
# __builtin_bswap64
# __builtin_choose_expr
# __builtin___clear_cache
# __builtin_clrsb
# __builtin_clrsbl
# __builtin_clrsbll
# __builtin_clz
# __builtin_clzl
# __builtin_clzll
# __builtin_complex
# __builtin_constant_p
# __builtin_ctz
# __builtin_ctzl
# __builtin_ctzll
# __builtin_expect
# __builtin_ffs
# __builtin_ffsl
# __builtin_ffsll
# __builtin_fpclassify
# __builtin_huge_val
# __builtin_huge_valf
# __builtin_huge_vall
# __builtin_inf
# __builtin_infd128
# __builtin_infd32
# __builtin_infd64
# __builtin_inff
# __builtin_infl
# __builtin_isinf_sign
# __builtin_nan
# __builtin_nand128
# __builtin_nand32
# __builtin_nand64
# __builtin_nanf
# __builtin_nanl
# __builtin_nans
# __builtin_nansf
# __builtin_nansl
# __builtin_object_size
# __builtin_parity
# __builtin_parityl
# __builtin_parityll
# __builtin_popcount
# __builtin_popcountl
# __builtin_popcountll
# __builtin_powi
# __builtin_powif
# __builtin_powil
# __builtin_prefetch
# __builtin_trap
# __builtin_types_compatible_p
# __builtin_unreachable
#
# Unsupported built-ins will be tested with an empty parameter set and the
# result of the check might be wrong or meaningless so use with care.
#
# LICENSE
#
# Copyright (c) 2013 Gabriele Svelto <gabriele.svelto@gmail.com>
#
# Copying and distribution of this file, with or without modification, are
# permitted in any medium without royalty provided the copyright notice
# and this notice are preserved. This file is offered as-is, without any
# warranty.
serial 3
AC_DEFUN([AX_GCC_BUILTIN], [
AS_VAR_PUSHDEF([ac_var], [ax_cv_have_$1])
AC_CACHE_CHECK([for $1], [ac_var], [
AC_LINK_IFELSE([AC_LANG_PROGRAM([], [
m4_case([$1],
[__builtin_assume_aligned], [$1("", 0)],
[__builtin_bswap16], [$1(0)],
[__builtin_bswap32], [$1(0)],
[__builtin_bswap64], [$1(0)],
[__builtin_choose_expr], [$1(0, 0, 0)],
[__builtin___clear_cache], [$1("", "")],
[__builtin_clrsb], [$1(0)],
[__builtin_clrsbl], [$1(0)],
[__builtin_clrsbll], [$1(0)],
[__builtin_clz], [$1(0)],
[__builtin_clzl], [$1(0)],
[__builtin_clzll], [$1(0)],
[__builtin_complex], [$1(0.0, 0.0)],
[__builtin_constant_p], [$1(0)],
[__builtin_ctz], [$1(0)],
[__builtin_ctzl], [$1(0)],
[__builtin_ctzll], [$1(0)],
[__builtin_expect], [$1(0, 0)],
[__builtin_ffs], [$1(0)],
[__builtin_ffsl], [$1(0)],
[__builtin_ffsll], [$1(0)],
[__builtin_fpclassify], [$1(0, 1, 2, 3, 4, 0.0)],
[__builtin_huge_val], [$1()],
[__builtin_huge_valf], [$1()],
[__builtin_huge_vall], [$1()],
[__builtin_inf], [$1()],
[__builtin_infd128], [$1()],
[__builtin_infd32], [$1()],
[__builtin_infd64], [$1()],
[__builtin_inff], [$1()],
[__builtin_infl], [$1()],
[__builtin_isinf_sign], [$1(0.0)],
[__builtin_nan], [$1("")],
[__builtin_nand128], [$1("")],
[__builtin_nand32], [$1("")],
[__builtin_nand64], [$1("")],
[__builtin_nanf], [$1("")],
[__builtin_nanl], [$1("")],
[__builtin_nans], [$1("")],
[__builtin_nansf], [$1("")],
[__builtin_nansl], [$1("")],
[__builtin_object_size], [$1("", 0)],
[__builtin_parity], [$1(0)],
[__builtin_parityl], [$1(0)],
[__builtin_parityll], [$1(0)],
[__builtin_popcount], [$1(0)],
[__builtin_popcountl], [$1(0)],
[__builtin_popcountll], [$1(0)],
[__builtin_powi], [$1(0, 0)],
[__builtin_powif], [$1(0, 0)],
[__builtin_powil], [$1(0, 0)],
[__builtin_prefetch], [$1("")],
[__builtin_trap], [$1()],
[__builtin_types_compatible_p], [$1(int, int)],
[__builtin_unreachable], [$1()],
[m4_warn([syntax], [Unsupported built-in $1, the test may fail])
$1()]
)
])],
[AS_VAR_SET([ac_var], [yes])],
[AS_VAR_SET([ac_var], [no])])
])
AS_IF([test yes = AS_VAR_GET([ac_var])],
[AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_$1), 1,
[Define to 1 if the system has the `$1' built-in function])], [])
AS_VAR_POPDEF([ac_var])
])
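
For illustration, a minimal sketch of how such a macro is typically consumed;
the builtin chosen and the likely()/unlikely() wrappers below are only an
example, not lvm2's actual usage. After configure.ac calls
AX_GCC_BUILTIN([__builtin_expect]), C code can test the generated define:

    /* Guard on the HAVE_<BUILTIN> symbol produced by the macro above. */
    #ifdef HAVE___BUILTIN_EXPECT
    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)
    #else
    #define likely(x)   (x)
    #define unlikely(x) (x)
    #endif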

aclocal.m4

@@ -1,6 +1,6 @@
# generated automatically by aclocal 1.16.2 -*- Autoconf -*-
# generated automatically by aclocal 1.15 -*- Autoconf -*-
# Copyright (C) 1996-2020 Free Software Foundation, Inc.
# Copyright (C) 1996-2014 Free Software Foundation, Inc.
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -13,7 +13,7 @@
m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])])
# ===========================================================================
# https://www.gnu.org/software/autoconf-archive/ax_python_module.html
# http://www.gnu.org/software/autoconf-archive/ax_python_module.html
# ===========================================================================
#
# SYNOPSIS
@@ -37,7 +37,7 @@ m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 9
#serial 8
AU_ALIAS([AC_PYTHON_MODULE], [AX_PYTHON_MODULE])
AC_DEFUN([AX_PYTHON_MODULE],[
@@ -69,63 +69,32 @@ AC_DEFUN([AX_PYTHON_MODULE],[
fi
])
# pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*-
# serial 11 (pkg-config-0.29.1)
# pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*-
# serial 1 (pkg-config-0.24)
#
# Copyright © 2004 Scott James Remnant <scott@netsplit.com>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
dnl Copyright © 2004 Scott James Remnant <scott@netsplit.com>.
dnl Copyright © 2012-2015 Dan Nicholson <dbn.lists@gmail.com>
dnl
dnl This program is free software; you can redistribute it and/or modify
dnl it under the terms of the GNU General Public License as published by
dnl the Free Software Foundation; either version 2 of the License, or
dnl (at your option) any later version.
dnl
dnl This program is distributed in the hope that it will be useful, but
dnl WITHOUT ANY WARRANTY; without even the implied warranty of
dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
dnl General Public License for more details.
dnl
dnl You should have received a copy of the GNU General Public License
dnl along with this program; if not, write to the Free Software
dnl Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
dnl 02111-1307, USA.
dnl
dnl As a special exception to the GNU General Public License, if you
dnl distribute this file as part of a program that contains a
dnl configuration script generated by Autoconf, you may include it under
dnl the same distribution terms that you use for the rest of that
dnl program.
dnl PKG_PREREQ(MIN-VERSION)
dnl -----------------------
dnl Since: 0.29
dnl
dnl Verify that the version of the pkg-config macros are at least
dnl MIN-VERSION. Unlike PKG_PROG_PKG_CONFIG, which checks the user's
dnl installed version of pkg-config, this checks the developer's version
dnl of pkg.m4 when generating configure.
dnl
dnl To ensure that this macro is defined, also add:
dnl m4_ifndef([PKG_PREREQ],
dnl [m4_fatal([must install pkg-config 0.29 or later before running autoconf/autogen])])
dnl
dnl See the "Since" comment for each macro you use to see what version
dnl of the macros you require.
m4_defun([PKG_PREREQ],
[m4_define([PKG_MACROS_VERSION], [0.29.1])
m4_if(m4_version_compare(PKG_MACROS_VERSION, [$1]), -1,
[m4_fatal([pkg.m4 version $1 or higher is required but ]PKG_MACROS_VERSION[ found])])
])dnl PKG_PREREQ
dnl PKG_PROG_PKG_CONFIG([MIN-VERSION])
dnl ----------------------------------
dnl Since: 0.16
dnl
dnl Search for the pkg-config tool and set the PKG_CONFIG variable to
dnl first found in the path. Checks that the version of pkg-config found
dnl is at least MIN-VERSION. If MIN-VERSION is not specified, 0.9.0 is
dnl used since that's the first version where most current features of
dnl pkg-config existed.
# PKG_PROG_PKG_CONFIG([MIN-VERSION])
# ----------------------------------
AC_DEFUN([PKG_PROG_PKG_CONFIG],
[m4_pattern_forbid([^_?PKG_[A-Z_]+$])
m4_pattern_allow([^PKG_CONFIG(_(PATH|LIBDIR|SYSROOT_DIR|ALLOW_SYSTEM_(CFLAGS|LIBS)))?$])
@@ -147,19 +116,18 @@ if test -n "$PKG_CONFIG"; then
PKG_CONFIG=""
fi
fi[]dnl
])dnl PKG_PROG_PKG_CONFIG
])# PKG_PROG_PKG_CONFIG
dnl PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
dnl -------------------------------------------------------------------
dnl Since: 0.18
dnl
dnl Check to see whether a particular set of modules exists. Similar to
dnl PKG_CHECK_MODULES(), but does not set variables or print errors.
dnl
dnl Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG])
dnl only at the first occurence in configure.ac, so if the first place
dnl it's called might be skipped (such as if it is within an "if", you
dnl have to call PKG_CHECK_EXISTS manually
# PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
#
# Check to see whether a particular set of modules exists. Similar
# to PKG_CHECK_MODULES(), but does not set variables or print errors.
#
# Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG])
# only at the first occurence in configure.ac, so if the first place
# it's called might be skipped (such as if it is within an "if", you
# have to call PKG_CHECK_EXISTS manually
# --------------------------------------------------------------
AC_DEFUN([PKG_CHECK_EXISTS],
[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
if test -n "$PKG_CONFIG" && \
@@ -169,10 +137,8 @@ m4_ifvaln([$3], [else
$3])dnl
fi])
dnl _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES])
dnl ---------------------------------------------
dnl Internal wrapper calling pkg-config via PKG_CONFIG and setting
dnl pkg_failed based on the result.
# _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES])
# ---------------------------------------------
m4_define([_PKG_CONFIG],
[if test -n "$$1"; then
pkg_cv_[]$1="$$1"
@@ -184,11 +150,10 @@ m4_define([_PKG_CONFIG],
else
pkg_failed=untried
fi[]dnl
])dnl _PKG_CONFIG
])# _PKG_CONFIG
dnl _PKG_SHORT_ERRORS_SUPPORTED
dnl ---------------------------
dnl Internal check to see if pkg-config supports short errors.
# _PKG_SHORT_ERRORS_SUPPORTED
# -----------------------------
AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED],
[AC_REQUIRE([PKG_PROG_PKG_CONFIG])
if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
@@ -196,17 +161,19 @@ if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
else
_pkg_short_errors_supported=no
fi[]dnl
])dnl _PKG_SHORT_ERRORS_SUPPORTED
])# _PKG_SHORT_ERRORS_SUPPORTED
dnl PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND],
dnl [ACTION-IF-NOT-FOUND])
dnl --------------------------------------------------------------
dnl Since: 0.4.0
dnl
dnl Note that if there is a possibility the first call to
dnl PKG_CHECK_MODULES might not happen, you should be sure to include an
dnl explicit call to PKG_PROG_PKG_CONFIG in your configure.ac
# PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND],
# [ACTION-IF-NOT-FOUND])
#
#
# Note that if there is a possibility the first call to
# PKG_CHECK_MODULES might not happen, you should be sure to include an
# explicit call to PKG_PROG_PKG_CONFIG in your configure.ac
#
#
# --------------------------------------------------------------
AC_DEFUN([PKG_CHECK_MODULES],
[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl
@@ -260,40 +227,16 @@ else
AC_MSG_RESULT([yes])
$3
fi[]dnl
])dnl PKG_CHECK_MODULES
])# PKG_CHECK_MODULES
dnl PKG_CHECK_MODULES_STATIC(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND],
dnl [ACTION-IF-NOT-FOUND])
dnl ---------------------------------------------------------------------
dnl Since: 0.29
dnl
dnl Checks for existence of MODULES and gathers its build flags with
dnl static libraries enabled. Sets VARIABLE-PREFIX_CFLAGS from --cflags
dnl and VARIABLE-PREFIX_LIBS from --libs.
dnl
dnl Note that if there is a possibility the first call to
dnl PKG_CHECK_MODULES_STATIC might not happen, you should be sure to
dnl include an explicit call to PKG_PROG_PKG_CONFIG in your
dnl configure.ac.
AC_DEFUN([PKG_CHECK_MODULES_STATIC],
[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
_save_PKG_CONFIG=$PKG_CONFIG
PKG_CONFIG="$PKG_CONFIG --static"
PKG_CHECK_MODULES($@)
PKG_CONFIG=$_save_PKG_CONFIG[]dnl
])dnl PKG_CHECK_MODULES_STATIC
dnl PKG_INSTALLDIR([DIRECTORY])
dnl -------------------------
dnl Since: 0.27
dnl
dnl Substitutes the variable pkgconfigdir as the location where a module
dnl should install pkg-config .pc files. By default the directory is
dnl $libdir/pkgconfig, but the default can be changed by passing
dnl DIRECTORY. The user can override through the --with-pkgconfigdir
dnl parameter.
# PKG_INSTALLDIR(DIRECTORY)
# -------------------------
# Substitutes the variable pkgconfigdir as the location where a module
# should install pkg-config .pc files. By default the directory is
# $libdir/pkgconfig, but the default can be changed by passing
# DIRECTORY. The user can override through the --with-pkgconfigdir
# parameter.
AC_DEFUN([PKG_INSTALLDIR],
[m4_pushdef([pkg_default], [m4_default([$1], ['${libdir}/pkgconfig'])])
m4_pushdef([pkg_description],
@@ -304,18 +247,16 @@ AC_ARG_WITH([pkgconfigdir],
AC_SUBST([pkgconfigdir], [$with_pkgconfigdir])
m4_popdef([pkg_default])
m4_popdef([pkg_description])
])dnl PKG_INSTALLDIR
]) dnl PKG_INSTALLDIR
dnl PKG_NOARCH_INSTALLDIR([DIRECTORY])
dnl --------------------------------
dnl Since: 0.27
dnl
dnl Substitutes the variable noarch_pkgconfigdir as the location where a
dnl module should install arch-independent pkg-config .pc files. By
dnl default the directory is $datadir/pkgconfig, but the default can be
dnl changed by passing DIRECTORY. The user can override through the
dnl --with-noarch-pkgconfigdir parameter.
# PKG_NOARCH_INSTALLDIR(DIRECTORY)
# -------------------------
# Substitutes the variable noarch_pkgconfigdir as the location where a
# module should install arch-independent pkg-config .pc files. By
# default the directory is $datadir/pkgconfig, but the default can be
# changed by passing DIRECTORY. The user can override through the
# --with-noarch-pkgconfigdir parameter.
AC_DEFUN([PKG_NOARCH_INSTALLDIR],
[m4_pushdef([pkg_default], [m4_default([$1], ['${datadir}/pkgconfig'])])
m4_pushdef([pkg_description],
@@ -326,15 +267,13 @@ AC_ARG_WITH([noarch-pkgconfigdir],
AC_SUBST([noarch_pkgconfigdir], [$with_noarch_pkgconfigdir])
m4_popdef([pkg_default])
m4_popdef([pkg_description])
])dnl PKG_NOARCH_INSTALLDIR
]) dnl PKG_NOARCH_INSTALLDIR
dnl PKG_CHECK_VAR(VARIABLE, MODULE, CONFIG-VARIABLE,
dnl [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
dnl -------------------------------------------
dnl Since: 0.28
dnl
dnl Retrieves the value of the pkg-config variable for the given module.
# PKG_CHECK_VAR(VARIABLE, MODULE, CONFIG-VARIABLE,
# [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
# -------------------------------------------
# Retrieves the value of the pkg-config variable for the given module.
AC_DEFUN([PKG_CHECK_VAR],
[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl
AC_ARG_VAR([$1], [value of $3 for $2, overriding pkg-config])dnl
@@ -343,77 +282,9 @@ _PKG_CONFIG([$1], [variable="][$3]["], [$2])
AS_VAR_COPY([$1], [pkg_cv_][$1])
AS_VAR_IF([$1], [""], [$5], [$4])dnl
])dnl PKG_CHECK_VAR
])# PKG_CHECK_VAR
dnl PKG_WITH_MODULES(VARIABLE-PREFIX, MODULES,
dnl [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND],
dnl [DESCRIPTION], [DEFAULT])
dnl ------------------------------------------
dnl
dnl Prepare a "--with-" configure option using the lowercase
dnl [VARIABLE-PREFIX] name, merging the behaviour of AC_ARG_WITH and
dnl PKG_CHECK_MODULES in a single macro.
AC_DEFUN([PKG_WITH_MODULES],
[
m4_pushdef([with_arg], m4_tolower([$1]))
m4_pushdef([description],
[m4_default([$5], [build with ]with_arg[ support])])
m4_pushdef([def_arg], [m4_default([$6], [auto])])
m4_pushdef([def_action_if_found], [AS_TR_SH([with_]with_arg)=yes])
m4_pushdef([def_action_if_not_found], [AS_TR_SH([with_]with_arg)=no])
m4_case(def_arg,
[yes],[m4_pushdef([with_without], [--without-]with_arg)],
[m4_pushdef([with_without],[--with-]with_arg)])
AC_ARG_WITH(with_arg,
AS_HELP_STRING(with_without, description[ @<:@default=]def_arg[@:>@]),,
[AS_TR_SH([with_]with_arg)=def_arg])
AS_CASE([$AS_TR_SH([with_]with_arg)],
[yes],[PKG_CHECK_MODULES([$1],[$2],$3,$4)],
[auto],[PKG_CHECK_MODULES([$1],[$2],
[m4_n([def_action_if_found]) $3],
[m4_n([def_action_if_not_found]) $4])])
m4_popdef([with_arg])
m4_popdef([description])
m4_popdef([def_arg])
])dnl PKG_WITH_MODULES
dnl PKG_HAVE_WITH_MODULES(VARIABLE-PREFIX, MODULES,
dnl [DESCRIPTION], [DEFAULT])
dnl -----------------------------------------------
dnl
dnl Convenience macro to trigger AM_CONDITIONAL after PKG_WITH_MODULES
dnl check._[VARIABLE-PREFIX] is exported as make variable.
AC_DEFUN([PKG_HAVE_WITH_MODULES],
[
PKG_WITH_MODULES([$1],[$2],,,[$3],[$4])
AM_CONDITIONAL([HAVE_][$1],
[test "$AS_TR_SH([with_]m4_tolower([$1]))" = "yes"])
])dnl PKG_HAVE_WITH_MODULES
dnl PKG_HAVE_DEFINE_WITH_MODULES(VARIABLE-PREFIX, MODULES,
dnl [DESCRIPTION], [DEFAULT])
dnl ------------------------------------------------------
dnl
dnl Convenience macro to run AM_CONDITIONAL and AC_DEFINE after
dnl PKG_WITH_MODULES check. HAVE_[VARIABLE-PREFIX] is exported as make
dnl and preprocessor variable.
AC_DEFUN([PKG_HAVE_DEFINE_WITH_MODULES],
[
PKG_HAVE_WITH_MODULES([$1],[$2],[$3],[$4])
AS_IF([test "$AS_TR_SH([with_]m4_tolower([$1]))" = "yes"],
[AC_DEFINE([HAVE_][$1], 1, [Enable ]m4_tolower([$1])[ support])])
])dnl PKG_HAVE_DEFINE_WITH_MODULES
# Copyright (C) 1999-2020 Free Software Foundation, Inc.
# Copyright (C) 1999-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -447,11 +318,8 @@ AC_DEFUN([AM_PATH_PYTHON],
dnl Find a Python interpreter. Python versions prior to 2.0 are not
dnl supported. (2.0 was released on October 16, 2000).
m4_define_default([_AM_PYTHON_INTERPRETER_LIST],
[python python2 python3 dnl
python3.9 python3.8 python3.7 python3.6 python3.5 python3.4 python3.3 dnl
python3.2 python3.1 python3.0 dnl
python2.7 python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 dnl
python2.0])
[python python2 python3 python3.3 python3.2 python3.1 python3.0 python2.7 dnl
python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0])
AC_ARG_VAR([PYTHON], [the Python interpreter])
@@ -651,7 +519,7 @@ for i in list(range(0, 4)): minverhex = (minverhex << 8) + minver[[i]]
sys.exit(sys.hexversion < minverhex)"
AS_IF([AM_RUN_LOG([$1 -c "$prog"])], [$3], [$4])])
# Copyright (C) 2001-2020 Free Software Foundation, Inc.
# Copyright (C) 2001-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,


@@ -1,44 +0,0 @@
# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
#
# This file is part of the device-mapper userspace tools.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU Lesser General Public License v.2.1.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# Uncomment this to build the simple radix tree. You'll need to make clean too.
# Comment to build the advanced radix tree.
#base/data-struct/radix-tree.o: CFLAGS += -DSIMPLE_RADIX_TREE
# NOTE: this Makefile only works as 'include' for toplevel Makefile
# which defined all top_* variables
BASE_SOURCE=\
base/data-struct/radix-tree.c
BASE_TARGET = base/libbase.a
BASE_DEPENDS = $(BASE_SOURCE:%.c=%.d)
BASE_OBJECTS = $(BASE_SOURCE:%.c=%.o)
CLEAN_TARGETS += $(BASE_DEPENDS) $(BASE_OBJECTS) \
$(BASE_SOURCE:%.c=%.gcda) \
$(BASE_SOURCE:%.c=%.gcno) \
$(BASE_TARGET)
$(BASE_TARGET): $(BASE_OBJECTS)
@echo " [AR] $@"
$(Q) $(RM) $@
$(Q) $(AR) rsv $@ $(BASE_OBJECTS) > /dev/null
ifneq (,$(findstring $(MAKECMDGOALS),clean distclean))
BASE_SOURCE += \
base/data-struct/hash.c \
base/data-struct/list.c
endif
ifeq ("$(DEPENDS)","yes")
-include $(BASE_DEPENDS)
endif

(File diff suppressed because it is too large.)


@@ -1,256 +0,0 @@
// Copyright (C) 2018 Red Hat, Inc. All rights reserved.
//
// This file is part of LVM2.
//
// This copyrighted material is made available to anyone wishing to use,
// modify, copy, or redistribute it subject to the terms and conditions
// of the GNU Lesser General Public License v.2.1.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#include "radix-tree.h"
#include "base/memory/container_of.h"
#include "base/memory/zalloc.h"
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
//----------------------------------------------------------------
// This implementation is based around nested binary trees. Very
// simple (and hopefully correct).
struct node {
struct node *left;
struct node *right;
uint8_t key;
struct node *center;
bool has_value;
union radix_value value;
};
struct radix_tree {
radix_value_dtr dtr;
void *dtr_context;
struct node *root;
};
struct radix_tree *
radix_tree_create(radix_value_dtr dtr, void *dtr_context)
{
struct radix_tree *rt = zalloc(sizeof(*rt));
if (rt) {
rt->dtr = dtr;
rt->dtr_context = dtr_context;
}
return rt;
}
// Returns the number of entries in the tree
static unsigned _destroy_tree(struct node *n, radix_value_dtr dtr, void *context)
{
unsigned r;
if (!n)
return 0;
r = _destroy_tree(n->left, dtr, context);
r += _destroy_tree(n->right, dtr, context);
r += _destroy_tree(n->center, dtr, context);
if (n->has_value) {
if (dtr)
dtr(context, n->value);
r++;
}
free(n);
return r;
}
void radix_tree_destroy(struct radix_tree *rt)
{
_destroy_tree(rt->root, rt->dtr, rt->dtr_context);
free(rt);
}
static unsigned _count(struct node *n)
{
unsigned r;
if (!n)
return 0;
r = _count(n->left);
r += _count(n->right);
r += _count(n->center);
if (n->has_value)
r++;
return r;
}
unsigned radix_tree_size(struct radix_tree *rt)
{
return _count(rt->root);
}
static struct node **_lookup(struct node **pn, uint8_t *kb, uint8_t *ke)
{
struct node *n = *pn;
if (!n || (kb == ke))
return pn;
if (*kb < n->key)
return _lookup(&n->left, kb, ke);
else if (*kb > n->key)
return _lookup(&n->right, kb, ke);
else
return _lookup(&n->center, kb + 1, ke);
}
static bool _insert(struct node **pn, uint8_t *kb, uint8_t *ke, union radix_value v)
{
struct node *n = *pn;
if (!n) {
n = zalloc(sizeof(*n));
if (!n)
return false;
n->key = *kb;
*pn = n;
}
if (kb == ke) {
n->has_value = true;
n->value = v;
return true;
}
if (*kb < n->key)
return _insert(&n->left, kb, ke, v);
else if (*kb > n->key)
return _insert(&n->right, kb, ke, v);
else
return _insert(&n->center, kb + 1, ke, v);
}
bool radix_tree_insert(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, union radix_value v)
{
return _insert(&rt->root, kb, ke, v);
}
bool radix_tree_remove(struct radix_tree *rt, uint8_t *kb, uint8_t *ke)
{
struct node **pn = _lookup(&rt->root, kb, ke);
struct node *n = *pn;
if (!n || !n->has_value)
return false;
else {
if (rt->dtr)
rt->dtr(rt->dtr_context, n->value);
if (n->left || n->center || n->right) {
n->has_value = false;
return true;
} else {
// FIXME: delete parent if this was the last entry
free(n);
*pn = NULL;
}
return true;
}
}
unsigned radix_tree_remove_prefix(struct radix_tree *rt, uint8_t *kb, uint8_t *ke)
{
struct node **pn;
unsigned count;
pn = _lookup(&rt->root, kb, ke);
if (*pn) {
count = _destroy_tree(*pn, rt->dtr, rt->dtr_context);
*pn = NULL;
}
return count;
}
bool
radix_tree_lookup(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, union radix_value *result)
{
struct node **pn = _lookup(&rt->root, kb, ke);
struct node *n = *pn;
if (n && n->has_value) {
*result = n->value;
return true;
} else
return false;
}
static void _iterate(struct node *n, struct radix_tree_iterator *it)
{
if (!n)
return;
_iterate(n->left, it);
if (n->has_value)
// FIXME: fill out the key
it->visit(it, NULL, NULL, n->value);
_iterate(n->center, it);
_iterate(n->right, it);
}
void radix_tree_iterate(struct radix_tree *rt, uint8_t *kb, uint8_t *ke,
struct radix_tree_iterator *it)
{
if (kb == ke)
_iterate(rt->root, it);
else {
struct node **pn = _lookup(&rt->root, kb, ke);
struct node *n = *pn;
if (n) {
if (n->has_value)
it->visit(it, NULL, NULL, n->value);
_iterate(n->center, it);
}
}
}
bool radix_tree_is_well_formed(struct radix_tree *rt)
{
return true;
}
void radix_tree_dump(struct radix_tree *rt, FILE *out)
{
}
//----------------------------------------------------------------
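In the simple implementation above, keys are half-open byte ranges [kb, ke) and each node compares a single key byte: left and right hold smaller and larger bytes, while center descends to the next byte of the key. A hypothetical wrapper for string keys, assuming the radix-tree.h header shown further down (illustrative only, not LVM2 source), might look like:

```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include "base/data-struct/radix-tree.h"

/* Illustrative helper: the API takes non-const byte pointers, so the
 * string is cast and bounded by the range [kb, kb + strlen(key)). */
static bool rt_insert_str(struct radix_tree *rt, const char *key, uint64_t n)
{
	uint8_t *kb = (uint8_t *) key;
	union radix_value v = { .n = n };

	return radix_tree_insert(rt, kb, kb + strlen(key), v);
}
```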


@@ -1,21 +0,0 @@
// Copyright (C) 2018 Red Hat, Inc. All rights reserved.
//
// This file is part of LVM2.
//
// This copyrighted material is made available to anyone wishing to use,
// modify, copy, or redistribute it subject to the terms and conditions
// of the GNU Lesser General Public License v.2.1.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//----------------------------------------------------------------
#ifdef SIMPLE_RADIX_TREE
#include "base/data-struct/radix-tree-simple.c"
#else
#include "base/data-struct/radix-tree-adaptive.c"
#endif
//----------------------------------------------------------------


@@ -1,64 +0,0 @@
// Copyright (C) 2018 Red Hat, Inc. All rights reserved.
//
// This file is part of LVM2.
//
// This copyrighted material is made available to anyone wishing to use,
// modify, copy, or redistribute it subject to the terms and conditions
// of the GNU Lesser General Public License v.2.1.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#ifndef BASE_DATA_STRUCT_RADIX_TREE_H
#define BASE_DATA_STRUCT_RADIX_TREE_H
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
//----------------------------------------------------------------
struct radix_tree;
union radix_value {
void *ptr;
uint64_t n;
};
typedef void (*radix_value_dtr)(void *context, union radix_value v);
// dtr will be called on any deleted entries. dtr may be NULL.
struct radix_tree *radix_tree_create(radix_value_dtr dtr, void *dtr_context);
void radix_tree_destroy(struct radix_tree *rt);
unsigned radix_tree_size(struct radix_tree *rt);
bool radix_tree_insert(struct radix_tree *rt, uint8_t *kb, uint8_t *ke, union radix_value v);
bool radix_tree_remove(struct radix_tree *rt, uint8_t *kb, uint8_t *ke);
// Returns the number of values removed
unsigned radix_tree_remove_prefix(struct radix_tree *rt, uint8_t *prefix_b, uint8_t *prefix_e);
bool radix_tree_lookup(struct radix_tree *rt,
uint8_t *kb, uint8_t *ke, union radix_value *result);
// The radix tree stores entries in lexicographical order. Which means
// we can iterate entries, in order. Or iterate entries with a particular
// prefix.
struct radix_tree_iterator {
// Returns false if the iteration should end.
bool (*visit)(struct radix_tree_iterator *it,
uint8_t *kb, uint8_t *ke, union radix_value v);
};
void radix_tree_iterate(struct radix_tree *rt, uint8_t *kb, uint8_t *ke,
struct radix_tree_iterator *it);
// Checks that some constraints on the shape of the tree are
// being held. For debug only.
bool radix_tree_is_well_formed(struct radix_tree *rt);
void radix_tree_dump(struct radix_tree *rt, FILE *out);
//----------------------------------------------------------------
#endif
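A minimal, self-contained usage sketch of the API declared above (illustrative only, not taken from the LVM2 tree):

```c
#include <assert.h>
#include <string.h>

#include "base/data-struct/radix-tree.h"

int main(void)
{
	struct radix_tree *rt = radix_tree_create(NULL, NULL);	/* no destructor */
	uint8_t *kb = (uint8_t *) "example-key";	/* sample key, illustrative */
	uint8_t *ke = kb + strlen("example-key");
	union radix_value v = { .n = 42 }, out;

	assert(rt);
	assert(radix_tree_insert(rt, kb, ke, v));
	assert(radix_tree_lookup(rt, kb, ke, &out) && out.n == 42);
	assert(radix_tree_size(rt) == 1);

	radix_tree_destroy(rt);
	return 0;
}
```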


@@ -1,25 +0,0 @@
// Copyright (C) 2018 - 2020 Red Hat, Inc. All rights reserved.
//
// This file is part of LVM2.
//
// This copyrighted material is made available to anyone wishing to use,
// modify, copy, or redistribute it subject to the terms and conditions
// of the GNU Lesser General Public License v.2.1.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#ifndef BASE_MEMORY_CONTAINER_OF_H
#define BASE_MEMORY_CONTAINER_OF_H
#include <stddef.h> // offsetof
//----------------------------------------------------------------
#define container_of(v, t, head) \
((t *)((char *)(v) - offsetof(t, head)))
//----------------------------------------------------------------
#endif
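container_of() recovers the address of an enclosing structure from a pointer to one of its members. A hedged sketch of the usual pattern, carrying private state into the radix_tree_iterator visit callback declared earlier (the names below are illustrative, not LVM2 source):

```c
#include <stdbool.h>
#include <stdint.h>

#include "base/data-struct/radix-tree.h"
#include "base/memory/container_of.h"

struct counting_it {
	struct radix_tree_iterator it;	/* embedded callback, passed to the tree */
	unsigned count;			/* private state recovered via container_of */
};

static bool _count_visit(struct radix_tree_iterator *it,
			 uint8_t *kb, uint8_t *ke, union radix_value v)
{
	struct counting_it *ci = container_of(it, struct counting_it, it);

	(void) kb; (void) ke; (void) v;
	ci->count++;
	return true;			/* keep iterating */
}

static unsigned count_entries(struct radix_tree *rt)
{
	struct counting_it ci = { .it.visit = _count_visit, .count = 0 };

	radix_tree_iterate(rt, NULL, NULL, &ci.it);	/* kb == ke: whole tree */
	return ci.count;
}
```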


@@ -1,31 +0,0 @@
// Copyright (C) 2018 Red Hat, Inc. All rights reserved.
//
// This file is part of LVM2.
//
// This copyrighted material is made available to anyone wishing to use,
// modify, copy, or redistribute it subject to the terms and conditions
// of the GNU Lesser General Public License v.2.1.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#ifndef BASE_MEMORY_ZALLOC_H
#define BASE_MEMORY_ZALLOC_H
#include <stdlib.h>
#include <string.h>
//----------------------------------------------------------------
static inline void *zalloc(size_t len)
{
void *ptr = malloc(len);
if (ptr)
memset(ptr, 0, len);
return ptr;
}
//----------------------------------------------------------------
#endif
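zalloc() is a zero-initialising malloc() wrapper (radix_tree_create() above relies on it), so callers receive fully cleared structures. A trivial illustrative use, with a hypothetical structure:

```c
#include "base/memory/zalloc.h"

struct example {	/* hypothetical structure, for illustration only */
	int a;
	void *p;
};

static struct example *example_new(void)
{
	struct example *e = zalloc(sizeof(*e));	/* e->a == 0, e->p == NULL on success */

	return e;	/* NULL on allocation failure */
}
```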


@@ -32,8 +32,8 @@ include $(top_builddir)/make.tmpl
.PHONY: install_conf install_localconf install_profiles
generate:
LD_LIBRARY_PATH=$(top_builddir)/libdm:$(LD_LIBRARY_PATH) $(top_builddir)/tools/lvm dumpconfig --type default --unconfigured --withgeneralpreamble --withcomments --ignorelocal --withspaces > example.conf.in
LD_LIBRARY_PATH=$(top_builddir)/libdm:$(LD_LIBRARY_PATH) $(top_builddir)/tools/lvm dumpconfig --type default --unconfigured --withlocalpreamble --withcomments --withspaces local > lvmlocal.conf.in
(cat $(top_srcdir)/conf/example.conf.base && LD_LIBRARY_PATH=$(top_builddir)/libdm:$(LD_LIBRARY_PATH) $(top_builddir)/tools/lvm dumpconfig --type default --unconfigured --withcomments --ignorelocal --withspaces) > example.conf.in
(cat $(top_srcdir)/conf/lvmlocal.conf.base && LD_LIBRARY_PATH=$(top_builddir)/libdm:$(LD_LIBRARY_PATH) $(top_builddir)/tools/lvm dumpconfig --type default --unconfigured --withcomments --withspaces local) > lvmlocal.conf.in
install_conf: $(CONFSRC)
@if [ ! -e $(confdir)/$(CONFDEST) ]; then \
@@ -48,8 +48,8 @@ install_localconf: $(CONFLOCAL)
fi
install_profiles: $(PROFILES)
$(INSTALL_DIR) $(profiledir)
$(INSTALL_DATA) $(PROFILES) $(profiledir)/
$(INSTALL_DIR) $(DESTDIR)$(DEFAULT_PROFILE_DIR)
$(INSTALL_DATA) $(PROFILES) $(DESTDIR)$(DEFAULT_PROFILE_DIR)/
install_lvm2: install_conf install_localconf install_profiles


@@ -9,6 +9,6 @@ allocation {
cache_mode = "writethrough"
cache_policy = "smq"
cache_settings {
# currently no settings for "smq" policy
# currently no settins for "smq" policy
}
}


@@ -39,7 +39,7 @@ report {
list_item_separator=","
prefixes=0
quoted=1
columns_as_rows=0
colums_as_rows=0
binary_values_as_numeric=0
time_format="%Y-%m-%d %T %z"
devtypes_sort="devtype_name"

conf/example.conf.base (new file): 23 lines

@@ -0,0 +1,23 @@
# This is an example configuration file for the LVM2 system.
# It contains the default settings that would be used if there was no
# @DEFAULT_SYS_DIR@/lvm.conf file.
#
# Refer to 'man lvm.conf' for further information including the file layout.
#
# Refer to 'man lvm.conf' for information about how settings configured in
# this file are combined with built-in values and command line options to
# arrive at the final values used by LVM.
#
# Refer to 'man lvmconfig' for information about displaying the built-in
# and configured values used by LVM.
#
# If a default value is set in this file (not commented out), then a
# new version of LVM using this file will continue using that value,
# even if the new version of LVM changes the built-in default value.
#
# To put this file in a different directory and override @DEFAULT_SYS_DIR@ set
# the environment variable LVM_SYSTEM_DIR before running the tools.
#
# N.B. Take care that each setting only appears once if uncommenting
# example settings in this file.


@@ -185,20 +185,6 @@ devices {
# present on the system. sysfs must be part of the kernel and mounted.)
sysfs_scan = 1
# Configuration option devices/scan_lvs.
# Scan LVM LVs for layered PVs, allowing LVs to be used as PVs.
# When 1, LVM will detect PVs layered on LVs, and caution must be
# taken to avoid a host accessing a layered VG that may not belong
# to it, e.g. from a guest image. This generally requires excluding
# the LVs with device filters. Also, when this setting is enabled,
# every LVM command will scan every active LV on the system (unless
# filtered), which can cause performance problems on systems with
# many active LVs. When this setting is 0, LVM will not detect or
# use PVs that exist on LVs, and will not allow a PV to be created on
# an LV. The LVs are ignored using a built in device filter that
# identifies and excludes LVs.
scan_lvs = 0
# Configuration option devices/multipath_component_detection.
# Ignore devices that are components of DM multipath devices.
multipath_component_detection = 1
@@ -326,12 +312,6 @@ devices {
# Enabling this setting allows the VG to be used as usual even with
# uncertain devices.
allow_changes_with_duplicate_pvs = 0
# Configuration option devices/allow_mixed_block_sizes.
# Allow PVs in the same VG with different logical block sizes.
# When allowed, the user is responsible to ensure that an LV is
# using PVs with matching block sizes when necessary.
allow_mixed_block_sizes = 1
}
# Configuration section allocation.
@@ -399,9 +379,8 @@ allocation {
# Configuration option allocation/raid_stripe_all_devices.
# Stripe across all PVs when RAID stripes are not specified.
# If enabled, all PVs in the VG or on the command line are used for
# raid0/4/5/6/10 when the command does not specify the number of
# stripes to use.
# If enabled, all PVs in the VG or on the command line are used for raid0/4/5/6/10
# when the command does not specify the number of stripes to use.
# This was the default behaviour until release 2.02.162.
# This configuration option has an automatic default value.
# raid_stripe_all_devices = 0
@@ -410,17 +389,6 @@ allocation {
# Cache pool metadata and data will always use different PVs.
cache_pool_metadata_require_separate_pvs = 0
# Configuration option allocation/cache_metadata_format.
# Sets default metadata format for new cache.
#
# Accepted values:
# 0 Automatically detected best available format
# 1 Original format
# 2 Improved 2nd. generation format
#
# This configuration option has an automatic default value.
# cache_metadata_format = 0
# Configuration option allocation/cache_mode.
# The default cache mode used for new cache.
#
@@ -437,7 +405,7 @@ allocation {
# Configuration option allocation/cache_policy.
# The default cache policy used for new cache volume.
# Since kernel 4.2 the default policy is smq (Stochastic multiqueue),
# Since kernel 4.2 the default policy is smq (Stochastic multique),
# otherwise the older mq (Multiqueue) policy is selected.
# This configuration option does not have a default value defined.
@@ -460,23 +428,10 @@ allocation {
# 32KiB to 1GiB in multiples of 32.
# This configuration option does not have a default value defined.
# Configuration option allocation/cache_pool_max_chunks.
# The maximum number of chunks in a cache pool.
# For cache target v1.9 the recommended maximumm is 1000000 chunks.
# Using cache pool with more chunks may degrade cache performance.
# This configuration option does not have a default value defined.
# Configuration option allocation/thin_pool_metadata_require_separate_pvs.
# Thin pool metadata and data will always use different PVs.
# Thin pool metdata and data will always use different PVs.
thin_pool_metadata_require_separate_pvs = 0
# Configuration option allocation/thin_pool_crop_metadata.
# Older version of lvm2 cropped pool's metadata size to 15.81 GiB.
# This is slightly less then the actual maximum 15.88 GiB.
# For compatibility with older version and use of cropped size set to 1.
# This configuration option has an automatic default value.
# thin_pool_crop_metadata = 0
# Configuration option allocation/thin_pool_zero.
# Thin pool data chunks are zeroed before they are first used.
# Zeroing with a larger thin pool chunk size reduces performance.
@@ -512,10 +467,6 @@ allocation {
# This configuration option has an automatic default value.
# thin_pool_chunk_size_policy = "generic"
# Configuration option allocation/zero_metadata.
# Zero whole metadata area before use with thin or cache pool.
zero_metadata = 1
# Configuration option allocation/thin_pool_chunk_size.
# The minimal chunk size in KiB for thin pool volumes.
# Larger chunk sizes may improve performance for plain thin volumes,
@@ -642,9 +593,9 @@ log {
# Select log messages by class.
# Some debugging messages are assigned to a class and only appear in
# debug output if the class is listed here. Classes currently
# available: memory, devices, io, activation, allocation, lvmetad,
# available: memory, devices, activation, allocation, lvmetad,
# metadata, cache, locking, lvmpolld. Use "all" to see everything.
debug_classes = [ "memory", "devices", "io", "activation", "allocation", "lvmetad", "metadata", "cache", "locking", "lvmpolld", "dbus" ]
debug_classes = [ "memory", "devices", "activation", "allocation", "lvmetad", "metadata", "cache", "locking", "lvmpolld", "dbus" ]
}
# Configuration section backup.
@@ -708,7 +659,7 @@ global {
# Configuration option global/units.
# Default value for --units argument.
units = "r"
units = "h"
# Configuration option global/si_unit_consistency.
# Distinguish between powers of 1024 and 1000 bytes.
@@ -733,17 +684,29 @@ global {
activation = 1
# Configuration option global/fallback_to_lvm1.
# This setting is no longer used.
# Try running LVM1 tools if LVM cannot communicate with DM.
# This option only applies to 2.4 kernels and is provided to help
# switch between device-mapper kernels and LVM1 kernels. The LVM1
# tools need to be installed with .lvm1 suffices, e.g. vgscan.lvm1.
# They will stop working once the lvm2 on-disk metadata format is used.
# This configuration option has an automatic default value.
# fallback_to_lvm1 = 0
# fallback_to_lvm1 = @DEFAULT_FALLBACK_TO_LVM1@
# Configuration option global/format.
# This setting is no longer used.
# The default metadata format that commands should use.
# The -M 1|2 option overrides this setting.
#
# Accepted values:
# lvm1
# lvm2
#
# This configuration option has an automatic default value.
# format = "lvm2"
# Configuration option global/format_libraries.
# This setting is no longer used.
# Shared libraries that process different metadata formats.
# If support for LVM1 metadata was compiled as a shared library use
# format_libraries = "liblvm2format1.so"
# This configuration option does not have a default value defined.
# Configuration option global/segment_libraries.
@@ -840,6 +803,13 @@ global {
# encountered the internal error. Please only enable for debugging.
abort_on_internal_errors = 0
# Configuration option global/detect_internal_vg_cache_corruption.
# Internal verification of VG structures.
# Check if CRC matches when a parsed VG is used multiple times. This
# is useful to catch unexpected changes to cached VG structures.
# Please only enable for debugging.
detect_internal_vg_cache_corruption = 0
# Configuration option global/metadata_read_only.
# No operations that change on-disk metadata are permitted.
# Additionally, read-only commands that encounter metadata in need of
@@ -922,11 +892,6 @@ global {
# This configuration option has an automatic default value.
# lvdisplay_shows_full_device_path = 0
# Configuration option global/use_aio.
# Use async I/O when reading and writing devices.
# This configuration option has an automatic default value.
# use_aio = 1
# Configuration option global/use_lvmetad.
# Use lvmetad to cache metadata and reduce disk scanning.
# When enabled (and running), lvmetad provides LVM commands with VG
@@ -957,7 +922,7 @@ global {
use_lvmetad = @DEFAULT_USE_LVMETAD@
# Configuration option global/lvmetad_update_wait_time.
# Number of seconds a command will wait for lvmetad update to finish.
# The number of seconds a command will wait for lvmetad update to finish.
# After waiting for this period, a command will not use lvmetad, and
# will revert to disk scanning.
# This configuration option has an automatic default value.
@@ -1042,7 +1007,7 @@ global {
# Configuration option global/cache_disabled_features.
# Features to not use in the cache driver.
# This can be helpful for testing, or to avoid using a feature that is
# causing problems. Features include: policy_mq, policy_smq, metadata2.
# causing problems. Features include: policy_mq, policy_smq.
#
# Example
# cache_disabled_features = [ "policy_smq" ]
@@ -1087,12 +1052,6 @@ global {
# This configuration option has an automatic default value.
# cache_repair_options = [ "" ]
# Configuration option global/fsadm_executable.
# The full path to the fsadm command.
# LVM uses this command to help with lvresize -r operations.
# This configuration option has an automatic default value.
# fsadm_executable = "@FSADM_PATH@"
# Configuration option global/system_id_source.
# The method LVM uses to set the local system ID.
# Volume Groups can also be given a system ID (by vgcreate, vgchange,
@@ -1144,16 +1103,6 @@ global {
# When enabled, an LVM command that changes PVs, changes VG metadata,
# or changes the activation state of an LV will send a notification.
notify_dbus = 1
# Configuration option global/io_memory_size.
# The amount of memory in KiB that LVM allocates to perform disk io.
# LVM performance may benefit from more io memory when there are many
# disks or VG metadata is large. Increasing this size may be necessary
# when a single copy of VG metadata is larger than the current setting.
# This value should usually not be decreased from the default; setting
# it too low can result in lvm failing to read VGs.
# This configuration option has an automatic default value.
# io_memory_size = 8192
}
# Configuration section activation.
@@ -1201,8 +1150,7 @@ activation {
# Configuration option activation/missing_stripe_filler.
# Method to fill missing stripes when activating an incomplete LV.
# Using 'error' will make inaccessible parts of the device return I/O
# errors on access. Using 'zero' will return success (and zero) on I/O
# You can instead use a device path, in which case,
# errors on access. You can instead use a device path, in which case,
# that device will be used in place of missing stripes. Using anything
# other than 'error' with mirrored or snapshotted volumes is likely to
# result in data corruption.
@@ -1322,10 +1270,9 @@ activation {
# Configuration option activation/raid_region_size.
# Size in KiB of each raid or mirror synchronization region.
# The clean/dirty state of data is tracked for each region.
# The value is rounded down to a power of two if necessary, and
# is ignored if it is not a multiple of the machine memory page size.
raid_region_size = 2048
# For raid or mirror segment types, this is the amount of data that is
# copied at once when initializing, or moved at once by pvmove.
raid_region_size = 512
# Configuration option activation/error_when_full.
# Return errors if a thin pool runs out of space.
@@ -1737,11 +1684,11 @@ activation {
# This configuration option has an automatic default value.
# quoted = 1
# Configuration option report/columns_as_rows.
# Configuration option report/colums_as_rows.
# Output each column as a row.
# If set, this also implies report/prefixes=1.
# This configuration option has an automatic default value.
# columns_as_rows = 0
# colums_as_rows = 0
# Configuration option report/binary_values_as_numeric.
# Use binary values 0 or 1 instead of descriptive literal values.
@@ -2095,15 +2042,6 @@ dmeventd {
# warning is repeated when 85%, 90% and 95% of the pool is filled.
thin_library = "libdevmapper-event-lvm2thin.so"
# Configuration option dmeventd/thin_command.
# The plugin runs command with each 5% increment when thin-pool data volume
# or metadata volume gets above 50%.
# Command which starts with 'lvm ' prefix is internal lvm command.
# You can write your own handler to customise behaviour in more details.
# User handler is specified with the full path starting with '/'.
# This configuration option has an automatic default value.
# thin_command = "lvm lvextend --use-policies"
# Configuration option dmeventd/executable.
# The full path to the dmeventd binary.
# This configuration option has an automatic default value.

conf/lvmlocal.conf.base (new file): 19 lines

@@ -0,0 +1,19 @@
# This is a local configuration file template for the LVM2 system
# which should be installed as @DEFAULT_SYS_DIR@/lvmlocal.conf .
#
# Refer to 'man lvm.conf' for information about the file layout.
#
# To put this file in a different directory and override
# @DEFAULT_SYS_DIR@ set the environment variable LVM_SYSTEM_DIR before
# running the tools.
#
# The lvmlocal.conf file is normally expected to contain only the
# "local" section which contains settings that should not be shared or
# repeated among different hosts. (But if other sections are present,
# they *will* get processed. Settings in this file override equivalent
# ones in lvm.conf and are in turn overridden by ones in any enabled
# lvm_<tag>.conf files.)
#
# Please take care that each setting only appears once if uncommenting
# example settings in this file and never copy this file between hosts.

configure (vendored): 1160 changed lines
(File diff suppressed because it is too large.)


@@ -15,7 +15,6 @@ AC_PREREQ(2.69)
################################################################################
dnl -- Process this file with autoconf to produce a configure script.
AC_INIT
CONFIGURE_LINE="$0 $@"
AC_CONFIG_SRCDIR([lib/device/dev-cache.h])
AC_CONFIG_HEADERS([include/configure.h])
@@ -31,7 +30,6 @@ AS_IF([test -z "$CFLAGS"], [COPTIMISE_FLAG="-O2"])
case "$host_os" in
linux*)
CLDFLAGS="$CLDFLAGS -Wl,--version-script,.export.sym"
# equivalent to -rdynamic
ELDFLAGS="-Wl,--export-dynamic"
# FIXME Generate list and use --dynamic-list=.dlopen.sym
CLDWHOLEARCHIVE="-Wl,-whole-archive"
@@ -77,7 +75,6 @@ AC_PROG_CC
AC_PROG_CXX
CFLAGS=$save_CFLAGS
CXXFLAGS=$save_CXXFLAGS
PATH_SBIN="$PATH:/usr/sbin:/sbin"
dnl probably no longer needed in 2008, but...
AC_PROG_GCC_TRADITIONAL
@@ -86,12 +83,9 @@ AC_PROG_LN_S
AC_PROG_MAKE_SET
AC_PROG_MKDIR_P
AC_PROG_RANLIB
AC_CHECK_TOOL(AR, ar)
AC_PATH_TOOL(CFLOW_CMD, cflow)
AC_PATH_TOOL(CSCOPE_CMD, cscope)
AC_PATH_TOOL(CHMOD, chmod)
AC_PATH_TOOL(WC, wc)
AC_PATH_TOOL(SORT, sort)
################################################################################
dnl -- Check for header files.
@@ -103,13 +97,13 @@ AC_HEADER_SYS_WAIT
AC_HEADER_TIME
AC_CHECK_HEADERS([assert.h ctype.h dirent.h errno.h fcntl.h float.h \
getopt.h inttypes.h langinfo.h libaio.h libgen.h limits.h locale.h paths.h \
getopt.h inttypes.h langinfo.h libgen.h limits.h locale.h paths.h \
signal.h stdarg.h stddef.h stdio.h stdlib.h string.h sys/file.h \
sys/ioctl.h syslog.h sys/mman.h sys/param.h sys/resource.h sys/stat.h \
sys/time.h sys/types.h sys/utsname.h sys/wait.h time.h \
unistd.h], , [AC_MSG_ERROR(bailing out)])
AC_CHECK_HEADERS(termios.h sys/statvfs.h sys/timerfd.h sys/vfs.h linux/magic.h linux/fiemap.h)
AC_CHECK_HEADERS(termios.h sys/statvfs.h sys/timerfd.h linux/magic.h linux/fiemap.h)
case "$host_os" in
linux*)
@@ -124,7 +118,6 @@ AC_C_CONST
AC_C_INLINE
AC_CHECK_MEMBERS([struct stat.st_rdev])
AC_CHECK_TYPES([ptrdiff_t])
AC_STRUCT_ST_BLOCKS
AC_STRUCT_TM
AC_TYPE_OFF_T
AC_TYPE_PID_T
@@ -141,12 +134,11 @@ AC_TYPE_UINT8_T
AC_TYPE_UINT16_T
AC_TYPE_UINT32_T
AC_TYPE_UINT64_T
AX_GCC_BUILTIN([__builtin_clz])
################################################################################
dnl -- Check for functions
AC_CHECK_FUNCS([ftruncate gethostname getpagesize gettimeofday localtime_r \
memchr memset mkdir mkfifo munmap nl_langinfo pselect realpath rmdir setenv \
memchr memset mkdir mkfifo munmap nl_langinfo realpath rmdir setenv \
setlocale strcasecmp strchr strcspn strdup strerror strncasecmp strndup \
strrchr strspn strstr strtol strtoul uname], , [AC_MSG_ERROR(bailing out)])
AC_FUNC_ALLOCA
@@ -192,15 +184,9 @@ AC_SUBST(HAVE_FULL_RELRO)
################################################################################
dnl -- Prefix is /usr by default, the exec_prefix default is setup later
AC_PREFIX_DEFAULT(/usr)
################################################################################
dnl -- Clear default exec_prefix - install into /sbin rather than /usr/sbin
test "$exec_prefix" = NONE -a "$prefix" = NONE && exec_prefix=""
test "x$prefix" = xNONE && prefix=$ac_default_prefix
# Let make expand exec_prefix.
test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
if test "$prefix" = NONE; then
datarootdir=${ac_default_prefix}/share
fi
################################################################################
dnl -- Setup the ownership of the files
@@ -282,6 +268,58 @@ esac
AC_MSG_RESULT($MANGLING)
AC_DEFINE_UNQUOTED([DEFAULT_DM_NAME_MANGLING], $mangling, [Define default name mangling behaviour])
################################################################################
dnl -- LVM1 tool fallback option
AC_MSG_CHECKING(whether to enable lvm1 fallback)
AC_ARG_ENABLE(lvm1_fallback,
AC_HELP_STRING([--enable-lvm1_fallback],
[use this to fall back and use LVM1 binaries if
device-mapper is missing from the kernel]),
LVM1_FALLBACK=$enableval, LVM1_FALLBACK=no)
AC_MSG_RESULT($LVM1_FALLBACK)
if test "$LVM1_FALLBACK" = yes; then
DEFAULT_FALLBACK_TO_LVM1=1
AC_DEFINE([LVM1_FALLBACK], 1, [Define to 1 if 'lvm' should fall back to using LVM1 binaries if device-mapper is missing from the kernel])
else
DEFAULT_FALLBACK_TO_LVM1=0
fi
AC_DEFINE_UNQUOTED(DEFAULT_FALLBACK_TO_LVM1, [$DEFAULT_FALLBACK_TO_LVM1],
[Fall back to LVM1 by default if device-mapper is missing from the kernel.])
################################################################################
dnl -- format1 inclusion type
AC_MSG_CHECKING(whether to include support for lvm1 metadata)
AC_ARG_WITH(lvm1,
AC_HELP_STRING([--with-lvm1=TYPE],
[LVM1 metadata support: internal/shared/none [internal]]),
LVM1=$withval, LVM1=internal)
AC_MSG_RESULT($LVM1)
case "$LVM1" in
none|shared) ;;
internal) AC_DEFINE([LVM1_INTERNAL], 1,
[Define to 1 to include built-in support for LVM1 metadata.]) ;;
*) AC_MSG_ERROR([--with-lvm1 parameter invalid]) ;;
esac
################################################################################
dnl -- format_pool inclusion type
AC_MSG_CHECKING(whether to include support for GFS pool metadata)
AC_ARG_WITH(pool,
AC_HELP_STRING([--with-pool=TYPE],
[GFS pool read-only support: internal/shared/none [internal]]),
POOL=$withval, POOL=internal)
AC_MSG_RESULT($POOL)
case "$POOL" in
none|shared) ;;
internal) AC_DEFINE([POOL_INTERNAL], 1,
[Define to 1 to include built-in support for GFS pool metadata.]) ;;
*) AC_MSG_ERROR([--with-pool parameter invalid])
esac
################################################################################
dnl -- cluster_locking inclusion type
AC_MSG_CHECKING(whether to include support for cluster locking)
@@ -332,6 +370,13 @@ esac
################################################################################
dnl -- raid inclusion type
AC_MSG_CHECKING(whether to include raid)
AC_ARG_WITH(raid,
AC_HELP_STRING([--with-raid=TYPE],
[raid support: internal/shared/none [internal]]),
RAID=$withval, RAID=internal)
AC_MSG_RESULT($RAID)
AC_ARG_WITH(default-mirror-segtype,
AC_HELP_STRING([--with-default-mirror-segtype=TYPE],
[default mirror segtype: raid1/mirror [raid1]]),
@@ -340,9 +385,14 @@ AC_ARG_WITH(default-raid10-segtype,
AC_HELP_STRING([--with-default-raid10-segtype=TYPE],
[default mirror segtype: raid10/mirror [raid10]]),
DEFAULT_RAID10_SEGTYPE=$withval, DEFAULT_RAID10_SEGTYPE="raid10")
AC_DEFINE([RAID_INTERNAL], 1,
[Define to 1 to include built-in support for raid.])
case "$RAID" in
none) test "$DEFAULT_MIRROR_SEGTYPE" = "raid1" && DEFAULT_MIRROR_SEGTYPE="mirror"
test "$DEFAULT_RAID10_SEGTYPE" = "raid10" && DEFAULT_RAID10_SEGTYPE="mirror" ;;
shared) ;;
internal) AC_DEFINE([RAID_INTERNAL], 1,
[Define to 1 to include built-in support for raid.]) ;;
*) AC_MSG_ERROR([--with-raid parameter invalid]) ;;
esac
AC_DEFINE_UNQUOTED([DEFAULT_MIRROR_SEGTYPE], ["$DEFAULT_MIRROR_SEGTYPE"],
[Default segtype used for mirror volumes.])
@@ -351,6 +401,22 @@ AC_DEFINE_UNQUOTED([DEFAULT_RAID10_SEGTYPE], ["$DEFAULT_RAID10_SEGTYPE"],
[Default segtype used for raid10 volumes.])
################################################################################
dnl -- asynchronous volume replicator inclusion type
AC_MSG_CHECKING(whether to include replicators)
AC_ARG_WITH(replicators,
AC_HELP_STRING([--with-replicators=TYPE],
[replicator support: internal/shared/none [none]]),
REPLICATORS=$withval, REPLICATORS=none)
AC_MSG_RESULT($REPLICATORS)
case "$REPLICATORS" in
none|shared) ;;
internal) AC_DEFINE([REPLICATOR_INTERNAL], 1,
[Define to 1 to include built-in support for replicators.]) ;;
*) AC_MSG_ERROR([--with-replicators parameter invalid ($REPLICATORS)]) ;;
esac
AC_ARG_WITH(default-sparse-segtype,
AC_HELP_STRING([--with-default-sparse-segtype=TYPE],
[default sparse segtype: thin/snapshot [thin]]),
@@ -405,7 +471,7 @@ case "$THIN" in
internal|shared)
# Empty means a config way to ignore thin checking
if test "$THIN_CHECK_CMD" = "autodetect"; then
AC_PATH_TOOL(THIN_CHECK_CMD, thin_check, [], [$PATH_SBIN])
AC_PATH_TOOL(THIN_CHECK_CMD, thin_check)
if test -z "$THIN_CHECK_CMD"; then
AC_MSG_WARN([thin_check not found in path $PATH])
THIN_CHECK_CMD=/usr/sbin/thin_check
@@ -429,7 +495,7 @@ case "$THIN" in
fi
# Empty means a config way to ignore thin dumping
if test "$THIN_DUMP_CMD" = "autodetect"; then
AC_PATH_TOOL(THIN_DUMP_CMD, thin_dump, [], [$PATH_SBIN])
AC_PATH_TOOL(THIN_DUMP_CMD, thin_dump)
test -z "$THIN_DUMP_CMD" && {
AC_MSG_WARN(thin_dump not found in path $PATH)
THIN_DUMP_CMD=/usr/sbin/thin_dump
@@ -438,7 +504,7 @@ case "$THIN" in
fi
# Empty means a config way to ignore thin repairing
if test "$THIN_REPAIR_CMD" = "autodetect"; then
AC_PATH_TOOL(THIN_REPAIR_CMD, thin_repair, [], [$PATH_SBIN])
AC_PATH_TOOL(THIN_REPAIR_CMD, thin_repair)
test -z "$THIN_REPAIR_CMD" && {
AC_MSG_WARN(thin_repair not found in path $PATH)
THIN_REPAIR_CMD=/usr/sbin/thin_repair
@@ -447,7 +513,7 @@ case "$THIN" in
fi
# Empty means a config way to ignore thin restoring
if test "$THIN_RESTORE_CMD" = "autodetect"; then
AC_PATH_TOOL(THIN_RESTORE_CMD, thin_restore, [], [$PATH_SBIN])
AC_PATH_TOOL(THIN_RESTORE_CMD, thin_restore)
test -z "$THIN_RESTORE_CMD" && {
AC_MSG_WARN(thin_restore not found in path $PATH)
THIN_RESTORE_CMD=/usr/sbin/thin_restore
@@ -519,7 +585,7 @@ case "$CACHE" in
internal|shared)
# Empty means a config way to ignore cache checking
if test "$CACHE_CHECK_CMD" = "autodetect"; then
AC_PATH_TOOL(CACHE_CHECK_CMD, cache_check, [], [$PATH_SBIN])
AC_PATH_TOOL(CACHE_CHECK_CMD, cache_check)
if test -z "$CACHE_CHECK_CMD"; then
AC_MSG_WARN([cache_check not found in path $PATH])
CACHE_CHECK_CMD=/usr/sbin/cache_check
@@ -546,15 +612,11 @@ case "$CACHE" in
CACHE_CHECK_VERSION_WARN=y
CACHE_CHECK_NEEDS_CHECK=no
fi
if test "$CACHE_CHECK_VSN_MINOR" -lt 7 ; then
AC_MSG_WARN([$CACHE_CHECK_CMD: Old version "$CACHE_CHECK_VSN" does not support new cache format V2])
CACHE_CHECK_VERSION_WARN=y
fi
fi
fi
# Empty means a config way to ignore cache dumping
if test "$CACHE_DUMP_CMD" = "autodetect"; then
AC_PATH_TOOL(CACHE_DUMP_CMD, cache_dump, [], [$PATH_SBIN])
AC_PATH_TOOL(CACHE_DUMP_CMD, cache_dump)
test -z "$CACHE_DUMP_CMD" && {
AC_MSG_WARN(cache_dump not found in path $PATH)
CACHE_DUMP_CMD=/usr/sbin/cache_dump
@@ -563,7 +625,7 @@ case "$CACHE" in
fi
# Empty means a config way to ignore cache repairing
if test "$CACHE_REPAIR_CMD" = "autodetect"; then
AC_PATH_TOOL(CACHE_REPAIR_CMD, cache_repair, [], [$PATH_SBIN])
AC_PATH_TOOL(CACHE_REPAIR_CMD, cache_repair)
test -z "$CACHE_REPAIR_CMD" && {
AC_MSG_WARN(cache_repair not found in path $PATH)
CACHE_REPAIR_CMD=/usr/sbin/cache_repair
@@ -572,7 +634,7 @@ case "$CACHE" in
fi
# Empty means a config way to ignore cache restoring
if test "$CACHE_RESTORE_CMD" = "autodetect"; then
AC_PATH_TOOL(CACHE_RESTORE_CMD, cache_restore, [], [$PATH_SBIN])
AC_PATH_TOOL(CACHE_RESTORE_CMD, cache_restore)
test -z "$CACHE_RESTORE_CMD" && {
AC_MSG_WARN(cache_restore not found in path $PATH)
CACHE_RESTORE_CMD=/usr/sbin/cache_restore
@@ -603,15 +665,11 @@ AC_DEFINE_UNQUOTED([CACHE_RESTORE_CMD], ["$CACHE_RESTORE_CMD"],
################################################################################
dnl -- Disable readline
AC_MSG_CHECKING(whether to enable readline)
AC_ARG_ENABLE([readline],
AC_HELP_STRING([--disable-readline], [disable readline support]),
READLINE=$enableval, READLINE=maybe)
################################################################################
dnl -- Disable editline
AC_ARG_ENABLE([editline],
AC_HELP_STRING([--enable-editline], [enable editline support]),
EDITLINE=$enableval, EDITLINE=no)
AC_MSG_RESULT($READLINE)
################################################################################
dnl -- Disable realtime clock support
@@ -656,7 +714,7 @@ dnl -- Set up pidfile and run directory
AH_TEMPLATE(DEFAULT_PID_DIR)
AC_ARG_WITH(default-pid-dir,
AC_HELP_STRING([--with-default-pid-dir=PID_DIR],
[default directory to keep PID files in [autodetect]]),
[Default directory to keep PID files in. [autodetect]]),
DEFAULT_PID_DIR="$withval", DEFAULT_PID_DIR=$RUN_DIR)
AC_DEFINE_UNQUOTED(DEFAULT_PID_DIR, ["$DEFAULT_PID_DIR"],
[Default directory to keep PID files in.])
@@ -664,7 +722,7 @@ AC_DEFINE_UNQUOTED(DEFAULT_PID_DIR, ["$DEFAULT_PID_DIR"],
AH_TEMPLATE(DEFAULT_DM_RUN_DIR, [Name of default DM run directory.])
AC_ARG_WITH(default-dm-run-dir,
AC_HELP_STRING([--with-default-dm-run-dir=DM_RUN_DIR],
[default DM run directory [autodetect]]),
[ Default DM run directory. [autodetect]]),
DEFAULT_DM_RUN_DIR="$withval", DEFAULT_DM_RUN_DIR=$RUN_DIR)
AC_DEFINE_UNQUOTED(DEFAULT_DM_RUN_DIR, ["$DEFAULT_DM_RUN_DIR"],
[Default DM run directory.])
@@ -672,7 +730,7 @@ AC_DEFINE_UNQUOTED(DEFAULT_DM_RUN_DIR, ["$DEFAULT_DM_RUN_DIR"],
AH_TEMPLATE(DEFAULT_RUN_DIR, [Name of default LVM run directory.])
AC_ARG_WITH(default-run-dir,
AC_HELP_STRING([--with-default-run-dir=RUN_DIR],
[default LVM run directory [autodetect_run_dir/lvm]]),
[Default LVM run directory. [autodetect_run_dir/lvm]]),
DEFAULT_RUN_DIR="$withval", DEFAULT_RUN_DIR="$RUN_DIR/lvm")
AC_DEFINE_UNQUOTED(DEFAULT_RUN_DIR, ["$DEFAULT_RUN_DIR"],
[Default LVM run directory.])
@@ -1009,6 +1067,20 @@ if test "$PROFILING" = yes; then
fi
fi
################################################################################
dnl -- Enable testing
AC_MSG_CHECKING(whether to enable unit testing)
AC_ARG_ENABLE(testing,
AC_HELP_STRING([--enable-testing],
[enable testing targets in the makefile]),
TESTING=$enableval, TESTING=no)
AC_MSG_RESULT($TESTING)
if test "$TESTING" = yes; then
pkg_config_init
PKG_CHECK_MODULES(CUNIT, cunit >= 2.0)
fi
################################################################################
dnl -- Set LVM2 testsuite data
TESTSUITE_DATA='${datarootdir}/lvm2-testsuite'
@@ -1073,10 +1145,10 @@ AC_MSG_RESULT($BUILD_LVMPOLLD)
################################################################################
BUILD_LVMLOCKD=no
dnl -- Build lvmlockdsanlock
AC_MSG_CHECKING(whether to build lvmlockdsanlock)
AC_ARG_ENABLE(lvmlockd-sanlock,
AC_HELP_STRING([--enable-lvmlockd-sanlock],
dnl -- Build lockdsanlock
AC_MSG_CHECKING(whether to build lockdsanlock)
AC_ARG_ENABLE(lockd-sanlock,
AC_HELP_STRING([--enable-lockd-sanlock],
[enable the LVM lock daemon using sanlock]),
LOCKDSANLOCK=$enableval)
AC_MSG_RESULT($LOCKDSANLOCK)
@@ -1091,10 +1163,10 @@ if test "$BUILD_LOCKDSANLOCK" = yes; then
fi
################################################################################
dnl -- Build lvmlockddlm
AC_MSG_CHECKING(whether to build lvmlockddlm)
AC_ARG_ENABLE(lvmlockd-dlm,
AC_HELP_STRING([--enable-lvmlockd-dlm],
dnl -- Build lockddlm
AC_MSG_CHECKING(whether to build lockddlm)
AC_ARG_ENABLE(lockd-dlm,
AC_HELP_STRING([--enable-lockd-dlm],
[enable the LVM lock daemon using dlm]),
LOCKDDLM=$enableval)
AC_MSG_RESULT($LOCKDDLM)
@@ -1196,80 +1268,75 @@ fi
AC_DEFINE_UNQUOTED(DEFAULT_USE_LVMPOLLD, [$DEFAULT_USE_LVMPOLLD],
[Use lvmpolld by default.])
################################################################################
dnl -- Check dmfilemapd
AC_MSG_CHECKING(whether to build dmfilemapd)
AC_ARG_ENABLE(dmfilemapd, AC_HELP_STRING([--enable-dmfilemapd],
[enable the dmstats filemap daemon]),
BUILD_DMFILEMAPD=$enableval, BUILD_DMFILEMAPD=no)
AC_MSG_RESULT($BUILD_DMFILEMAPD)
AC_DEFINE([DMFILEMAPD], $BUILD_DMFILEMAPD, [Define to 1 to enable the device-mapper filemap daemon.])
dnl -- dmfilemapd requires FIEMAP
if test "$BUILD_DMFILEMAPD" = yes; then
AC_CHECK_HEADER([linux/fiemap.h], , [AC_MSG_ERROR(--enable-dmfilemapd requires fiemap.h)])
fi
################################################################################
dnl -- Build notifydbus
AC_MSG_CHECKING(whether to build notifydbus)
AC_ARG_ENABLE(notify-dbus,
AC_HELP_STRING([--enable-notify-dbus],
[enable LVM notification using dbus]),
NOTIFYDBUS_SUPPORT=$enableval, NOTIFYDBUS_SUPPORT=no)
AC_MSG_RESULT($NOTIFYDBUS_SUPPORT)
NOTIFYDBUS=$enableval)
AC_MSG_RESULT($NOTIFYDBUS)
if test "$NOTIFYDBUS_SUPPORT" = yes; then
BUILD_NOTIFYDBUS=$NOTIFYDBUS
if test "$BUILD_NOTIFYDBUS" = yes; then
AC_DEFINE([NOTIFYDBUS_SUPPORT], 1, [Define to 1 to include code that uses dbus notification.])
SYSTEMD_LIBS="-lsystemd"
LIBS="-lsystemd $LIBS"
fi
################################################################################
dnl -- Look for dbus libraries
if test "$NOTIFYDBUS_SUPPORT" = yes; then
if test "$BUILD_NOTIFYDBUS" = yes; then
PKG_CHECK_MODULES(NOTIFY_DBUS, systemd >= 221, [HAVE_NOTIFY_DBUS=yes], $bailout)
fi
################################################################################
dnl -- Enable blkid wiping functionality
AC_MSG_CHECKING(whether to enable libblkid detection of signatures when wiping)
AC_ARG_ENABLE(blkid_wiping,
AC_HELP_STRING([--disable-blkid_wiping],
[disable libblkid detection of signatures when wiping and use native code instead]),
BLKID_WIPING=$enableval, BLKID_WIPING=maybe)
AC_MSG_RESULT($BLKID_WIPING)
DEFAULT_USE_BLKID_WIPING=0
if test "$BLKID_WIPING" != no; then
pkg_config_init
PKG_CHECK_MODULES(BLKID, blkid >= 2.24,
[ BLKID_WIPING=yes
BLKID_PC="blkid"
DEFAULT_USE_BLKID_WIPING=1
AC_DEFINE([BLKID_WIPING_SUPPORT], 1, [Define to 1 to use libblkid detection of signatures when wiping.])
], [if test "$BLKID_WIPING" = maybe; then
[test "$BLKID_WIPING" = maybe && BLKID_WIPING=yes],
[if test "$BLKID_WIPING" = maybe; then
BLKID_WIPING=no
else
AC_MSG_ERROR([bailing out... blkid library >= 2.24 is required])
fi])
if test "$BLKID_WIPING" = yes; then
BLKID_PC="blkid"
DEFAULT_USE_BLKID_WIPING=1
AC_DEFINE([BLKID_WIPING_SUPPORT], 1, [Define to 1 to use libblkid detection of signatures when wiping.])
else
DEFAULT_USE_BLKID_WIPING=0
fi
else
DEFAULT_USE_BLKID_WIPING=0
fi
AC_MSG_CHECKING([whether to enable libblkid detection of signatures when wiping])
AC_MSG_RESULT($BLKID_WIPING)
AC_DEFINE_UNQUOTED(DEFAULT_USE_BLKID_WIPING, [$DEFAULT_USE_BLKID_WIPING],
[Use blkid wiping by default.])
################################################################################
dnl -- Enable udev-systemd protocol to instantiate a service for background jobs
dnl -- Requires systemd version 205 at least (including support for systemd-run)
AC_MSG_CHECKING(whether to use udev-systemd protocol for jobs in background)
AC_ARG_ENABLE(udev-systemd-background-jobs,
AC_HELP_STRING([--disable-udev-systemd-background-jobs],
[disable udev-systemd protocol to instantiate a service for background job]),
UDEV_SYSTEMD_BACKGROUND_JOBS=$enableval,
UDEV_SYSTEMD_BACKGROUND_JOBS=maybe)
AC_MSG_RESULT($UDEV_SYSTEMD_BACKGROUND_JOBS)
if test "$UDEV_SYSTEMD_BACKGROUND_JOBS" != no; then
pkg_config_init
PKG_CHECK_MODULES(SYSTEMD, systemd >= 205,
[UDEV_SYSTEMD_BACKGROUND_JOBS=yes],
[test "$UDEV_SYSTEMD_BACKGROUND_JOBS" = maybe && UDEV_SYSTEMD_BACKGROUND_JOBS=yes],
[if test "$UDEV_SYSTEMD_BACKGROUND_JOBS" = maybe; then
UDEV_SYSTEMD_BACKGROUND_JOBS=no
else
@@ -1277,9 +1344,6 @@ if test "$UDEV_SYSTEMD_BACKGROUND_JOBS" != no; then
fi])
fi
AC_MSG_CHECKING(whether to use udev-systemd protocol for jobs in background)
AC_MSG_RESULT($UDEV_SYSTEMD_BACKGROUND_JOBS)
################################################################################
dnl -- Enable udev synchronisation
AC_MSG_CHECKING(whether to enable synchronisation with udev processing)
@@ -1383,8 +1447,6 @@ AC_SUBST([LVM2APP_LIB])
test "$APPLIB" = yes \
&& LVM2APP_LIB=-llvm2app \
|| LVM2APP_LIB=
AS_IF([test "$APPLIB"],
[AC_MSG_WARN([liblvm2app is deprecated. Use D-Bus API])])
################################################################################
dnl -- Enable cmdlib
@@ -1405,8 +1467,6 @@ AC_ARG_ENABLE(dbus-service,
AC_HELP_STRING([--enable-dbus-service], [install D-Bus support]),
BUILD_LVMDBUSD=$enableval, BUILD_LVMDBUSD=no)
AC_MSG_RESULT($BUILD_LVMDBUSD)
AS_IF([test "$NOTIFYDBUS_SUPPORT" = yes && test "BUILD_LVMDBUSD" = yes],
[AC_MSG_WARN([Building D-Bus support without D-Bus notifications.])])
################################################################################
dnl -- Enable Python liblvm2app bindings
@@ -1433,9 +1493,6 @@ if test "$PYTHON_BINDINGS" = yes; then
AC_MSG_ERROR([--enable-python-bindings is replaced by --enable-python2-bindings and --enable-python3-bindings])
fi
m4_define_default([_AM_PYTHON_INTERPRETER_LIST],[ python3 python2 python dnl
python3.9 python3.8 python3.7 python3.6 python3.5 python3.4 python3.3 python3.2 python3.1 python3.0 dnl
python2.7 python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0 ])
if test "$PYTHON2_BINDINGS" = yes; then
AM_PATH_PYTHON([2])
AC_PATH_TOOL(PYTHON2, python2)
@@ -1448,7 +1505,7 @@ if test "$PYTHON2_BINDINGS" = yes; then
PYTHON2DIR=$pythondir
PYTHON_BINDINGS=yes
fi
if test "$PYTHON3_BINDINGS" = yes -o "$BUILD_LVMDBUSD" = yes; then
unset PYTHON PYTHON_CONFIG
unset am_cv_pathless_PYTHON ac_cv_path_PYTHON am_cv_python_platform
@@ -1462,7 +1519,7 @@ if test "$PYTHON3_BINDINGS" = yes -o "$BUILD_LVMDBUSD" = yes; then
PYTHON3_INCDIRS=`"$PYTHON3_CONFIG" --includes`
PYTHON3_LIBDIRS=`"$PYTHON3_CONFIG" --libs`
PYTHON3DIR=$pythondir
test "$PYTHON3_BINDINGS" = yes && PYTHON_BINDINGS=yes
PYTHON_BINDINGS=yes
fi
if test "$BUILD_LVMDBUSD" = yes; then
@@ -1472,7 +1529,6 @@ if test "$BUILD_LVMDBUSD" = yes; then
fi
if test "$PYTHON_BINDINGS" = yes -o "$PYTHON2_BINDINGS" = yes -o "$PYTHON3_BINDINGS" = yes; then
AC_MSG_WARN([Python bindings are deprecated. Use D-Bus API])
test "$APPLIB" != yes && AC_MSG_ERROR([Python_bindings require --enable-applib])
fi
@@ -1508,11 +1564,13 @@ dnl -- enable dmeventd handling
AC_MSG_CHECKING(whether to use dmeventd)
AC_ARG_ENABLE(dmeventd, AC_HELP_STRING([--enable-dmeventd],
[enable the device-mapper event daemon]),
BUILD_DMEVENTD=$enableval, BUILD_DMEVENTD=no)
AC_MSG_RESULT($BUILD_DMEVENTD)
DMEVENTD=$enableval)
AC_MSG_RESULT($DMEVENTD)
BUILD_DMEVENTD=$DMEVENTD
dnl -- dmeventd currently requires internal mirror support
if test "$BUILD_DMEVENTD" = yes; then
if test "$DMEVENTD" = yes; then
if test "$MIRRORS" != internal; then
AC_MSG_ERROR([--enable-dmeventd currently requires --with-mirrors=internal])
fi
@@ -1536,6 +1594,10 @@ AC_CHECK_LIB(c, canonicalize_file_name,
AC_DEFINE([HAVE_CANONICALIZE_FILE_NAME], 1,
[Define to 1 if canonicalize_file_name is available.]))
################################################################################
dnl -- Clear default exec_prefix - install into /sbin rather than /usr/sbin
test "$exec_prefix" = NONE -a "$prefix" = NONE && exec_prefix=""
################################################################################
dnl -- Check for dlopen
AC_CHECK_LIB(dl, dlopen,
@@ -1548,6 +1610,8 @@ AC_CHECK_LIB(dl, dlopen,
################################################################################
dnl -- Check for shared/static conflicts
if [[ \( "$LVM1" = shared -o "$POOL" = shared -o "$CLUSTER" = shared \
-o "$SNAPSHOTS" = shared -o "$MIRRORS" = shared \
-o "$RAID" = shared -o "$CACHE" = shared \
\) -a "$STATIC_LINK" = yes ]]; then
AC_MSG_ERROR([Features cannot be 'shared' when building statically])
fi
@@ -1588,45 +1652,15 @@ if test "$SELINUX" = yes; then
HAVE_SELINUX=no ])
fi
################################################################################
dnl -- Check BLKZEROOUT support
AC_CACHE_CHECK([for BLKZEROOUT in sys/ioctl.h.],
[ac_cv_have_blkzeroout],
[AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
[#include <sys/ioctl.h>
#include <linux/fs.h>
int bar(void) { return ioctl(0, BLKZEROOUT, 0); }]
)], [ac_cv_have_blkzeroout=yes], [ac_cv_have_blkzeroout=no])])
AC_ARG_ENABLE(blkzeroout,
AC_HELP_STRING([--disable-blkzeroout],
[do not use BLKZEROOUT for device zeroing]),
BLKZEROOUT=$enableval, BLKZEROOUT=yes)
AC_MSG_CHECKING(whether to use BLKZEROOUT for device zeroing)
if test "$BLKZEROOUT" = yes; then
AC_IF_YES(ac_cv_have_blkzeroout,
AC_DEFINE(HAVE_BLKZEROOUT, 1,
[Define if ioctl BLKZEROOUT can be used for device zeroing.]),
BLKZEROOUT=no)
fi
AC_MSG_RESULT($BLKZEROOUT)
################################################################################
dnl -- Check for realtime clock support
RT_LIBS=
HAVE_REALTIME=no
if test "$REALTIME" = yes; then
AC_CHECK_FUNCS([clock_gettime], HAVE_REALTIME=yes)
AS_IF([test "$HAVE_REALTIME" != yes], [ # try again with -lrt
AC_CHECK_LIB([rt], [clock_gettime], RT_LIBS="-lrt"; HAVE_REALTIME=yes)])
AC_CHECK_LIB(rt, clock_gettime, HAVE_REALTIME=yes, HAVE_REALTIME=no)
if test "$HAVE_REALTIME" = yes; then
AC_DEFINE([HAVE_REALTIME], 1, [Define to 1 to include support for realtime clock.])
LIBS="-lrt $LIBS"
RT_LIB="-lrt"
else
AC_MSG_WARN(Disabling realtime clock)
fi
@@ -1648,16 +1682,6 @@ AC_IF_YES(ac_cv_stat_st_ctim,
dnl -- Check for getopt
AC_CHECK_HEADERS(getopt.h, AC_DEFINE([HAVE_GETOPTLONG], 1, [Define to 1 if getopt_long is available.]))
################################################################################
dnl -- Check for editline
if test "$EDITLINE" == yes; then
PKG_CHECK_MODULES([EDITLINE], [libedit], [
AC_DEFINE([EDITLINE_SUPPORT], 1,
[Define to 1 to include the LVM editline shell.])], AC_MSG_ERROR(
[libedit could not be found which is required for the --enable-readline option.])
)
fi
################################################################################
dnl -- Check for readline (Shamelessly copied from parted 1.4.17)
if test "$READLINE" != no; then
@@ -1680,7 +1704,6 @@ Note: (n)curses also seems to work as a substitute for termcap. This was
AC_DEFINE([READLINE_SUPPORT], 1,
[Define to 1 to include the LVM readline shell.])
dnl -- Try only with -lreadline and check for different symbol
READLINE=yes
LIBS=$lvm_saved_libs
AC_CHECK_LIB([readline], [rl_line_buffer],
[ READLINE_LIBS="-lreadline" ], [
@@ -1787,22 +1810,13 @@ dnl -- Ensure additional headers required
if test "$READLINE" = yes; then
AC_CHECK_HEADERS(readline/readline.h readline/history.h,,hard_bailout)
fi
AC_MSG_CHECKING(whether to enable readline)
AC_MSG_RESULT($READLINE)
if test "$EDITLINE" = yes; then
AC_CHECK_HEADERS(editline/readline.h editline/history.h,,hard_bailout)
fi
AC_MSG_CHECKING(whether to enable editline)
AC_MSG_RESULT($EDITLINE)
if test "$BUILD_CMIRRORD" = yes; then
AC_CHECK_FUNCS(atexit,,hard_bailout)
fi
if test "$BUILD_LVMLOCKD" = yes; then
AS_IF([test "$HAVE_REALTIME" != yes], [AC_MSG_ERROR([Realtime clock support is mandatory for lvmlockd.])])
AC_CHECK_FUNCS(strtoull,,hard_bailout)
AC_CHECK_FUNCS(clock_gettime strtoull,,hard_bailout)
fi
if test "$BUILD_LVMPOLLD" = yes; then
@@ -1822,7 +1836,7 @@ if test "$CLUSTER" != none; then
AC_CHECK_FUNCS(socket,,hard_bailout)
fi
if test "$BUILD_DMEVENTD" = yes; then
if test "$DMEVENTD" = yes; then
AC_CHECK_HEADERS(arpa/inet.h,,hard_bailout)
fi
@@ -1838,30 +1852,25 @@ if test "$UDEV_SYNC" = yes; then
AC_CHECK_HEADERS(sys/ipc.h sys/sem.h,,hard_bailout)
fi
if test "$BUILD_DMFILEMAPD" = yes; then
AC_CHECK_HEADERS([sys/inotify.h],,hard_bailout)
fi
################################################################################
AC_PATH_TOOL(MODPROBE_CMD, modprobe, [], [$PATH_SBIN])
AC_PATH_TOOL(MODPROBE_CMD, modprobe)
if test -n "$MODPROBE_CMD"; then
AC_DEFINE_UNQUOTED([MODPROBE_CMD], ["$MODPROBE_CMD"], [The path to 'modprobe', if available.])
fi
SYSCONFDIR="$(eval echo $(eval echo $sysconfdir))"
SBINDIR="$(eval echo $(eval echo $sbindir))"
LVM_PATH="$SBINDIR/lvm"
lvm_exec_prefix=$exec_prefix
test "$lvm_exec_prefix" = NONE && lvm_exec_prefix=$prefix
test "$lvm_exec_prefix" = NONE && lvm_exec_prefix=$ac_default_prefix
LVM_PATH="$lvm_exec_prefix/sbin/lvm"
AC_DEFINE_UNQUOTED(LVM_PATH, ["$LVM_PATH"], [Path to lvm binary.])
USRSBINDIR="$(eval echo $(eval echo $usrsbindir))"
CLVMD_PATH="$USRSBINDIR/clvmd"
clvmd_prefix=$ac_default_prefix
test "$prefix" != NONE && clvmd_prefix=$prefix
CLVMD_PATH="$clvmd_prefix/sbin/clvmd"
AC_DEFINE_UNQUOTED(CLVMD_PATH, ["$CLVMD_PATH"], [Path to clvmd binary.])
FSADM_PATH="$SBINDIR/fsadm"
AC_DEFINE_UNQUOTED(FSADM_PATH, ["$FSADM_PATH"], [Path to fsadm binary.])
################################################################################
dnl -- dmeventd pidfile and executable path
if test "$BUILD_DMEVENTD" = yes; then
@@ -1879,7 +1888,7 @@ if test "$BUILD_DMEVENTD" = yes; then
AC_HELP_STRING([--with-dmeventd-path=PATH],
[dmeventd path [EPREFIX/sbin/dmeventd]]),
DMEVENTD_PATH=$withval,
DMEVENTD_PATH="$SBINDIR/dmeventd")
DMEVENTD_PATH="$lvm_exec_prefix/sbin/dmeventd")
AC_DEFINE_UNQUOTED(DMEVENTD_PATH, ["$DMEVENTD_PATH"],
[Path to dmeventd binary.])
fi
@@ -1922,17 +1931,13 @@ AC_ARG_WITH(default-cache-subdir,
AC_DEFINE_UNQUOTED(DEFAULT_CACHE_SUBDIR, ["$DEFAULT_CACHE_SUBDIR"],
[Name of default metadata cache subdirectory.])
# Select default system locking dir, prefer /run/lock over /var/lock
DEFAULT_SYS_LOCK_DIR="$RUN_DIR/lock"
test -d "$DEFAULT_SYS_LOCK_DIR" || DEFAULT_SYS_LOCK_DIR="/var/lock"
# Support configurable locking subdir for lvm
AC_ARG_WITH(default-locking-dir,
AC_HELP_STRING([--with-default-locking-dir=DIR],
[default locking directory [autodetect_lock_dir/lvm]]),
DEFAULT_LOCK_DIR=$withval,
[AC_MSG_CHECKING(for default lock directory)
DEFAULT_LOCK_DIR="$DEFAULT_SYS_LOCK_DIR/lvm"
DEFAULT_LOCK_DIR="$RUN_DIR/lock/lvm"
test -d "$RUN_DIR/lock" || DEFAULT_LOCK_DIR="/var/lock/lvm"
AC_MSG_RESULT($DEFAULT_LOCK_DIR)])
AC_DEFINE_UNQUOTED(DEFAULT_LOCK_DIR, ["$DEFAULT_LOCK_DIR"],
[Name of default locking directory.])
@@ -1974,8 +1979,6 @@ LVM_MINOR=`echo "$VER" | $AWK -F '.' '{print $2}'`
LVM_PATCHLEVEL=`echo "$VER" | $AWK -F '[[(.]]' '{print $3}'`
LVM_LIBAPI=`echo "$VER" | $AWK -F '[[()]]' '{print $2}'`
AC_DEFINE_UNQUOTED(LVM_CONFIGURE_LINE, "$CONFIGURE_LINE", [configure command line used])
################################################################################
AC_SUBST(APPLIB)
AC_SUBST(AWK)
@@ -1988,7 +1991,7 @@ AC_SUBST(BUILD_LVMPOLLD)
AC_SUBST(BUILD_LVMLOCKD)
AC_SUBST(BUILD_LOCKDSANLOCK)
AC_SUBST(BUILD_LOCKDDLM)
AC_SUBST(BUILD_DMFILEMAPD)
AC_SUBST(BUILD_NOTIFYDBUS)
AC_SUBST(CACHE)
AC_SUBST(CFLAGS)
AC_SUBST(CFLOW_CMD)
@@ -2019,6 +2022,7 @@ AC_SUBST(DEFAULT_CACHE_SUBDIR)
AC_SUBST(DEFAULT_DATA_ALIGNMENT)
AC_SUBST(DEFAULT_DM_RUN_DIR)
AC_SUBST(DEFAULT_LOCK_DIR)
AC_SUBST(DEFAULT_FALLBACK_TO_LVM1)
AC_SUBST(DEFAULT_MIRROR_SEGTYPE)
AC_SUBST(DEFAULT_PID_DIR)
AC_SUBST(DEFAULT_PROFILE_SUBDIR)
@@ -2026,7 +2030,6 @@ AC_SUBST(DEFAULT_RAID10_SEGTYPE)
AC_SUBST(DEFAULT_RUN_DIR)
AC_SUBST(DEFAULT_SPARSE_SEGTYPE)
AC_SUBST(DEFAULT_SYS_DIR)
AC_SUBST(DEFAULT_SYS_LOCK_DIR)
AC_SUBST(DEFAULT_USE_BLKID_WIPING)
AC_SUBST(DEFAULT_USE_LVMETAD)
AC_SUBST(DEFAULT_USE_LVMPOLLD)
@@ -2035,11 +2038,11 @@ AC_SUBST(DEVMAPPER)
AC_SUBST(DLM_CFLAGS)
AC_SUBST(DLM_LIBS)
AC_SUBST(DL_LIBS)
AC_SUBST(DMEVENTD)
AC_SUBST(DMEVENTD_PATH)
AC_SUBST(DM_LIB_PATCHLEVEL)
AC_SUBST(ELDFLAGS)
AC_SUBST(FSADM)
AC_SUBST(FSADM_PATH)
AC_SUBST(BLKDEACTIVATE)
AC_SUBST(HAVE_LIBDL)
AC_SUBST(HAVE_REALTIME)
@@ -2049,6 +2052,8 @@ AC_SUBST(JOBS)
AC_SUBST(LDDEPS)
AC_SUBST(LIBS)
AC_SUBST(LIB_SUFFIX)
AC_SUBST(LVM1)
AC_SUBST(LVM1_FALLBACK)
AC_SUBST(LVM_VERSION)
AC_SUBST(LVM_LIBAPI)
AC_SUBST(LVM_MAJOR)
@@ -2063,8 +2068,8 @@ AC_SUBST(MIRRORS)
AC_SUBST(MSGFMT)
AC_SUBST(OCF)
AC_SUBST(OCFDIR)
AC_SUBST(ODIRECT)
AC_SUBST(PKGCONFIG)
AC_SUBST(POOL)
AC_SUBST(M_LIBS)
AC_SUBST(PTHREAD_LIBS)
AC_SUBST(PYTHON2)
@@ -2080,22 +2085,20 @@ AC_SUBST(PYTHON2DIR)
AC_SUBST(PYTHON3DIR)
AC_SUBST(QUORUM_CFLAGS)
AC_SUBST(QUORUM_LIBS)
AC_SUBST(RT_LIBS)
AC_SUBST(RAID)
AC_SUBST(RT_LIB)
AC_SUBST(READLINE_LIBS)
AC_SUBST(EDITLINE_LIBS)
AC_SUBST(REPLICATORS)
AC_SUBST(SACKPT_CFLAGS)
AC_SUBST(SACKPT_LIBS)
AC_SUBST(SALCK_CFLAGS)
AC_SUBST(SALCK_LIBS)
AC_SUBST(SBINDIR)
AC_SUBST(SELINUX_LIBS)
AC_SUBST(SELINUX_PC)
AC_SUBST(SYSCONFDIR)
AC_SUBST(SYSTEMD_LIBS)
AC_SUBST(SNAPSHOTS)
AC_SUBST(STATICDIR)
AC_SUBST(STATIC_LINK)
AC_SUBST(TESTING)
AC_SUBST(TESTSUITE_DATA)
AC_SUBST(THIN)
AC_SUBST(THIN_CHECK_CMD)
@@ -2113,7 +2116,6 @@ AC_SUBST(UDEV_SYSTEMD_BACKGROUND_JOBS)
AC_SUBST(UDEV_RULE_EXEC_DETECTION)
AC_SUBST(UDEV_HAS_BUILTIN_BLKID)
AC_SUBST(USE_TRACKING)
AC_SUBST(USRSBINDIR)
AC_SUBST(VALGRIND_POOL)
AC_SUBST(WRITE_INSTALL)
AC_SUBST(DMEVENTD_PIDFILE)
@@ -2152,17 +2154,11 @@ daemons/dmeventd/plugins/raid/Makefile
daemons/dmeventd/plugins/mirror/Makefile
daemons/dmeventd/plugins/snapshot/Makefile
daemons/dmeventd/plugins/thin/Makefile
daemons/dmeventd/plugins/vdo/Makefile
daemons/dmfilemapd/Makefile
daemons/lvmdbusd/Makefile
daemons/lvmdbusd/lvmdbusd
daemons/lvmdbusd/lvmdb.py
daemons/lvmdbusd/lvm_shell_proxy.py
daemons/lvmdbusd/path.py
daemons/lvmetad/Makefile
daemons/lvmpolld/Makefile
daemons/lvmlockd/Makefile
device_mapper/Makefile
conf/Makefile
conf/example.conf
conf/lvmlocal.conf
@@ -2171,8 +2167,16 @@ conf/metadata_profile_template.profile
include/.symlinks
include/Makefile
lib/Makefile
lib/format1/Makefile
lib/format_pool/Makefile
lib/locking/Makefile
lib/mirror/Makefile
lib/replicator/Makefile
include/lvm-version.h
lib/raid/Makefile
lib/snapshot/Makefile
lib/thin/Makefile
lib/cache_segtype/Makefile
libdaemon/Makefile
libdaemon/client/Makefile
libdaemon/server/Makefile
@@ -2209,14 +2213,15 @@ scripts/lvm2_monitoring_init_red_hat
scripts/lvm2_monitoring_systemd_red_hat.service
scripts/lvm2_pvscan_systemd_red_hat@.service
scripts/lvm2_tmpfiles_red_hat.conf
scripts/lvmdump.sh
scripts/Makefile
test/Makefile
test/api/Makefile
test/api/python_lvm_unit.py
test/unit/Makefile
tools/Makefile
udev/Makefile
unit-tests/datastruct/Makefile
unit-tests/regex/Makefile
unit-tests/mm/Makefile
])
AC_OUTPUT
@@ -2224,14 +2229,10 @@ AS_IF([test -n "$THIN_CONFIGURE_WARN"],
[AC_MSG_WARN([Support for thin provisioning is limited since some thin provisioning tools are missing!])])
AS_IF([test -n "$THIN_CHECK_VERSION_WARN"],
[AC_MSG_WARN([You should also install latest thin_check vsn 0.7.0 (or later) for lvm2 thin provisioning])])
[AC_MSG_WARN([You should also install thin_check vsn 0.3.2 (or later) to use lvm2 thin provisioning])])
AS_IF([test -n "$CACHE_CONFIGURE_WARN"],
[AC_MSG_WARN([Support for cache is limited since some cache tools are missing!])])
AS_IF([test -n "$CACHE_CHECK_VERSION_WARN"],
[AC_MSG_WARN([You should install latest cache_check vsn 0.7.0 to use lvm2 cache metadata format 2])])
AS_IF([test "$ODIRECT" != yes],
[AC_MSG_WARN([O_DIRECT disabled: low-memory pvmove may lock up])])


@@ -41,21 +41,6 @@ struct lv_segment *last_seg(const struct logical_volume *lv)
return ((struct lv_segment **)lv)[0];
}
const char *find_config_tree_str(struct cmd_context *cmd, int id, struct profile *profile)
{
return "STRING";
}
/*
struct logical_volume *origin_from_cow(const struct logical_volume *lv)
{
if (lv)
return lv;
__coverity_panic__();
}
*/
/* simple_memccpy() from glibc */
void *memccpy(void *dest, const void *src, int c, size_t n)
{
@@ -86,17 +71,6 @@ void model_FD_ZERO(void *fdset)
((long*)fdset)[i] = 0;
}
/* Resent Coverity reports quite weird errors... */
int *__errno_location(void)
{
}
const unsigned short **__ctype_b_loc (void)
{
}
/*
* Added extra pointer check to not need these models,
* for now just keep then in file


@@ -41,19 +41,15 @@ ifeq ("@BUILD_LVMPOLLD@", "yes")
endif
ifeq ("@BUILD_LVMLOCKD@", "yes")
SUBDIRS += lvmlockd
SUBDIRS += lvmlockd
endif
ifeq ("@BUILD_LVMDBUSD@", "yes")
SUBDIRS += lvmdbusd
endif
ifeq ("@BUILD_DMFILEMAPD@", "yes")
SUBDIRS += dmfilemapd
SUBDIRS += lvmdbusd
endif
ifeq ($(MAKECMDGOALS),distclean)
SUBDIRS = clvmd cmirrord dmeventd lvmetad lvmpolld lvmlockd lvmdbusd dmfilemapd
SUBDIRS = clvmd cmirrord dmeventd lvmetad lvmpolld lvmlockd lvmdbusd
endif
include $(top_builddir)/make.tmpl


@@ -31,9 +31,9 @@ SALCK_LIBS = @SALCK_LIBS@
SALCK_CFLAGS = @SALCK_CFLAGS@
SOURCES = \
clvmd-command.c\
clvmd.c\
lvm-functions.c\
clvmd-command.c \
clvmd.c \
lvm-functions.c \
refresh_clvmd.c
ifneq (,$(findstring cman,, "@CLVMD@,"))
@@ -72,17 +72,26 @@ endif
TARGETS = \
clvmd
LVMLIBS = $(LVMINTERNAL_LIBS)
ifeq ("@DMEVENTD@", "yes")
LVMLIBS += -ldevmapper-event
endif
include $(top_builddir)/make.tmpl
LIBS += $(LVMINTERNAL_LIBS) -ldevmapper $(PTHREAD_LIBS) -laio
LVMLIBS += -ldevmapper
LIBS += $(PTHREAD_LIBS)
CFLAGS += -fno-strict-aliasing $(EXTRA_EXEC_CFLAGS)
LDFLAGS += $(EXTRA_EXEC_LDFLAGS)
INSTALL_TARGETS = \
install_clvmd
clvmd: $(OBJECTS) $(top_builddir)/lib/liblvm-internal.a
$(CC) $(CFLAGS) $(LDFLAGS) $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) \
-o clvmd $(OBJECTS) $(LMLIBS) $(LIBS)
$(CC) $(CFLAGS) $(LDFLAGS) -o clvmd $(OBJECTS) \
$(LVMLIBS) $(LMLIBS) $(LIBS)
.PHONY: install_clvmd


@@ -108,6 +108,7 @@ int do_command(struct local_client *client, struct clvm_header *msg, int msglen,
lock_flags = args[1];
lockname = &args[2];
/* Check to see if the VG is in use by LVM1 */
status = do_check_lvm1(lockname);
do_lock_vg(lock_cmd, lock_flags, lockname);
break;
@@ -170,10 +171,8 @@ int do_command(struct local_client *client, struct clvm_header *msg, int msglen,
/* Check the status of the command and return the error text */
if (status) {
if (*buf)
*retlen = dm_snprintf(*buf, buflen, "%s", strerror(status)) + 1;
else
*retlen = 0;
*retlen = 1 + ((*buf) ? dm_snprintf(*buf, buflen, "%s",
strerror(status)) : -1);
}
return status;
@@ -207,7 +206,7 @@ static int lock_vg(struct local_client *client)
lock_mode = ((int) lock_cmd & LCK_TYPE_MASK);
/* lock_flags = args[1]; */
lockname = &args[2];
DEBUGLOG("(%p) doing PRE command LOCK_VG '%s' at %x\n", client, lockname, lock_cmd);
DEBUGLOG("doing PRE command LOCK_VG '%s' at %x (client=%p)\n", lockname, lock_cmd, client);
if (lock_mode == LCK_UNLOCK) {
if (!(lkid = (int) (long) dm_hash_lookup(lock_hash, lockname)))
@@ -324,7 +323,7 @@ void cmd_client_cleanup(struct local_client *client)
int lkid;
char *lockname;
DEBUGLOG("(%p) Client thread cleanup\n", client);
DEBUGLOG("Client thread cleanup (%p)\n", client);
if (!client->bits.localsock.private)
return;
@@ -333,7 +332,7 @@ void cmd_client_cleanup(struct local_client *client)
dm_hash_iterate(v, lock_hash) {
lkid = (int)(long)dm_hash_get_data(lock_hash, v);
lockname = dm_hash_get_key(lock_hash, v);
DEBUGLOG("(%p) Cleanup: Unlocking lock %s %x\n", client, lockname, lkid);
DEBUGLOG("Cleanup (%p): Unlocking lock %s %x\n", client, lockname, lkid);
(void) sync_unlock(lockname, lkid);
}


@@ -532,7 +532,6 @@ static int _cluster_fd_callback(struct local_client *fd, char *buf, int len,
static int _cluster_send_message(const void *buf, int msglen, const char *csid,
const char *errtext)
{
static pthread_mutex_t _mutex = PTHREAD_MUTEX_INITIALIZER;
struct iovec iov[2];
cs_error_t err;
int target_node;
@@ -547,10 +546,7 @@ static int _cluster_send_message(const void *buf, int msglen, const char *csid,
iov[1].iov_base = (char *)buf;
iov[1].iov_len = msglen;
pthread_mutex_lock(&_mutex);
err = cpg_mcast_joined(cpg_handle, CPG_TYPE_AGREED, iov, 2);
pthread_mutex_unlock(&_mutex);
return cs_to_errno(err);
}


@@ -425,6 +425,8 @@ static void _add_up_node(const char *csid)
DEBUGLOG("openais_add_up_node %d\n", ninfo->nodeid);
ninfo->state = NODE_CLVMD;
return;
}
/* Call a callback for each node, so the caller knows whether it's up or down */


@@ -58,7 +58,6 @@
/* Head of the fd list. Also contains
the cluster_socket details */
static struct local_client local_client_head;
static int _local_client_count = 0;
static unsigned short global_xid = 0; /* Last transaction ID issued */
@@ -69,37 +68,6 @@ static unsigned max_csid_len;
static unsigned max_cluster_message;
static unsigned max_cluster_member_name_len;
static void _add_client(struct local_client *new_client, struct local_client *existing_client)
{
_local_client_count++;
DEBUGLOG("(%p) Adding listener for fd %d. (Now %d monitored fds.)\n", new_client, new_client->fd, _local_client_count);
new_client->next = existing_client->next;
existing_client->next = new_client;
}
int add_client(struct local_client *new_client)
{
_add_client(new_client, &local_client_head);
return 0;
}
/* Returns 0 if delfd is found and removed from list */
static int _del_client(struct local_client *delfd)
{
struct local_client *lastfd, *thisfd;
for (lastfd = &local_client_head; (thisfd = lastfd->next); lastfd = thisfd)
if (thisfd == delfd) {
DEBUGLOG("(%p) Removing listener for fd %d\n", thisfd, thisfd->fd);
lastfd->next = delfd->next;
_local_client_count--;
return 0;
}
return 1;
}
/* Structure of items on the LVM thread list */
struct lvm_thread_cmd {
struct dm_list list;
@@ -124,7 +92,6 @@ static const size_t STACK_SIZE = 128 * 1024;
static pthread_attr_t stack_attr;
static int lvm_thread_exit = 0;
static pthread_mutex_t lvm_thread_mutex;
static pthread_mutex_t _debuglog_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t lvm_thread_cond;
static pthread_barrier_t lvm_start_barrier;
static struct dm_list lvm_cmd_head;
@@ -251,17 +218,14 @@ void debuglog(const char *fmt, ...)
switch (clvmd_get_debug()) {
case DEBUG_STDERR:
pthread_mutex_lock(&_debuglog_mutex);
va_start(ap,fmt);
time(&P);
fprintf(stderr, "CLVMD[%x]: %.15s ", (int)pthread_self(), ctime_r(&P, buf_ctime) + 4);
vfprintf(stderr, fmt, ap);
va_end(ap);
fflush(stderr);
pthread_mutex_unlock(&_debuglog_mutex);
break;
case DEBUG_SYSLOG:
pthread_mutex_lock(&_debuglog_mutex);
if (!syslog_init) {
openlog("clvmd", LOG_PID, LOG_DAEMON);
syslog_init = 1;
@@ -270,7 +234,6 @@ void debuglog(const char *fmt, ...)
va_start(ap,fmt);
vsyslog(LOG_DEBUG, fmt, ap);
va_end(ap);
pthread_mutex_unlock(&_debuglog_mutex);
break;
case DEBUG_OFF:
break;
@@ -554,7 +517,7 @@ int main(int argc, char *argv[])
/* Initialise the LVM thread variables */
dm_list_init(&lvm_cmd_head);
if (pthread_attr_init(&stack_attr) ||
pthread_attr_setstacksize(&stack_attr, STACK_SIZE + getpagesize())) {
pthread_attr_setstacksize(&stack_attr, STACK_SIZE)) {
log_sys_error("pthread_attr_init", "");
exit(1);
}
@@ -621,7 +584,6 @@ int main(int argc, char *argv[])
local_client_head.fd = clops->get_main_cluster_fd();
local_client_head.type = CLUSTER_MAIN_SOCK;
local_client_head.callback = clops->cluster_fd_callback;
_local_client_count++;
/* Add the local socket to the list */
if (!(newfd = dm_zalloc(sizeof(struct local_client)))) {
@@ -632,14 +594,14 @@ int main(int argc, char *argv[])
newfd->fd = local_sock;
newfd->type = LOCAL_RENDEZVOUS;
newfd->callback = local_rendezvous_callback;
(void) add_client(newfd);
newfd->next = local_client_head.next;
local_client_head.next = newfd;
/* This needs to be started after cluster initialisation
as it may need to take out locks */
DEBUGLOG("Starting LVM thread\n");
DEBUGLOG("(%p) Main cluster socket fd %d with local socket %d (%p)\n",
&local_client_head, local_client_head.fd, newfd->fd, newfd);
DEBUGLOG("Main cluster socket fd %d (%p) with local socket %d (%p)\n",
local_client_head.fd, &local_client_head, newfd->fd, newfd);
/* Don't let anyone else to do work until we are started */
if (pthread_create(&lvm_thread, &stack_attr, lvm_thread_fn, &lvm_params)) {
@@ -675,7 +637,6 @@ int main(int argc, char *argv[])
while ((delfd = local_client_head.next)) {
local_client_head.next = delfd->next;
_local_client_count--;
/* Failing cleanup_zombie leaks... */
if (delfd->type == LOCAL_SOCK && !cleanup_zombie(delfd))
cmd_client_cleanup(delfd); /* calls sync_unlock */
@@ -737,13 +698,13 @@ static int local_rendezvous_callback(struct local_client *thisfd, char *buf,
pthread_mutex_init(&newfd->bits.localsock.mutex, NULL);
if (fcntl(client_fd, F_SETFD, 1))
DEBUGLOG("(%p) Setting CLOEXEC on client fd %d failed: %s\n", thisfd, client_fd, strerror(errno));
DEBUGLOG("Setting CLOEXEC on client fd failed: %s\n", strerror(errno));
newfd->fd = client_fd;
newfd->type = LOCAL_SOCK;
newfd->callback = local_sock_callback;
newfd->bits.localsock.all_success = 1;
DEBUGLOG("(%p) Got new connection on fd %d\n", newfd, newfd->fd);
DEBUGLOG("Got new connection on fd %d (%p)\n", newfd->fd, newfd);
*new_client = newfd;
}
return 1;
@@ -765,8 +726,8 @@ static int local_pipe_callback(struct local_client *thisfd, char *buf,
if (len == sizeof(int))
memcpy(&status, buffer, sizeof(int));
DEBUGLOG("(%p) Read on pipe %d, %d bytes, status %d\n",
thisfd, thisfd->fd, len, status);
DEBUGLOG("Read on pipe %d, %d bytes, status %d\n",
thisfd->fd, len, status);
/* EOF on pipe or an error, close it */
if (len <= 0) {
@@ -789,11 +750,11 @@ static int local_pipe_callback(struct local_client *thisfd, char *buf,
}
return -1;
} else {
DEBUGLOG("(%p) Background routine status was %d, sock_client %p\n",
thisfd, status, sock_client);
DEBUGLOG("Background routine status was %d, sock_client (%p)\n",
status, sock_client);
/* But has the client gone away ?? */
if (!sock_client) {
DEBUGLOG("(%p) Got pipe response for dead client, ignoring it\n", thisfd);
DEBUGLOG("Got pipe response for dead client, ignoring it\n");
} else {
/* If error then just return that code */
if (status)
@@ -833,7 +794,7 @@ static void timedout_callback(struct local_client *client, const char *csid,
return;
clops->name_from_csid(csid, nodename);
DEBUGLOG("(%p) Checking for a reply from %s\n", client, nodename);
DEBUGLOG("Checking for a reply from %s\n", nodename);
pthread_mutex_lock(&client->bits.localsock.mutex);
reply = client->bits.localsock.replies;
@@ -843,7 +804,7 @@ static void timedout_callback(struct local_client *client, const char *csid,
pthread_mutex_unlock(&client->bits.localsock.mutex);
if (!reply) {
DEBUGLOG("(%p) Node %s timed-out\n", client, nodename);
DEBUGLOG("Node %s timed-out\n", nodename);
add_reply_to_list(client, ETIMEDOUT, csid,
"Command timed out", 18);
}
@@ -858,7 +819,7 @@ static void timedout_callback(struct local_client *client, const char *csid,
*/
static void request_timed_out(struct local_client *client)
{
DEBUGLOG("(%p) Request timed-out. padding\n", client);
DEBUGLOG("Request timed-out. padding\n");
clops->cluster_do_node_callback(client, timedout_callback);
if (!client->bits.localsock.threadid)
@@ -887,17 +848,18 @@ static void main_loop(int cmd_timeout)
sigemptyset(&ss);
sigaddset(&ss, SIGINT);
sigaddset(&ss, SIGTERM);
if (pthread_sigmask(SIG_UNBLOCK, &ss, NULL))
log_warn("WARNING: Failed to unblock SIGCHLD.");
pthread_sigmask(SIG_UNBLOCK, &ss, NULL);
/* Main loop */
while (!quit) {
fd_set in;
int select_status;
struct local_client *thisfd, *nextfd;
struct local_client *thisfd;
struct timeval tv = { cmd_timeout, 0 };
int quorate = clops->is_quorate();
int client_count = 0;
int max_fd = 0;
struct local_client *lastfd = &local_client_head;
struct local_client *nextfd = local_client_head.next;
/* Wait on the cluster FD and all local sockets/pipes */
local_client_head.fd = clops->get_main_cluster_fd();
@@ -913,22 +875,21 @@ static void main_loop(int cmd_timeout)
fprintf(stderr, "WARNING: Your cluster may freeze up if the number of clvmd file descriptors (%d) exceeds %d.\n", max_fd + 1, FD_SETSIZE);
}
for (thisfd = &local_client_head; thisfd; thisfd = nextfd) {
nextfd = thisfd->next;
for (thisfd = &local_client_head; thisfd; thisfd = nextfd, nextfd = thisfd ? thisfd->next : NULL) {
if (thisfd->removeme && !cleanup_zombie(thisfd)) {
/* cleanup_zombie might have removed the next list element */
nextfd = thisfd->next;
(void) _del_client(thisfd);
DEBUGLOG("(%p) removeme set with %d monitored fds remaining\n", thisfd, _local_client_count);
struct local_client *free_fd = thisfd;
lastfd->next = nextfd;
DEBUGLOG("removeme set for %p with %d monitored fds remaining\n", free_fd, client_count - 1);
/* Queue cleanup, this also frees the client struct */
add_to_lvmqueue(thisfd, NULL, 0, NULL);
add_to_lvmqueue(free_fd, NULL, 0, NULL);
continue;
}
lastfd = thisfd;
if (thisfd->removeme)
continue;
@@ -978,15 +939,16 @@ static void main_loop(int cmd_timeout)
type == CLUSTER_INTERNAL)
goto closedown;
DEBUGLOG("(%p) ret == %d, errno = %d. removing client\n",
thisfd, ret, errno);
DEBUGLOG("ret == %d, errno = %d. removing client\n",
ret, errno);
thisfd->removeme = 1;
continue;
}
/* New client...simply add it to the list */
if (newfd) {
_add_client(newfd, thisfd);
newfd->next = thisfd->next;
thisfd->next = newfd;
thisfd = newfd;
}
}
@@ -1004,8 +966,8 @@ static void main_loop(int cmd_timeout)
thisfd->bits.localsock.expected_replies !=
thisfd->bits.localsock.num_replies) {
/* Send timed out message + replies we already have */
DEBUGLOG("Request to client %p timed-out (send: %ld, now: %ld)\n",
thisfd, thisfd->bits.localsock.sent_time, the_time);
DEBUGLOG("Request timed-out (send: %ld, now: %ld)\n",
thisfd->bits.localsock.sent_time, the_time);
thisfd->bits.localsock.all_success = 0;
@@ -1106,31 +1068,31 @@ static void be_daemon(int timeout)
break;
default: /* Parent */
(void) close(devnull);
(void) close(child_pipe[1]);
wait_for_child(child_pipe[0], timeout); /* noreturn */
wait_for_child(child_pipe[0], timeout);
}
/* Detach ourself from the calling environment */
if ((dup2(devnull, STDIN_FILENO) == -1) ||
(dup2(devnull, STDOUT_FILENO) == -1) ||
(dup2(devnull, STDERR_FILENO) == -1)) {
if (close(0) || close(1) || close(2)) {
perror("Error closing terminal FDs");
exit(4);
}
setsid();
if (dup2(devnull, 0) < 0 || dup2(devnull, 1) < 0
|| dup2(devnull, 2) < 0) {
perror("Error setting terminal FDs to /dev/null");
log_error("Error setting terminal FDs to /dev/null: %m");
exit(5);
}
if ((devnull > STDERR_FILENO) && close(devnull)) {
log_sys_error("close", "/dev/null");
exit(7);
}
if (chdir("/")) {
log_error("Error setting current directory to /: %m");
exit(6);
}
setsid();
}
static int verify_message(char *buf, int len)
@@ -1217,8 +1179,8 @@ static int cleanup_zombie(struct local_client *thisfd)
if (!thisfd->bits.localsock.cleanup_needed)
return 0;
DEBUGLOG("(%p) EOF on local socket %d: inprogress=%d\n",
thisfd, thisfd->fd, thisfd->bits.localsock.in_progress);
DEBUGLOG("EOF on local socket: inprogress=%d\n",
thisfd->bits.localsock.in_progress);
if ((pipe_client = thisfd->bits.localsock.pipe_client))
pipe_client = pipe_client->bits.pipe.client;
@@ -1240,7 +1202,7 @@ static int cleanup_zombie(struct local_client *thisfd)
/* Kill the subthread & free resources */
if (thisfd->bits.localsock.threadid) {
DEBUGLOG("(%p) Waiting for pre&post thread\n", pipe_client);
DEBUGLOG("Waiting for pre&post thread (%p)\n", pipe_client);
pthread_mutex_lock(&thisfd->bits.localsock.mutex);
thisfd->bits.localsock.state = PRE_COMMAND;
thisfd->bits.localsock.finished = 1;
@@ -1251,22 +1213,26 @@ static int cleanup_zombie(struct local_client *thisfd)
(void **) &status)))
log_sys_error("pthread_join", "");
DEBUGLOG("(%p) Joined pre&post thread\n", pipe_client);
DEBUGLOG("Joined pre&post thread\n");
thisfd->bits.localsock.threadid = 0;
/* Remove the pipe client */
if (thisfd->bits.localsock.pipe_client) {
struct local_client *delfd = thisfd->bits.localsock.pipe_client;
struct local_client *delfd;
struct local_client *lastfd;
(void) close(delfd->fd); /* Close pipe */
(void) close(thisfd->bits.localsock.pipe_client->fd); /* Close pipe */
(void) close(thisfd->bits.localsock.pipe);
/* Remove pipe client */
if (!_del_client(delfd)) {
dm_free(delfd);
thisfd->bits.localsock.pipe_client = NULL;
}
for (lastfd = &local_client_head; (delfd = lastfd->next); lastfd = delfd)
if (thisfd->bits.localsock.pipe_client == delfd) {
thisfd->bits.localsock.pipe_client = NULL;
lastfd->next = delfd->next;
dm_free(delfd);
break;
}
}
}
@@ -1297,7 +1263,7 @@ static int read_from_local_sock(struct local_client *thisfd)
if (len == -1 && errno == EINTR)
return 1;
DEBUGLOG("(%p) Read on local socket %d, len = %d\n", thisfd, thisfd->fd, len);
DEBUGLOG("Read on local socket %d, len = %d\n", thisfd->fd, len);
if (len && verify_message(buffer, len) < 0) {
log_error("read_from_local_sock from %d len %d bad verify.",
@@ -1371,15 +1337,15 @@ static int read_from_local_sock(struct local_client *thisfd)
char *argptr = inheader->node + strlen(inheader->node) + 1;
while (missing_len > 0) {
DEBUGLOG("(%p) got %d bytes, need another %d (total %d)\n",
thisfd, argslen, missing_len, inheader->arglen);
DEBUGLOG("got %d bytes, need another %d (total %d)\n",
argslen, missing_len, inheader->arglen);
len = read(thisfd->fd, argptr + argslen, missing_len);
if (len == -1 && errno == EINTR)
continue;
if (len <= 0) {
/* EOF or error on socket */
DEBUGLOG("(%p) EOF on local socket\n", thisfd);
DEBUGLOG("EOF on local socket\n");
dm_free(thisfd->bits.localsock.cmd);
thisfd->bits.localsock.cmd = NULL;
return 0;
@@ -1407,7 +1373,7 @@ static int read_from_local_sock(struct local_client *thisfd)
.status = ENOENT
};
DEBUGLOG("(%p) Unknown node: '%s'\n", thisfd, inheader->node);
DEBUGLOG("Unknown node: '%s'\n", inheader->node);
send_message(&reply, sizeof(reply), our_csid, thisfd->fd,
"Error sending ENOENT reply to local user");
thisfd->bits.localsock.expected_replies = 0;
@@ -1433,7 +1399,7 @@ static int read_from_local_sock(struct local_client *thisfd)
.status = EBUSY
};
DEBUGLOG("(%p) Creating pipe failed: %s\n", thisfd, strerror(errno));
DEBUGLOG("Creating pipe failed: %s\n", strerror(errno));
send_message(&reply, sizeof(reply), our_csid, thisfd->fd,
"Error sending EBUSY reply to local user");
return len;
@@ -1453,7 +1419,7 @@ static int read_from_local_sock(struct local_client *thisfd)
return len;
}
DEBUGLOG("(%p) Creating pipe, [%d, %d]\n", thisfd, comms_pipe[0], comms_pipe[1]);
DEBUGLOG("Creating pipe, [%d, %d]\n", comms_pipe[0], comms_pipe[1]);
if (fcntl(comms_pipe[0], F_SETFD, 1))
DEBUGLOG("setting CLOEXEC on pipe[0] failed: %s\n", strerror(errno));
@@ -1464,8 +1430,8 @@ static int read_from_local_sock(struct local_client *thisfd)
newfd->type = THREAD_PIPE;
newfd->callback = local_pipe_callback;
newfd->bits.pipe.client = thisfd;
_add_client(newfd, thisfd);
newfd->next = thisfd->next;
thisfd->next = newfd;
/* Store a cross link to the pipe */
thisfd->bits.localsock.pipe_client = newfd;
@@ -1478,10 +1444,10 @@ static int read_from_local_sock(struct local_client *thisfd)
thisfd->bits.localsock.in_progress = TRUE;
thisfd->bits.localsock.state = PRE_COMMAND;
thisfd->bits.localsock.cleanup_needed = 1;
DEBUGLOG("(%p) Creating pre&post thread for pipe fd %d\n", newfd, newfd->fd);
DEBUGLOG("Creating pre&post thread for pipe fd %d (%p)\n", newfd->fd, newfd);
status = pthread_create(&thisfd->bits.localsock.threadid,
&stack_attr, pre_and_post_thread, thisfd);
DEBUGLOG("(%p) Created pre&post thread, state = %d\n", newfd, status);
DEBUGLOG("Created pre&post thread, state = %d\n", status);
return len;
}
@@ -1489,6 +1455,13 @@ static int read_from_local_sock(struct local_client *thisfd)
/* Add a file descriptor from the cluster or comms interface to
our list of FDs for select
*/
int add_client(struct local_client *new_client)
{
new_client->next = local_client_head.next;
local_client_head.next = new_client;
return 0;
}
/* Called when the pre-command has completed successfully - we
now execute the real command on all the requested nodes */
@@ -1499,8 +1472,8 @@ static int distribute_command(struct local_client *thisfd)
int len = thisfd->bits.localsock.cmd_len;
thisfd->xid = global_xid++;
DEBUGLOG("(%p) distribute command: XID = %d, flags=0x%x (%s%s)\n",
thisfd, thisfd->xid, inheader->flags,
DEBUGLOG("distribute command: XID = %d, flags=0x%x (%s%s)\n",
thisfd->xid, inheader->flags,
(inheader->flags & CLVMD_FLAG_LOCAL) ? "LOCAL" : "",
(inheader->flags & CLVMD_FLAG_REMOTE) ? "REMOTE" : "");
@@ -1522,7 +1495,7 @@ static int distribute_command(struct local_client *thisfd)
*/
add_to_lvmqueue(thisfd, inheader, len, NULL);
DEBUGLOG("(%p) Sending message to all cluster nodes\n", thisfd);
DEBUGLOG("Sending message to all cluster nodes\n");
inheader->xid = thisfd->xid;
send_message(inheader, len, NULL, -1,
"Error forwarding message to cluster");
@@ -1541,11 +1514,11 @@ static int distribute_command(struct local_client *thisfd)
/* Are we the requested node ?? */
if (memcmp(csid, our_csid, max_csid_len) == 0) {
DEBUGLOG("(%p) Doing command on local node only\n", thisfd);
DEBUGLOG("Doing command on local node only\n");
add_to_lvmqueue(thisfd, inheader, len, NULL);
} else {
DEBUGLOG("(%p) Sending message to single node: %s\n",
thisfd, inheader->node);
DEBUGLOG("Sending message to single node: %s\n",
inheader->node);
inheader->xid = thisfd->xid;
send_message(inheader, len, csid, -1,
"Error forwarding message to cluster node");
@@ -1556,7 +1529,7 @@ static int distribute_command(struct local_client *thisfd)
thisfd->bits.localsock.in_progress = TRUE;
thisfd->bits.localsock.expected_replies = 1;
thisfd->bits.localsock.num_replies = 0;
DEBUGLOG("(%p) Doing command explicitly on local node only\n", thisfd);
DEBUGLOG("Doing command explicitly on local node only\n");
add_to_lvmqueue(thisfd, inheader, len, NULL);
}
@@ -1682,7 +1655,7 @@ static void add_reply_to_list(struct local_client *client, int status,
reply->status = status;
clops->name_from_csid(csid, reply->node);
DEBUGLOG("(%p) Reply from node %s: %d bytes\n", client, reply->node, len);
DEBUGLOG("Reply from node %s: %d bytes\n", reply->node, len);
if (len > 0) {
if (!(reply->replymsg = dm_malloc(len)))
@@ -1709,8 +1682,8 @@ static void add_reply_to_list(struct local_client *client, int status,
client->bits.localsock.state = POST_COMMAND;
pthread_cond_signal(&client->bits.localsock.cond);
}
DEBUGLOG("(%p) Got %d replies, expecting: %d\n",
client, client->bits.localsock.num_replies,
DEBUGLOG("Got %d replies, expecting: %d\n",
client->bits.localsock.num_replies,
client->bits.localsock.expected_replies);
}
pthread_mutex_unlock(&client->bits.localsock.mutex);
@@ -1725,19 +1698,18 @@ static __attribute__ ((noreturn)) void *pre_and_post_thread(void *arg)
sigset_t ss;
int pipe_fd = client->bits.localsock.pipe;
DEBUGLOG("(%p) Pre&post thread pipe fd %d\n", client, pipe_fd);
DEBUGLOG("Pre&post thread (%p), pipe fd %d\n", client, pipe_fd);
pthread_mutex_lock(&client->bits.localsock.mutex);
/* Ignore SIGUSR1 (handled by master process) but enable
SIGUSR2 (kills subthreads) */
sigemptyset(&ss);
sigaddset(&ss, SIGUSR1);
if (pthread_sigmask(SIG_BLOCK, &ss, NULL))
log_warn("WARNING: Failed to block SIGUSR1.");
pthread_sigmask(SIG_BLOCK, &ss, NULL);
sigdelset(&ss, SIGUSR1);
sigaddset(&ss, SIGUSR2);
if (pthread_sigmask(SIG_UNBLOCK, &ss, NULL))
log_warn("WARNING: Failed to unblock SIGUSR2.");
pthread_sigmask(SIG_UNBLOCK, &ss, NULL);
/* Loop around doing PRE and POST functions until the client goes away */
while (!client->bits.localsock.finished) {
@@ -1746,7 +1718,7 @@ static __attribute__ ((noreturn)) void *pre_and_post_thread(void *arg)
if ((status = do_pre_command(client)))
client->bits.localsock.all_success = 0;
DEBUGLOG("(%p) Pre&post thread writes status %d down to pipe fd %d\n",
DEBUGLOG("Pre&post thread (%p) writes status %d down to pipe fd %d\n",
client, status, pipe_fd);
/* Tell the parent process we have finished this bit */
@@ -1764,13 +1736,13 @@ static __attribute__ ((noreturn)) void *pre_and_post_thread(void *arg)
/* We may need to wait for the condition variable before running the post command */
if (client->bits.localsock.state != POST_COMMAND &&
!client->bits.localsock.finished) {
DEBUGLOG("(%p) Pre&post thread waiting to do post command, state = %d\n",
DEBUGLOG("Pre&post thread (%p) waiting to do post command, state = %d\n",
client, client->bits.localsock.state);
pthread_cond_wait(&client->bits.localsock.cond,
&client->bits.localsock.mutex);
}
DEBUGLOG("(%p) Pre&post thread got post command condition...\n", client);
DEBUGLOG("Pre&post thread (%p) got post command condition...\n", client);
/* POST function must always run, even if the client aborts */
status = 0;
@@ -1784,15 +1756,15 @@ static __attribute__ ((noreturn)) void *pre_and_post_thread(void *arg)
next_pre:
if (client->bits.localsock.state != PRE_COMMAND &&
!client->bits.localsock.finished) {
DEBUGLOG("(%p) Pre&post thread waiting for next pre command\n", client);
DEBUGLOG("Pre&post thread (%p) waiting for next pre command\n", client);
pthread_cond_wait(&client->bits.localsock.cond,
&client->bits.localsock.mutex);
}
DEBUGLOG("(%p) Pre&post thread got pre command condition...\n", client);
DEBUGLOG("Pre&post thread (%p) got pre command condition...\n", client);
}
pthread_mutex_unlock(&client->bits.localsock.mutex);
DEBUGLOG("(%p) Pre&post thread finished\n", client);
DEBUGLOG("Pre&post thread (%p) finished\n", client);
pthread_exit(NULL);
}
@@ -1810,8 +1782,8 @@ static int process_local_command(struct clvm_header *msg, int msglen,
if (!(replybuf = dm_malloc(max_cluster_message)))
return -1;
DEBUGLOG("(%p) process_local_command: %s msg=%p, msglen =%d\n",
client, decode_cmd(msg->cmd), msg, msglen);
DEBUGLOG("process_local_command: %s msg=%p, msglen =%d, client=%p\n",
decode_cmd(msg->cmd), msg, msglen, client);
/* If remote flag is set, just set a successful status code. */
if (msg->flags & CLVMD_FLAG_REMOTE)
@@ -1826,8 +1798,8 @@ static int process_local_command(struct clvm_header *msg, int msglen,
if (xid == client->xid)
add_reply_to_list(client, status, our_csid, replybuf, replylen);
else
DEBUGLOG("(%p) Local command took too long, discarding xid %d, current is %d\n",
client, xid, client->xid);
DEBUGLOG("Local command took too long, discarding xid %d, current is %d\n",
xid, client->xid);
dm_free(replybuf);
@@ -1869,7 +1841,7 @@ static void send_local_reply(struct local_client *client, int status, int fd)
char *ptr;
int message_len = 0;
DEBUGLOG("(%p) Send local reply\n", client);
DEBUGLOG("Send local reply\n");
/* Work out the total size of the reply */
while (thisreply) {
@@ -1886,7 +1858,7 @@ static void send_local_reply(struct local_client *client, int status, int fd)
/* Add in the size of our header */
message_len = message_len + sizeof(struct clvm_header);
if (!(replybuf = dm_malloc(message_len))) {
DEBUGLOG("(%p) Memory allocation fails\n", client);
DEBUGLOG("Memory allocation fails\n");
return;
}
@@ -2001,9 +1973,6 @@ static int send_message(void *buf, int msglen, const char *csid, int fd,
return clops->cluster_send_message(buf, msglen, csid, errtext);
}
if (fd < 0)
return 0;
/* Make sure it all goes */
for (ptr = 0; ptr < msglen;) {
if ((len = write(fd, (char*)buf + ptr, msglen - ptr)) <= 0) {
@@ -2018,7 +1987,6 @@ static int send_message(void *buf, int msglen, const char *csid, int fd,
(void) nanosleep (&delay, &remtime);
continue;
}
DEBUGLOG("%s", errtext);
log_error("%s", errtext);
break;
}
@@ -2032,7 +2000,7 @@ static int process_work_item(struct lvm_thread_cmd *cmd)
{
/* If msg is NULL then this is a cleanup request */
if (cmd->msg == NULL) {
DEBUGLOG("(%p) process_work_item: free\n", cmd->client);
DEBUGLOG("process_work_item: free %p\n", cmd->client);
cmd_client_cleanup(cmd->client);
pthread_mutex_destroy(&cmd->client->bits.localsock.mutex);
pthread_cond_destroy(&cmd->client->bits.localsock.cond);
@@ -2041,11 +2009,11 @@ static int process_work_item(struct lvm_thread_cmd *cmd)
}
if (!cmd->remote) {
DEBUGLOG("(%p) process_work_item: local\n", cmd->client);
DEBUGLOG("process_work_item: local\n");
process_local_command(cmd->msg, cmd->msglen, cmd->client,
cmd->xid);
} else {
DEBUGLOG("(%p) process_work_item: remote\n", cmd->client);
DEBUGLOG("process_work_item: remote\n");
process_remote_command(cmd->msg, cmd->msglen, cmd->client->fd,
cmd->csid);
}
@@ -2139,8 +2107,8 @@ static int add_to_lvmqueue(struct local_client *client, struct clvm_header *msg,
} else
cmd->remote = 0;
DEBUGLOG("(%p) add_to_lvmqueue: cmd=%p, msg=%p, len=%d, csid=%p, xid=%d\n",
client, cmd, msg, msglen, csid, cmd->xid);
DEBUGLOG("add_to_lvmqueue: cmd=%p. client=%p, msg=%p, len=%d, csid=%p, xid=%d\n",
cmd, client, msg, msglen, csid, cmd->xid);
pthread_mutex_lock(&lvm_thread_mutex);
if (lvm_thread_exit) {
pthread_mutex_unlock(&lvm_thread_mutex);
@@ -2156,14 +2124,6 @@ static int add_to_lvmqueue(struct local_client *client, struct clvm_header *msg,
}
/* Return 0 if we can talk to an existing clvmd */
/*
* FIXME:
*
* This function returns only -1 or 0, but there are
* different levels of errors, some of them should stop
* further execution of clvmd thus another state is needed
* and some error message need to be only informational.
*/
static int check_local_clvmd(void)
{
int local_socket;
@@ -2183,11 +2143,7 @@ static int check_local_clvmd(void)
if (connect(local_socket,(struct sockaddr *) &sockaddr,
sizeof(sockaddr))) {
/* connection failure is expected state */
if (errno == ENOENT)
log_sys_debug("connect", "local socket");
else
log_sys_error("connect", "local socket");
log_sys_error("connect", "local socket");
ret = -1;
}
@@ -2288,8 +2244,7 @@ static void check_all_callback(struct local_client *client, const char *csid,
If not, returns -1 and prints out a list of errant nodes */
static int check_all_clvmds_running(struct local_client *client)
{
DEBUGLOG("(%p) check_all_clvmds_running\n", client);
DEBUGLOG("check_all_clvmds_running\n");
return clops->cluster_do_node_callback(client, check_all_callback);
}
@@ -2328,11 +2283,13 @@ static void ntoh_clvm(struct clvm_header *hdr)
static void sigusr2_handler(int sig)
{
DEBUGLOG("SIGUSR2 received\n");
return;
}
static void sigterm_handler(int sig)
{
quit = 1;
return;
}
static void sighup_handler(int sig)


@@ -639,6 +639,16 @@ int post_lock_lv(unsigned char command, unsigned char lock_flags,
return 0;
}
/* Check if a VG is in use by LVM1 so we don't stomp on it */
int do_check_lvm1(const char *vgname)
{
int status;
status = check_lvm1_vg_inactive(cmd, vgname);
return status == 1 ? 0 : EBUSY;
}
int do_refresh_cache(void)
{
DEBUGLOG("Refreshing context\n");
@@ -651,9 +661,10 @@ int do_refresh_cache(void)
return -1;
}
init_full_scan_done(0);
init_ignore_suspended_devices(1);
lvmcache_force_next_label_scan();
lvmcache_label_scan(cmd);
label_scan_destroy(cmd); /* destroys bcache (to close devs), keeps lvmcache */
dm_pool_empty(cmd->mem);
pthread_mutex_unlock(&lvm_lock);
@@ -796,7 +807,8 @@ static void lvm2_log_fn(int level, const char *file, int line, int dm_errno,
if (level != _LOG_ERR && level != _LOG_FATAL)
return;
(void) dm_strncpy(last_error, message, sizeof(last_error));
strncpy(last_error, message, sizeof(last_error));
last_error[sizeof(last_error)-1] = '\0';
}
/* This checks some basic cluster-LVM configuration stuff */
@@ -832,7 +844,7 @@ void lvm_do_backup(const char *vgname)
pthread_mutex_lock(&lvm_lock);
vg = vg_read_internal(cmd, vgname, NULL /*vgid*/, 0, 0, WARN_PV_READ, &consistent);
vg = vg_read_internal(cmd, vgname, NULL /*vgid*/, WARN_PV_READ, &consistent);
if (vg && consistent)
check_current_backup(vg);


@@ -25,6 +25,7 @@ extern int do_lock_lv(unsigned char lock_cmd, unsigned char lock_flags,
extern const char *do_lock_query(char *resource);
extern int post_lock_lv(unsigned char lock_cmd, unsigned char lock_flags,
char *resource);
extern int do_check_lvm1(const char *vgname);
extern int do_refresh_cache(void);
extern int init_clvm(struct dm_hash_table *excl_uuid);
extern void destroy_lvm(void);


@@ -29,7 +29,7 @@ include $(top_builddir)/make.tmpl
LIBS += -ldevmapper
LMLIBS += $(CPG_LIBS) $(SACKPT_LIBS)
CFLAGS += $(CPG_CFLAGS) $(SACKPT_CFLAGS) $(EXTRA_EXEC_CFLAGS)
LDFLAGS += $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS)
LDFLAGS += $(EXTRA_EXEC_LDFLAGS)
cmirrord: $(OBJECTS) $(top_builddir)/lib/liblvm-internal.a
$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJECTS) \


@@ -166,9 +166,6 @@ int cluster_send(struct clog_request *rq)
{
int r;
int found = 0;
#if CMIRROR_HAS_CHECKPOINT
int count = 0;
#endif
struct iovec iov;
struct clog_cpg *entry;
@@ -185,7 +182,7 @@ int cluster_send(struct clog_request *rq)
}
/*
* Once the request heads for the cluster, the luid loses
* Once the request heads for the cluster, the luid looses
* all its meaning.
*/
rq->u_rq.luid = 0;
@@ -206,6 +203,8 @@ int cluster_send(struct clog_request *rq)
#if CMIRROR_HAS_CHECKPOINT
do {
int count = 0;
r = cpg_mcast_joined(entry->handle, CPG_TYPE_AGREED, &iov, 1);
if (r != SA_AIS_ERR_TRY_AGAIN)
break;
@@ -1631,7 +1630,7 @@ int create_cluster_cpg(char *uuid, uint64_t luid)
size = ((strlen(uuid) + 1) > CPG_MAX_NAME_LENGTH) ?
CPG_MAX_NAME_LENGTH : (strlen(uuid) + 1);
(void) dm_strncpy(new->name.value, uuid, size);
strncpy(new->name.value, uuid, size);
new->name.length = (uint32_t)size;
new->luid = luid;


@@ -12,7 +12,6 @@
#include "logging.h"
#include "functions.h"
#include <sys/sysmacros.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
@@ -377,7 +376,7 @@ static int _clog_ctr(char *uuid, uint64_t luid,
uint32_t block_on_error = 0;
int disk_log;
char disk_path[PATH_MAX];
char disk_path[128];
int unlink_path = 0;
long page_size;
int pages;
@@ -451,19 +450,15 @@ static int _clog_ctr(char *uuid, uint64_t luid,
lc->skip_bit_warning = region_count;
lc->disk_fd = -1;
lc->log_dev_failed = 0;
if (!dm_strncpy(lc->uuid, uuid, DM_UUID_LEN)) {
LOG_ERROR("Cannot use too long UUID %s.", uuid);
r = -EINVAL;
goto fail;
}
strncpy(lc->uuid, uuid, DM_UUID_LEN);
lc->luid = luid;
if (get_log(lc->uuid, lc->luid) ||
get_pending_log(lc->uuid, lc->luid)) {
LOG_ERROR("[%s/%" PRIu64 "u] Log already exists, unable to create.",
SHORT_UUID(lc->uuid), lc->luid);
r = -EINVAL;
goto fail;
dm_free(lc);
return -EINVAL;
}
dm_list_init(&lc->mark_list);


@@ -14,6 +14,7 @@
#define _LVM_CLOG_LOGGING_H
#define _GNU_SOURCE
#define _FILE_OFFSET_BITS 64
#include "configure.h"
#include <stdio.h>


@@ -56,16 +56,18 @@ include $(top_builddir)/make.tmpl
all: device-mapper
device-mapper: $(TARGETS)
LIBS += -ldevmapper
LVMLIBS += -ldevmapper-event $(PTHREAD_LIBS)
CFLAGS_dmeventd.o += $(EXTRA_EXEC_CFLAGS)
LIBS += -ldevmapper $(PTHREAD_LIBS)
dmeventd: $(LIB_SHARED) dmeventd.o
$(CC) $(CFLAGS) -L. $(LDFLAGS) $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) dmeventd.o \
-o $@ $(DL_LIBS) $(DMEVENT_LIBS) $(LIBS)
$(CC) $(CFLAGS) $(LDFLAGS) $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) -L. -o $@ dmeventd.o \
$(DL_LIBS) $(LVMLIBS) $(LIBS) -rdynamic
dmeventd.static: $(LIB_STATIC) dmeventd.o $(interfacebuilddir)/libdevmapper.a
$(CC) $(CFLAGS) $(LDFLAGS) -static -L. -L$(interfacebuilddir) dmeventd.o \
-o $@ $(DL_LIBS) $(DMEVENT_LIBS) $(LIBS) $(STATIC_LIBS)
$(CC) $(CFLAGS) $(LDFLAGS) $(ELDFLAGS) -static -L. -L$(interfacebuilddir) -o $@ \
dmeventd.o $(DL_LIBS) $(LVMLIBS) $(LIBS) $(STATIC_LIBS)
ifeq ("@PKGCONFIG@", "yes")
INSTALL_LIB_TARGETS += install_pkgconfig

View File

@@ -62,8 +62,6 @@
#include <syslog.h>
#define DM_SIGNALED_EXIT 1
#define DM_SCHEDULED_EXIT 2
static volatile sig_atomic_t _exit_now = 0; /* set to '1' when signal is given to exit */
/* List (un)link macros. */
@@ -101,28 +99,13 @@ static time_t _idle_since = 0;
static char **_initial_registrations = 0;
/* FIXME Make configurable at runtime */
/* All libdm messages */
__attribute__((format(printf, 5, 6)))
static void _libdm_log(int level, const char *file, int line,
int dm_errno_or_class, const char *format, ...)
{
va_list ap;
va_start(ap, format);
dm_event_log("#dm", level, file, line, dm_errno_or_class, format, ap);
va_end(ap);
}
/* All dmeventd messages */
#undef LOG_MESG
#define LOG_MESG(l, f, ln, e, x...) _dmeventd_log(l, f, ln, e, ## x)
__attribute__((format(printf, 5, 6)))
__attribute__((format(printf, 4, 5)))
static void _dmeventd_log(int level, const char *file, int line,
int dm_errno_or_class, const char *format, ...)
const char *format, ...)
{
va_list ap;
va_start(ap, format);
dm_event_log("dmeventd", level, file, line, dm_errno_or_class, format, ap);
dm_event_log("dm", level, file, line, 0, format, ap);
va_end(ap);
}
@@ -470,7 +453,7 @@ static int _pthread_create_smallstack(pthread_t *t, void *(*fun)(void *), void *
/*
* We use a smaller stack since it gets preallocated in its entirety
*/
pthread_attr_setstacksize(&attr, THREAD_STACK_SIZE + getpagesize());
pthread_attr_setstacksize(&attr, THREAD_STACK_SIZE);
/*
* If no-one will be waiting, we need to detach.
@@ -752,9 +735,8 @@ static void _exit_timeout(void *unused __attribute__((unused)))
static void *_timeout_thread(void *unused __attribute__((unused)))
{
struct thread_status *thread;
struct timespec timeout, real_time;
struct timespec timeout;
time_t curr_time;
int ret;
DEBUGLOG("Timeout thread starting.");
pthread_cleanup_push(_exit_timeout, NULL);
@@ -763,16 +745,7 @@ static void *_timeout_thread(void *unused __attribute__((unused)))
while (!dm_list_empty(&_timeout_registry)) {
timeout.tv_sec = 0;
timeout.tv_nsec = 0;
#ifndef HAVE_REALTIME
curr_time = time(NULL);
#else
if (clock_gettime(CLOCK_REALTIME, &real_time)) {
log_error("Failed to read clock_gettime().");
break;
}
/* 10ms back to the future */
curr_time = real_time.tv_sec + ((real_time.tv_nsec > (1000000000 - 10000000)) ? 1 : 0);
#endif
dm_list_iterate_items_gen(thread, &_timeout_registry, timeout_list) {
if (thread->next_time <= curr_time) {
@@ -785,10 +758,7 @@ static void *_timeout_thread(void *unused __attribute__((unused)))
} else {
DEBUGLOG("Sending SIGALRM to Thr %x for timeout.",
(int) thread->thread);
ret = pthread_kill(thread->thread, SIGALRM);
if (ret && (ret != ESRCH))
log_error("Unable to wakeup Thr %x for timeout: %s.",
(int) thread->thread, strerror(ret));
pthread_kill(thread->thread, SIGALRM);
}
_unlock_mutex();
}
@@ -859,6 +829,17 @@ static void _print_sigset(const char *prefix, const sigset_t *sigset)
}
#endif
static sigset_t _unblock_sigalrm(void)
{
sigset_t set, old;
sigemptyset(&set);
sigaddset(&set, SIGALRM);
pthread_sigmask(SIG_UNBLOCK, &set, &old);
return old;
}
enum {
DM_WAIT_RETRY,
DM_WAIT_INTR,
@@ -868,7 +849,7 @@ enum {
/* Wait on a device until an event occurs. */
static int _event_wait(struct thread_status *thread)
{
sigset_t set, old;
sigset_t set;
int ret = DM_WAIT_RETRY;
struct dm_info info;
@@ -878,13 +859,7 @@ static int _event_wait(struct thread_status *thread)
* This is so that you can break out of waiting on an event,
* either for a timeout event, or to cancel the thread.
*/
sigemptyset(&old);
sigemptyset(&set);
sigaddset(&set, SIGALRM);
if (pthread_sigmask(SIG_UNBLOCK, &set, &old) != 0) {
log_sys_error("pthread_sigmask", "unblock alarm");
return ret; /* What better */
}
set = _unblock_sigalrm();
if (dm_task_run(thread->wait_task)) {
thread->current_events |= DM_EVENT_DEVICE_ERROR;
@@ -908,11 +883,10 @@ static int _event_wait(struct thread_status *thread)
}
}
if (pthread_sigmask(SIG_SETMASK, &old, NULL) != 0)
log_sys_error("pthread_sigmask", "block alarm");
pthread_sigmask(SIG_SETMASK, &set, NULL);
#ifdef DEBUG_SIGNALS
_print_sigset("dmeventd blocking ", &old);
_print_sigset("dmeventd blocking ", &set);
#endif
DEBUGLOG("Completed waitevent task for %s.", thread->device.name);
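The idiom in _event_wait() is: save the caller's signal mask, unblock SIGALRM for the duration of the blocking wait, then restore the saved mask. A standalone sketch of that save/unblock/restore pattern; the blocking dm ioctl is stood in for by a nanosleep() that SIGALRM cuts short, and the timings are arbitrary:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static void _on_alarm(int sig) { (void) sig; /* only here to interrupt syscalls */ }

/* Stand-in for the blocking wait; SIGALRM cuts it short */
static void _do_blocking_work(void)
{
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

	if (nanosleep(&ts, NULL))
		printf("blocking call was interrupted\n");
}

static int _wait_interruptible(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGALRM);

	/* Unblock SIGALRM only while we are prepared to be interrupted */
	if (pthread_sigmask(SIG_UNBLOCK, &set, &old) != 0)
		return 0;

	_do_blocking_work();

	/* Put the caller's original mask back afterwards */
	if (pthread_sigmask(SIG_SETMASK, &old, NULL) != 0)
		return 0;

	return 1;
}

int main(void)
{
	struct sigaction sa;
	sigset_t block;

	sa.sa_handler = _on_alarm;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	sigaction(SIGALRM, &sa, NULL);

	/* Keep SIGALRM blocked by default, as worker threads typically do */
	sigemptyset(&block);
	sigaddset(&block, SIGALRM);
	pthread_sigmask(SIG_BLOCK, &block, NULL);

	alarm(1);
	return _wait_interruptible() ? 0 : 1;
}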
@@ -1766,7 +1740,7 @@ static void _init_thread_signals(void)
*/
static void _exit_handler(int sig __attribute__((unused)))
{
_exit_now = DM_SIGNALED_EXIT;
_exit_now = 1;
}
#ifdef __linux__
@@ -2030,8 +2004,8 @@ static int _reinstate_registrations(struct dm_event_fifos *fifos)
static void _restart_dmeventd(void)
{
struct dm_event_fifos fifos = {
.client = -1,
.server = -1,
.client = -1,
/* FIXME Make these either configurable or depend directly on dmeventd_path */
.client_path = DM_EVENT_FIFO_CLIENT,
.server_path = DM_EVENT_FIFO_SERVER
@@ -2222,7 +2196,7 @@ int main(int argc, char *argv[])
openlog("dmeventd", LOG_PID, LOG_DAEMON);
dm_event_log_set(_debug_level, _use_syslog);
dm_log_with_errno_init(_libdm_log);
dm_log_init(_dmeventd_log);
(void) dm_prepare_selinux_context(DMEVENTD_PIDFILE, S_IFREG);
if (dm_create_lockfile(DMEVENTD_PIDFILE) == 0)
@@ -2264,14 +2238,11 @@ int main(int argc, char *argv[])
for (;;) {
if (_idle_since) {
if (_exit_now) {
if (_exit_now == DM_SCHEDULED_EXIT)
break; /* Only prints shutdown message */
log_info("dmeventd detected break while being idle "
"for %ld second(s), exiting.",
(long) (time(NULL) - _idle_since));
break;
}
if (idle_exit_timeout) {
} else if (idle_exit_timeout) {
now = time(NULL);
if (now < _idle_since)
_idle_since = now; /* clock change? */
@@ -2282,14 +2253,15 @@ int main(int argc, char *argv[])
break;
}
}
} else if (_exit_now == DM_SIGNALED_EXIT) {
_exit_now = DM_SCHEDULED_EXIT;
} else if (_exit_now) {
_exit_now = 0;
/*
* When '_exit_now' is set, signal has been received,
* but can not simply exit unless all
* threads are done processing.
*/
log_info("dmeventd received break, scheduling exit.");
log_warn("WARNING: There are still devices being monitored.");
log_warn("WARNING: Refusing to exit.");
}
_process_request(&fifos);
_cleanup_unused_threads();
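The main loop above implements a two-phase shutdown: the signal handler only records the request (DM_SIGNALED_EXIT), and the loop later promotes it to DM_SCHEDULED_EXIT so the daemon can finish draining requests before leaving. A minimal standalone sketch of that flag-based pattern, with the constants and the "still busy" check replaced by placeholders:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

#define EXIT_SIGNALED  1
#define EXIT_SCHEDULED 2

static volatile sig_atomic_t _exit_now;

/* Async-signal-safe: only set a flag, never log or lock here */
static void _exit_handler(int sig)
{
	(void) sig;
	_exit_now = EXIT_SIGNALED;
}

static int _still_busy(void)
{
	static int work = 3;		/* pretend three requests remain */
	return work-- > 0;
}

int main(void)
{
	struct sigaction sa;

	sa.sa_handler = _exit_handler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	sigaction(SIGINT, &sa, NULL);

	raise(SIGINT);			/* simulate the admin asking us to stop */

	for (;;) {
		if (_exit_now == EXIT_SCHEDULED && !_still_busy())
			break;		/* safe to leave now */

		if (_exit_now == EXIT_SIGNALED) {
			_exit_now = EXIT_SCHEDULED;
			printf("exit requested, waiting for work to drain\n");
		}

		/* ... process one request ... */
		usleep(1000);
	}

	printf("clean shutdown\n");
	return 0;
}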

View File

@@ -250,9 +250,10 @@ static int _daemon_read(struct dm_event_fifos *fifos,
if (ret < 0) {
if ((errno == EINTR) || (errno == EAGAIN))
continue;
log_error("Unable to read from event server.");
return 0;
else {
log_error("Unable to read from event server.");
return 0;
}
}
bytes += ret;
@@ -328,9 +329,10 @@ static int _daemon_write(struct dm_event_fifos *fifos,
if (ret < 0) {
if ((errno == EINTR) || (errno == EAGAIN))
continue;
log_error("Unable to talk to event daemon.");
return 0;
else {
log_error("Unable to talk to event daemon.");
return 0;
}
}
bytes += ret;
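Both FIFO helpers retry on EINTR/EAGAIN and keep going until the whole buffer has been transferred (the daemon select()s on the FIFO beforehand, so the EAGAIN retry does not normally spin). A standalone sketch of that "write it all" loop in plain POSIX, without the dm_event_fifos type:

#include <errno.h>
#include <stddef.h>
#include <unistd.h>

/* Write the whole buffer, retrying short writes and harmless errors.
 * Returns 1 on success, 0 on a real error. */
static int write_all(int fd, const char *buf, size_t size)
{
	size_t bytes = 0;
	ssize_t ret;

	while (bytes < size) {
		ret = write(fd, buf + bytes, size - bytes);
		if (ret < 0) {
			if (errno == EINTR || errno == EAGAIN)
				continue;	/* interrupted or would block: retry */
			return 0;		/* genuine failure */
		}
		bytes += (size_t) ret;
	}

	return 1;
}

int main(void)
{
	const char msg[] = "hello from write_all\n";
	return write_all(STDOUT_FILENO, msg, sizeof(msg) - 1) ? 0 : 1;
}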
@@ -452,8 +454,7 @@ static int _start_daemon(char *dmeventd_path, struct dm_event_fifos *fifos)
if (close(fifos->client))
log_sys_debug("close", fifos->client_path);
return 1;
}
if (errno != ENXIO && errno != ENOENT) {
} else if (errno != ENXIO && errno != ENOENT) {
/* problem */
log_sys_error("open", fifos->client_path);
return 0;
@@ -605,8 +606,8 @@ static int _do_event(int cmd, char *dmeventd_path, struct dm_event_daemon_messag
{
int ret;
struct dm_event_fifos fifos = {
.client = -1,
.server = -1,
.client = -1,
/* FIXME Make these either configurable or depend directly on dmeventd_path */
.client_path = DM_EVENT_FIFO_CLIENT,
.server_path = DM_EVENT_FIFO_SERVER
@@ -645,7 +646,6 @@ int dm_event_register_handler(const struct dm_event_handler *dmevh)
uuid = dm_task_get_uuid(dmt);
if (!strstr(dmevh->dso, "libdevmapper-event-lvm2thin.so") &&
!strstr(dmevh->dso, "libdevmapper-event-lvm2vdo.so") &&
!strstr(dmevh->dso, "libdevmapper-event-lvm2snapshot.so") &&
!strstr(dmevh->dso, "libdevmapper-event-lvm2mirror.so") &&
!strstr(dmevh->dso, "libdevmapper-event-lvm2raid.so"))
@@ -755,10 +755,11 @@ int dm_event_get_registered_device(struct dm_event_handler *dmevh, int next)
uuid = dm_task_get_uuid(dmt);
/* FIXME Distinguish errors connecting to daemon */
if ((ret = _do_event(next ? DM_EVENT_CMD_GET_NEXT_REGISTERED_DEVICE :
DM_EVENT_CMD_GET_REGISTERED_DEVICE, dmevh->dmeventd_path,
&msg, dmevh->dso, uuid, dmevh->mask, 0))) {
if (_do_event(next ? DM_EVENT_CMD_GET_NEXT_REGISTERED_DEVICE :
DM_EVENT_CMD_GET_REGISTERED_DEVICE, dmevh->dmeventd_path,
&msg, dmevh->dso, uuid, dmevh->mask, 0)) {
log_debug("%s: device not registered.", dm_task_get_name(dmt));
ret = -ENOENT;
goto fail;
}
@@ -864,38 +865,28 @@ void dm_event_log(const char *subsys, int level, const char *file,
int line, int dm_errno_or_class,
const char *format, va_list ap)
{
static int _abort_on_internal_errors = -1;
static pthread_mutex_t _log_mutex = PTHREAD_MUTEX_INITIALIZER;
static time_t start = 0;
const char *indent = "";
FILE *stream = log_stderr(level) ? stderr : stdout;
FILE *stream = stdout;
int prio;
time_t now;
int log_with_debug = 0;
if (subsys[0] == '#') {
/* Subsystems starting with '#' are logged
* only when debugging is enabled. */
log_with_debug++;
subsys++;
}
switch (log_level(level)) {
switch (level & ~(_LOG_STDERR | _LOG_ONCE)) {
case _LOG_DEBUG:
/* Never shown without -ddd */
if (_debug_level < 3)
return;
prio = LOG_DEBUG;
indent = " ";
break;
case _LOG_INFO:
if (log_with_debug && _debug_level < 2)
if (_debug_level < 2)
return;
prio = LOG_INFO;
indent = " ";
break;
case _LOG_NOTICE:
if (log_with_debug && _debug_level < 1)
if (_debug_level < 1)
return;
prio = LOG_NOTICE;
indent = " ";
@@ -921,13 +912,12 @@ void dm_event_log(const char *subsys, int level, const char *file,
if (!start)
start = now;
now -= start;
if (_debug_level)
fprintf(stream, "[%2d:%02d] %8x:%-6s%s",
(int)now / 60, (int)now % 60,
// TODO: Maybe use shorter ID
// ((int)(pthread_self()) >> 6) & 0xffff,
(int)pthread_self(), subsys,
(_debug_level > 3) ? "" : indent);
fprintf(stream, "[%2d:%02d] %8x:%-6s%s",
(int)now / 60, (int)now % 60,
// TODO: Maybe use shorter ID
// ((int)(pthread_self()) >> 6) & 0xffff,
(int)pthread_self(), subsys,
(_debug_level > 3) ? "" : indent);
if (_debug_level > 3)
fprintf(stream, "%28s:%4d %s", file, line, indent);
vfprintf(stream, _(format), ap);
@@ -936,15 +926,6 @@ void dm_event_log(const char *subsys, int level, const char *file,
}
pthread_mutex_unlock(&_log_mutex);
if (_abort_on_internal_errors < 0)
/* Set when env DM_ABORT_ON_INTERNAL_ERRORS is not "0" */
_abort_on_internal_errors =
strcmp(getenv("DM_ABORT_ON_INTERNAL_ERRORS") ? : "0", "0");
if (_abort_on_internal_errors &&
!strncmp(format, INTERNAL_ERROR, sizeof(INTERNAL_ERROR) - 1))
abort();
}
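The removed block caches the DM_ABORT_ON_INTERNAL_ERRORS environment lookup in a static on first use and aborts when a message starts with the internal-error prefix. A minimal sketch of that lazily cached env toggle; the prefix string and variable name are kept only for illustration, and the GNU "?:" shorthand from the original is replaced by portable code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define INTERNAL_ERROR "Internal error: "

static void check_internal_error(const char *msg)
{
	static int _abort_on_internal_errors = -1;	/* -1 = not decided yet */

	if (_abort_on_internal_errors < 0) {
		/* Enabled whenever the env var is set to anything but "0" */
		const char *env = getenv("DM_ABORT_ON_INTERNAL_ERRORS");
		_abort_on_internal_errors = strcmp(env ? env : "0", "0");
	}

	if (_abort_on_internal_errors &&
	    !strncmp(msg, INTERNAL_ERROR, sizeof(INTERNAL_ERROR) - 1))
		abort();
}

int main(void)
{
	check_internal_error("harmless message");
	puts("no abort triggered");
	return 0;
}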
#if 0 /* left out for now */

View File

@@ -1,6 +1,6 @@
#
# Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
# Copyright (C) 2004-2018 Red Hat, Inc. All rights reserved.
# Copyright (C) 2004-2005, 2011 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
@@ -16,7 +16,27 @@ srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
SUBDIRS += lvm2 snapshot raid thin mirror vdo
SUBDIRS += lvm2
ifneq ("@MIRRORS@", "none")
SUBDIRS += mirror
endif
ifneq ("@SNAPSHOTS@", "none")
SUBDIRS += snapshot
endif
ifneq ("@RAID@", "none")
SUBDIRS += raid
endif
ifneq ("@THIN@", "none")
SUBDIRS += thin
endif
ifeq ($(MAKECMDGOALS),distclean)
SUBDIRS = lvm2 mirror snapshot raid thin
endif
include $(top_builddir)/make.tmpl
@@ -24,4 +44,3 @@ snapshot: lvm2
mirror: lvm2
raid: lvm2
thin: lvm2
vdo: lvm2

View File

@@ -31,15 +31,8 @@ static pthread_mutex_t _register_mutex = PTHREAD_MUTEX_INITIALIZER;
static int _register_count = 0;
static struct dm_pool *_mem_pool = NULL;
static void *_lvm_handle = NULL;
static DM_LIST_INIT(_env_registry);
struct env_data {
struct dm_list list;
const char *cmd;
const char *data;
};
DM_EVENT_LOG_FN("#lvm")
DM_EVENT_LOG_FN("lvm")
static void _lvm2_print_log(int level, const char *file, int line,
int dm_errno_or_class, const char *msg)
@@ -71,7 +64,7 @@ int dmeventd_lvm2_init(void)
if (!_lvm_handle) {
lvm2_log_fn(_lvm2_print_log);
if (!(_lvm_handle = lvm2_init_threaded()))
if (!(_lvm_handle = lvm2_init()))
goto out;
/*
@@ -107,7 +100,6 @@ void dmeventd_lvm2_exit(void)
lvm2_run(_lvm_handle, "_memlock_dec");
dm_pool_destroy(_mem_pool);
_mem_pool = NULL;
dm_list_init(&_env_registry);
lvm2_exit(_lvm_handle);
_lvm_handle = NULL;
log_debug("lvm plugin exited.");
@@ -129,11 +121,8 @@ int dmeventd_lvm2_run(const char *cmdline)
int dmeventd_lvm2_command(struct dm_pool *mem, char *buffer, size_t size,
const char *cmd, const char *device)
{
static char _internal_prefix[] = "_dmeventd_";
char *vg = NULL, *lv = NULL, *layer;
int r;
struct env_data *env_data;
const char *env = NULL;
if (!dm_split_lvm_name(mem, device, &vg, &lv, &layer)) {
log_error("Unable to determine VG name from %s.",
@@ -146,39 +135,6 @@ int dmeventd_lvm2_command(struct dm_pool *mem, char *buffer, size_t size,
(layer = strstr(lv, "_mlog")))
*layer = '\0';
if (!strncmp(cmd, _internal_prefix, sizeof(_internal_prefix) - 1)) {
/* check if ENVVAR wasn't already resolved */
dm_list_iterate_items(env_data, &_env_registry)
if (!strcmp(cmd, env_data->cmd)) {
env = env_data->data;
break;
}
if (!env) {
/* run lvm2 command to find out setting value */
dmeventd_lvm2_lock();
if (!dmeventd_lvm2_run(cmd) ||
!(env = getenv(cmd))) {
dmeventd_lvm2_unlock();
log_error("Unable to find configured command.");
return 0;
}
/* output of internal command passed via env var */
env = dm_pool_strdup(_mem_pool, env); /* copy with lock */
dmeventd_lvm2_unlock();
if (!env ||
!(env_data = dm_pool_zalloc(_mem_pool, sizeof(*env_data))) ||
!(env_data->cmd = dm_pool_strdup(_mem_pool, cmd))) {
log_error("Unable to allocate env memory.");
return 0;
}
env_data->data = env;
/* add to ENVVAR registry */
dm_list_add(&_env_registry, &env_data->list);
}
cmd = env;
}
r = dm_snprintf(buffer, size, "%s %s/%s", cmd, vg, lv);
dm_pool_free(mem, vg);
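The block above resolves a "_dmeventd_*" setting once via an lvm2 command and then keeps the answer in a small in-memory registry, so later registrations do not re-run lvm2. A rough standalone sketch of such a name-to-value cache using a singly linked list, with plain malloc/strdup standing in for the dm_pool and getenv machinery (the command name and resolved value are made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cache_entry {
	struct cache_entry *next;
	char *name;
	char *value;
};

static struct cache_entry *_cache;

/* Stand-in for "run an lvm2 command and read the answer back" */
static char *resolve_slowly(const char *name)
{
	printf("resolving %s the slow way\n", name);
	return strdup("lvextend --use-policies");
}

static const char *lookup(const char *name)
{
	struct cache_entry *e;

	for (e = _cache; e; e = e->next)
		if (!strcmp(e->name, name))
			return e->value;		/* cache hit */

	if (!(e = calloc(1, sizeof(*e))))
		return NULL;
	if (!(e->name = strdup(name)) || !(e->value = resolve_slowly(name))) {
		free(e->name);
		free(e->value);
		free(e);
		return NULL;
	}

	e->next = _cache;				/* remember for next time */
	_cache = e;

	return e->value;
}

int main(void)
{
	lookup("_dmeventd_thin_command");
	lookup("_dmeventd_thin_command");	/* second call is served from the cache */
	return 0;
}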

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2005-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2005-2015 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -25,6 +25,7 @@
struct dso_state {
struct dm_pool *mem;
char cmd_lvscan[512];
char cmd_lvconvert[512];
};
@@ -72,10 +73,8 @@ static int _get_mirror_event(struct dso_state *state, char *params)
unsigned i;
struct dm_status_mirror *ms;
if (!dm_get_status_mirror(state->mem, params, &ms)) {
log_error("Unable to parse mirror status string.");
return ME_IGNORE;
}
if (!dm_get_status_mirror(state->mem, params, &ms))
goto_out;
/* Check for bad mirror devices */
for (i = 0; i < ms->dev_count; ++i)
@@ -96,19 +95,27 @@ static int _get_mirror_event(struct dso_state *state, char *params)
dm_pool_free(state->mem, ms);
return r;
out:
log_error("Unable to parse mirror status string.");
return ME_IGNORE;
}
static int _remove_failed_devices(const char *cmd_lvconvert, const char *device)
static int _remove_failed_devices(const char *cmd_lvscan, const char *cmd_lvconvert)
{
int r;
if (!dmeventd_lvm2_run_with_lock(cmd_lvscan))
log_info("Re-scan of mirrored device failed.");
/* if repair goes OK, report success even if lvscan has failed */
if (!dmeventd_lvm2_run_with_lock(cmd_lvconvert)) {
log_error("Repair of mirrored device %s failed.", device);
return 0;
}
r = dmeventd_lvm2_run_with_lock(cmd_lvconvert);
log_info("Repair of mirrored device %s finished successfully.", device);
log_info("Repair of mirrored device %s.",
(r) ? "finished successfully" : "failed");
return 1;
return r;
}
void process_event(struct dm_task *dmt,
@@ -146,7 +153,8 @@ void process_event(struct dm_task *dmt,
break;
case ME_FAILURE:
log_error("Device failure in %s.", device);
if (!_remove_failed_devices(state->cmd_lvconvert, device))
if (!_remove_failed_devices(state->cmd_lvscan,
state->cmd_lvconvert))
/* FIXME Why are all the error return codes unused? Get rid of them? */
log_error("Failed to remove faulty devices in %s.",
device);
@@ -160,7 +168,7 @@ void process_event(struct dm_task *dmt,
break;
default:
/* FIXME Provide value then! */
log_warn("WARNING: %s received unknown event.", device);
log_info("Unknown event received.");
}
} while (next);
}
@@ -176,10 +184,17 @@ int register_device(const char *device,
if (!dmeventd_lvm2_init_with_pool("mirror_state", state))
goto_bad;
/* CANNOT use --config as this disables cached content */
if (!dmeventd_lvm2_command(state->mem, state->cmd_lvconvert, sizeof(state->cmd_lvconvert),
"lvconvert --repair --use-policies", device))
if (!dmeventd_lvm2_command(state->mem, state->cmd_lvscan, sizeof(state->cmd_lvscan),
"lvscan --cache", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}
if (!dmeventd_lvm2_command(state->mem, state->cmd_lvconvert, sizeof(state->cmd_lvconvert),
"lvconvert --repair --use-policies", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}
*user = state;
@@ -189,9 +204,6 @@ int register_device(const char *device,
bad:
log_error("Failed to monitor mirror %s.", device);
if (state)
dmeventd_lvm2_exit_with_pool(state);
return 0;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2005-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2005-2015 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -13,19 +13,14 @@
*/
#include "lib.h"
#include "defaults.h"
#include "dmeventd_lvm.h"
#include "libdevmapper-event.h"
/* Hold enough elements for the maximum number of RAID images */
#define RAID_DEVS_ELEMS ((DEFAULT_RAID_MAX_IMAGES + 63) / 64)
struct dso_state {
struct dm_pool *mem;
char cmd_lvscan[512];
char cmd_lvconvert[512];
uint64_t raid_devs[RAID_DEVS_ELEMS];
int failed;
int warned;
};
DM_EVENT_LOG_FN("raid")
@@ -36,73 +31,32 @@ static int _process_raid_event(struct dso_state *state, char *params, const char
{
struct dm_status_raid *status;
const char *d;
int dead = 0, r = 1;
uint32_t dev;
if (!dm_get_status_raid(state->mem, params, &status)) {
log_error("Failed to process status line for %s.", device);
return 0;
}
d = status->dev_health;
while ((d = strchr(d, 'D'))) {
dev = (uint32_t)(d - status->dev_health);
if (!(state->raid_devs[dev / 64] & (UINT64_C(1) << (dev % 64)))) {
state->raid_devs[dev / 64] |= (UINT64_C(1) << (dev % 64));
log_warn("WARNING: Device #%u of %s array, %s, has failed.",
dev, status->raid_type, device);
}
d++;
dead = 1;
}
/*
* if we are converting from non-RAID to RAID (e.g. linear -> raid1)
* and too many original devices die, such that we cannot continue
* the "recover" operation, the sync action will go to "idle", the
* unsynced devs will remain at 'a', and the original devices will
* NOT SWITCH TO 'D', but will remain at 'A' - hoping to be revived.
*
* This is simply the way the kernel works...
*/
if (!strcmp(status->sync_action, "idle") &&
(status->dev_health[0] == 'a') &&
(status->insync_regions < status->total_regions)) {
log_error("Primary sources for new RAID, %s, have failed.",
device);
dead = 1; /* run it through LVM repair */
}
if (dead) {
/*
* Use the first event to run a repair, ignoring any additional ones.
*
* We presume lvconvert does pre-repair
* checks, to avoid bloat in this plugin.
*/
if (!state->warned && status->insync_regions < status->total_regions) {
state->warned = 1;
log_warn("WARNING: waiting for resynchronization to finish "
"before initiating repair on RAID device %s.", device);
/* Fall through to allow lvconvert to run. */
}
if ((d = strchr(status->dev_health, 'D'))) {
if (state->failed)
goto out; /* already reported */
log_error("Device #%d of %s array, %s, has failed.",
(int)(d - status->dev_health),
status->raid_type, device);
state->failed = 1;
if (!dmeventd_lvm2_run_with_lock(state->cmd_lvscan))
log_warn("WARNING: Re-scan of RAID device %s failed.", device);
/* if repair goes OK, report success even if lvscan has failed */
if (!dmeventd_lvm2_run_with_lock(state->cmd_lvconvert)) {
log_error("Repair of RAID device %s failed.", device);
r = 0;
log_info("Repair of RAID device %s failed.", device);
dm_pool_free(state->mem, status);
return 0;
}
} else {
state->failed = 0;
if (status->insync_regions == status->total_regions)
memset(&state->raid_devs, 0, sizeof(state->raid_devs));
log_info("%s array, %s, is %s in-sync.",
status->raid_type, device,
(status->insync_regions == status->total_regions) ? "now" : "not");
@@ -110,7 +64,7 @@ static int _process_raid_event(struct dso_state *state, char *params, const char
out:
dm_pool_free(state->mem, status);
return r;
return 1;
}
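The raid_devs[] logic above packs one bit per RAID image into an array of uint64_t words, so each failed device is reported only once and the whole set can be cleared when the array is fully in sync again. A standalone sketch of that bit-per-index bookkeeping; the image limit is illustrative, loosely modelled on DEFAULT_RAID_MAX_IMAGES:

#include <inttypes.h>
#include <stdio.h>

#define MAX_IMAGES (64 * 4)			/* example limit */
#define DEVS_ELEMS ((MAX_IMAGES + 63) / 64)

static uint64_t _failed[DEVS_ELEMS];

/* Returns 1 the first time a device index is marked failed, 0 afterwards */
static int mark_failed_once(uint32_t dev)
{
	uint64_t bit = UINT64_C(1) << (dev % 64);

	if (_failed[dev / 64] & bit)
		return 0;			/* already reported */

	_failed[dev / 64] |= bit;
	return 1;
}

static void clear_all(void)
{
	for (unsigned i = 0; i < DEVS_ELEMS; i++)
		_failed[i] = 0;			/* array is fully in sync again */
}

int main(void)
{
	printf("dev 3 newly failed: %d\n", mark_failed_once(3));	/* 1 */
	printf("dev 3 newly failed: %d\n", mark_failed_once(3));	/* 0 */
	clear_all();
	printf("dev 3 newly failed: %d\n", mark_failed_once(3));	/* 1 again */
	return 0;
}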
void process_event(struct dm_task *dmt,
@@ -155,9 +109,14 @@ int register_device(const char *device,
if (!dmeventd_lvm2_init_with_pool("raid_state", state))
goto_bad;
if (!dmeventd_lvm2_command(state->mem, state->cmd_lvconvert, sizeof(state->cmd_lvconvert),
"lvconvert --repair --use-policies", device))
if (!dmeventd_lvm2_command(state->mem, state->cmd_lvscan, sizeof(state->cmd_lvscan),
"lvscan --cache", device) ||
!dmeventd_lvm2_command(state->mem, state->cmd_lvconvert, sizeof(state->cmd_lvconvert),
"lvconvert --config devices{ignore_suspended_devices=1} "
"--repair --use-policies", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}
*user = state;
@@ -167,9 +126,6 @@ int register_device(const char *device,
bad:
log_error("Failed to monitor RAID %s.", device);
if (state)
dmeventd_lvm2_exit_with_pool(state);
return 0;
}

View File

@@ -16,7 +16,6 @@
#include "dmeventd_lvm.h"
#include "libdevmapper-event.h"
#include <sys/sysmacros.h>
#include <sys/wait.h>
#include <stdarg.h>
#include <pthread.h>
@@ -231,7 +230,7 @@ void process_event(struct dm_task *dmt,
if (percent >= WARNING_THRESH) /* Print a warning to syslog. */
log_warn("WARNING: Snapshot %s is now %.2f%% full.",
device, dm_percent_to_round_float(percent, 2));
device, dm_percent_to_float(percent));
/* Try to extend the snapshot, in accord with user-set policies */
if (!_extend(state->cmd_lvextend))
@@ -254,8 +253,10 @@ int register_device(const char *device,
if (!dmeventd_lvm2_command(state->mem, state->cmd_lvextend,
sizeof(state->cmd_lvextend),
"lvextend --use-policies", device))
"lvextend --use-policies", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}
state->percent_check = CHECK_MINIMUM;
*user = state;
@@ -266,9 +267,6 @@ int register_device(const char *device,
bad:
log_error("Failed to monitor snapshot %s.", device);
if (state)
dmeventd_lvm2_exit_with_pool(state);
return 0;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2011-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2011-2016 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -18,6 +18,7 @@
#include <sys/wait.h>
#include <stdarg.h>
#include <pthread.h>
/* TODO - move this mountinfo code into library to be reusable */
#ifdef __linux__
@@ -39,118 +40,277 @@
#define UMOUNT_COMMAND "/bin/umount"
#define MAX_FAILS (256) /* ~42 mins between cmd call retry with 10s delay */
#define MAX_FAILS (10)
#define THIN_DEBUG 0
struct dso_state {
struct dm_pool *mem;
int metadata_percent_check;
int metadata_percent;
int data_percent_check;
int data_percent;
uint64_t known_metadata_size;
uint64_t known_data_size;
unsigned fails;
unsigned max_fails;
int restore_sigset;
sigset_t old_sigset;
pid_t pid;
char *argv[3];
char *cmd_str;
char cmd_str[1024];
};
DM_EVENT_LOG_FN("thin")
static int _run_command(struct dso_state *state)
#define UUID_PREFIX "LVM-"
/* Check that the device UUID has the LVM- prefix and that the device is open */
static int _has_unmountable_prefix(int major, int minor)
{
char val[16];
int i;
struct dm_task *dmt;
struct dm_info info;
const char *uuid;
int r = 0;
/* Mark for possible lvm2 command we are running from dmeventd
* lvm2 will not try to talk back to dmeventd while processing it */
(void) setenv("LVM_RUN_BY_DMEVENTD", "1", 1);
if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
return_0;
if (state->data_percent) {
/* Prepare some known data to env vars for easy use */
if (dm_snprintf(val, sizeof(val), "%d",
state->data_percent / DM_PERCENT_1) != -1)
(void) setenv("DMEVENTD_THIN_POOL_DATA", val, 1);
if (dm_snprintf(val, sizeof(val), "%d",
state->metadata_percent / DM_PERCENT_1) != -1)
(void) setenv("DMEVENTD_THIN_POOL_METADATA", val, 1);
} else {
/* For an error event it's for a user to check status and decide */
log_debug("Error event processing.");
if (!dm_task_set_major_minor(dmt, major, minor, 1))
goto_out;
if (!dm_task_no_flush(dmt))
stack;
if (!dm_task_run(dmt))
goto out;
if (!dm_task_get_info(dmt, &info))
goto out;
if (!info.exists || !info.open_count)
goto out; /* Not open -> not mounted */
if (!(uuid = dm_task_get_uuid(dmt)))
goto out;
/* Check it's a public mountable LV:
* it has the LVM- prefix and the UUID is 68 chars long */
if (memcmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1) ||
strlen(uuid) != 68)
goto out;
#if THIN_DEBUG
log_debug("Found logical volume %s (%u:%u).", uuid, major, minor);
#endif
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
/* Get dependencies for device, and try to find matching device */
static int _has_deps(const char *name, int tp_major, int tp_minor, int *dev_minor)
{
struct dm_task *dmt;
const struct dm_deps *deps;
struct dm_info info;
int major, minor;
int r = 0;
if (!(dmt = dm_task_create(DM_DEVICE_DEPS)))
return 0;
if (!dm_task_set_name(dmt, name))
goto out;
if (!dm_task_no_open_count(dmt))
goto out;
if (!dm_task_run(dmt))
goto out;
if (!dm_task_get_info(dmt, &info))
goto out;
if (!(deps = dm_task_get_deps(dmt)))
goto out;
if (!info.exists || deps->count != 1)
goto out;
major = (int) MAJOR(deps->device[0]);
minor = (int) MINOR(deps->device[0]);
if ((major != tp_major) || (minor != tp_minor))
goto out;
*dev_minor = info.minor;
if (!_has_unmountable_prefix(major, info.minor))
goto out;
#if THIN_DEBUG
{
char dev_name[PATH_MAX];
if (dm_device_get_name(major, minor, 0, dev_name, sizeof(dev_name)))
log_debug("Found %s (%u:%u) depends on %s.",
name, major, *dev_minor, dev_name);
}
#endif
r = 1;
out:
dm_task_destroy(dmt);
return r;
}
/* Get all active devices */
static int _find_all_devs(dm_bitset_t bs, int tp_major, int tp_minor)
{
struct dm_task *dmt;
struct dm_names *names;
unsigned next = 0;
int minor, r = 1;
if (!(dmt = dm_task_create(DM_DEVICE_LIST)))
return 0;
if (!dm_task_run(dmt)) {
r = 0;
goto out;
}
log_verbose("Executing command: %s", state->cmd_str);
if (!(names = dm_task_get_names(dmt))) {
r = 0;
goto out;
}
/* TODO:
* Support parallel runs of 'task' and its waitpid maintenance.
* ATM we can't handle signaling of SIGALRM,
* as signaling is not allowed while 'process_event()' is running.
*/
if (!(state->pid = fork())) {
/* child */
(void) close(0);
for (i = 3; i < 255; ++i) (void) close(i);
execvp(state->argv[0], state->argv);
_exit(errno);
} else if (state->pid == -1) {
log_error("Can't fork command %s.", state->cmd_str);
state->fails = 1;
return 0;
if (!names->dev)
goto out;
do {
names = (struct dm_names *)((char *) names + next);
if (_has_deps(names->name, tp_major, tp_minor, &minor))
dm_bit_set(bs, minor);
next = names->next;
} while (next);
out:
dm_task_destroy(dmt);
return r;
}
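The _find_all_devs() walk above uses the libdevmapper convention for DM_DEVICE_LIST results: dm_task_get_names() returns a packed list in which each record's 'next' field is a byte offset to the following record, and a zero 'dev' field means the list is empty. A hedged, stripped-down sketch of just that listing loop, assuming the usual struct dm_names layout from libdevmapper.h and linking against libdevmapper:

#include <libdevmapper.h>
#include <stdio.h>

static int list_dm_devices(void)
{
	struct dm_task *dmt;
	struct dm_names *names;
	unsigned next = 0;
	int r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_LIST)))
		return 0;

	if (!dm_task_run(dmt))
		goto out;

	if (!(names = dm_task_get_names(dmt)))
		goto out;

	if (!names->dev) {		/* dev == 0 means no devices present */
		r = 1;
		goto out;
	}

	do {
		/* 'next' is a byte offset from the previous record */
		names = (struct dm_names *)((char *) names + next);
		printf("found device: %s\n", names->name);
		next = names->next;
	} while (next);

	r = 1;
out:
	dm_task_destroy(dmt);
	return r;
}

int main(void)
{
	return list_dm_devices() ? 0 : 1;
}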
static int _run(const char *cmd, ...)
{
va_list ap;
int argc = 1; /* for argv[0], i.e. cmd */
int i = 0;
const char **argv;
pid_t pid = fork();
int status;
if (pid == 0) { /* child */
va_start(ap, cmd);
while (va_arg(ap, const char *))
++argc;
va_end(ap);
/* + 1 for the terminating NULL */
argv = alloca(sizeof(const char *) * (argc + 1));
argv[0] = cmd;
va_start(ap, cmd);
while ((argv[++i] = va_arg(ap, const char *)));
va_end(ap);
execvp(cmd, (char **)argv);
log_sys_error("exec", cmd);
exit(127);
}
if (pid > 0) { /* parent */
if (waitpid(pid, &status, 0) != pid)
return 0; /* waitpid failed */
if (!WIFEXITED(status) || WEXITSTATUS(status))
return 0; /* the child failed */
}
if (pid < 0)
return 0; /* fork failed */
return 1; /* all good */
}
struct mountinfo_s {
const char *device;
struct dm_info info;
dm_bitset_t minors; /* Bitset for active thin pool minors */
};
static int _umount_device(char *buffer, unsigned major, unsigned minor,
char *target, void *cb_data)
{
struct mountinfo_s *data = cb_data;
char *words[10];
if ((major == data->info.major) && dm_bit(data->minors, minor)) {
if (dm_split_words(buffer, DM_ARRAY_SIZE(words), 0, words) < DM_ARRAY_SIZE(words))
words[9] = NULL; /* just don't show device name */
log_info("Unmounting thin %s (%d:%d) of thin-pool %s (%u:%u) from mount point \"%s\".",
words[9] ? : "", major, minor, data->device,
data->info.major, data->info.minor,
target);
if (!_run(UMOUNT_COMMAND, "-fl", target, NULL))
log_error("Failed to lazy umount thin %s (%d:%d) from %s: %s.",
words[9], major, minor, target, strerror(errno));
}
return 1;
}
/*
* Find all thin pool LV users and try to umount them.
* TODO: work with read-only thin pool support
*/
static void _umount(struct dm_task *dmt)
{
/* TODO: Convert to use hash to reduce memory usage */
static const size_t MINORS = (1U << 20); /* 20 bit */
struct mountinfo_s data = { NULL };
if (!dm_task_get_info(dmt, &data.info))
return;
data.device = dm_task_get_name(dmt);
if (!(data.minors = dm_bitset_create(NULL, MINORS))) {
log_error("Failed to allocate bitset. Not unmounting %s.", data.device);
goto out;
}
if (!_find_all_devs(data.minors, data.info.major, data.info.minor)) {
log_error("Failed to detect mounted volumes for %s.", data.device);
goto out;
}
if (!dm_mountinfo_read(_umount_device, &data)) {
log_error("Could not parse mountinfo file.");
goto out;
}
out:
if (data.minors)
dm_bitset_destroy(data.minors);
}
static int _use_policy(struct dm_task *dmt, struct dso_state *state)
{
#if THIN_DEBUG
log_debug("dmeventd executes: %s.", state->cmd_str);
log_info("dmeventd executes: %s.", state->cmd_str);
#endif
if (state->argv[0])
return _run_command(state);
if (!dmeventd_lvm2_run_with_lock(state->cmd_str)) {
log_error("Failed command for %s.", dm_task_get_name(dmt));
state->fails = 1;
log_error("Failed to extend thin pool %s.",
dm_task_get_name(dmt));
state->fails++;
return 0;
}
state->fails = 0;
return 1;
}
/* Check whether the executed command has finished.
* Only one command may run at a time. */
static int _wait_for_pid(struct dso_state *state)
{
int status = 0;
if (state->pid == -1)
return 1;
if (!waitpid(state->pid, &status, WNOHANG))
return 0;
/* Wait for finish */
if (WIFEXITED(status)) {
log_verbose("Child %d exited with status %d.",
state->pid, WEXITSTATUS(status));
state->fails = WEXITSTATUS(status) ? 1 : 0;
} else {
if (WIFSIGNALED(status))
log_verbose("Child %d was terminated with status %d.",
state->pid, WTERMSIG(status));
state->fails = 1;
}
state->pid = -1;
return 1;
}
@@ -159,6 +319,7 @@ void process_event(struct dm_task *dmt,
void **user)
{
const char *device = dm_task_get_name(dmt);
int percent;
struct dso_state *state = *user;
struct dm_status_thin_pool *tps = NULL;
void *next = NULL;
@@ -166,48 +327,25 @@ void process_event(struct dm_task *dmt,
char *target_type = NULL;
char *params;
int needs_policy = 0;
struct dm_task *new_dmt = NULL;
int needs_umount = 0;
#if THIN_DEBUG
log_debug("Watch for tp-data:%.2f%% tp-metadata:%.2f%%.",
dm_percent_to_round_float(state->data_percent_check, 2),
dm_percent_to_round_float(state->metadata_percent_check, 2));
dm_percent_to_float(state->data_percent_check),
dm_percent_to_float(state->metadata_percent_check));
#endif
if (!_wait_for_pid(state)) {
log_warn("WARNING: Skipping event, child %d is still running (%s).",
state->pid, state->cmd_str);
return;
}
#if 0
/* No longer monitoring, waiting for remove */
if (!state->meta_percent_check && !state->data_percent_check)
return;
#endif
if (event & DM_EVENT_DEVICE_ERROR) {
/* Error -> no need to check and do instant resize */
state->data_percent = state->metadata_percent = 0;
if (_use_policy(dmt, state))
goto out;
stack;
/*
* Rather update oldish status
* since after 'command' processing
* percentage info could have changed a lot.
* If we would get above UMOUNT_THRESH
* we would wait for next sigalarm.
*/
if (!(new_dmt = dm_task_create(DM_DEVICE_STATUS)))
goto_out;
if (!dm_task_set_uuid(new_dmt, dm_task_get_uuid(dmt)))
goto_out;
/* Non-blocking status read */
if (!dm_task_no_flush(new_dmt))
log_warn("WARNING: Can't set no_flush for dm status.");
if (!dm_task_run(new_dmt))
goto_out;
dmt = new_dmt;
}
dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
@@ -219,6 +357,7 @@ void process_event(struct dm_task *dmt,
if (!dm_get_status_thin_pool(state->mem, params, &tps)) {
log_error("Failed to parse status.");
needs_umount = 1;
goto out;
}
@@ -233,110 +372,67 @@ void process_event(struct dm_task *dmt,
if (state->known_metadata_size != tps->total_metadata_blocks) {
state->metadata_percent_check = CHECK_MINIMUM;
state->known_metadata_size = tps->total_metadata_blocks;
state->fails = 0;
}
if (state->known_data_size != tps->total_data_blocks) {
state->data_percent_check = CHECK_MINIMUM;
state->known_data_size = tps->total_data_blocks;
state->fails = 0;
}
/*
* Trigger action when threshold boundary is exceeded.
* Report 80% threshold warning when it's used above 80%.
* Only 100% is an exception, as it cannot be surpassed, so the policy
* action is called for: >50%, >55% ... >95%, 100%
*/
state->metadata_percent = dm_make_percent(tps->used_metadata_blocks, tps->total_metadata_blocks);
if ((state->metadata_percent > WARNING_THRESH) &&
(state->metadata_percent > state->metadata_percent_check))
log_warn("WARNING: Thin pool %s metadata is now %.2f%% full.",
device, dm_percent_to_round_float(state->metadata_percent, 2));
if (state->metadata_percent > CHECK_MINIMUM) {
/* Run action when usage raised more than CHECK_STEP since the last time */
if (state->metadata_percent > state->metadata_percent_check)
needs_policy = 1;
state->metadata_percent_check = (state->metadata_percent / CHECK_STEP + 1) * CHECK_STEP;
if (state->metadata_percent_check == DM_PERCENT_100)
state->metadata_percent_check--; /* Can't get bigger than 100% */
} else
state->metadata_percent_check = CHECK_MINIMUM;
percent = dm_make_percent(tps->used_metadata_blocks, tps->total_metadata_blocks);
if (percent >= state->metadata_percent_check) {
/*
* Usage has raised more than CHECK_STEP since the last
* time. Run actions.
*/
state->metadata_percent_check = (percent / CHECK_STEP) * CHECK_STEP + CHECK_STEP;
state->data_percent = dm_make_percent(tps->used_data_blocks, tps->total_data_blocks);
if ((state->data_percent > WARNING_THRESH) &&
(state->data_percent > state->data_percent_check))
log_warn("WARNING: Thin pool %s data is now %.2f%% full.",
device, dm_percent_to_round_float(state->data_percent, 2));
if (state->data_percent > CHECK_MINIMUM) {
/* Run action when usage raised more than CHECK_STEP since the last time */
if (state->data_percent > state->data_percent_check)
needs_policy = 1;
state->data_percent_check = (state->data_percent / CHECK_STEP + 1) * CHECK_STEP;
if (state->data_percent_check == DM_PERCENT_100)
state->data_percent_check--; /* Can't get bigger than 100% */
} else
state->data_percent_check = CHECK_MINIMUM;
/* FIXME: extension of metadata needs to be written! */
if (percent >= WARNING_THRESH) /* Print a warning to syslog. */
log_warn("WARNING: Thin pool %s metadata is now %.2f%% full.",
device, dm_percent_to_float(percent));
needs_policy = 1;
/* Reduce the number of _use_policy() calls by a power-of-2 factor until the frequency of MAX_FAILS is reached.
* This avoids an excessive number of error retries, yet still shows some status messages in the log regularly,
* e.g. the PV could have been pvmoved and the VG/LV locked for a while...
*/
if (state->fails) {
if (state->fails++ <= state->max_fails) {
log_debug("Postponing frequently failing policy (%u <= %u).",
state->fails - 1, state->max_fails);
goto out;
}
if (state->max_fails < MAX_FAILS)
state->max_fails <<= 1;
state->fails = needs_policy = 1; /* Retry failing command */
} else
state->max_fails = 1; /* Reset on success */
if (percent >= UMOUNT_THRESH)
needs_umount = 1;
}
if (needs_policy)
_use_policy(dmt, state);
percent = dm_make_percent(tps->used_data_blocks, tps->total_data_blocks);
if (percent >= state->data_percent_check) {
/*
* Usage has raised more than CHECK_STEP since
* the last time. Run actions.
*/
state->data_percent_check = (percent / CHECK_STEP) * CHECK_STEP + CHECK_STEP;
if (percent >= WARNING_THRESH) /* Print a warning to syslog. */
log_warn("WARNING: Thin pool %s data is now %.2f%% full.",
device, dm_percent_to_float(percent));
needs_policy = 1;
if (percent >= UMOUNT_THRESH)
needs_umount = 1;
}
if (needs_policy &&
_use_policy(dmt, state))
needs_umount = 0; /* No umount when command was successful */
out:
if (needs_umount) {
_umount(dmt);
/* Until something changes, do not retry any more actions */
state->data_percent_check = state->metadata_percent_check = (DM_PERCENT_1 * 101);
}
if (tps)
dm_pool_free(state->mem, tps);
if (new_dmt)
dm_task_destroy(new_dmt);
}
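Both the old and new threshold arithmetic above steps the next check point up in CHECK_STEP increments of libdevmapper's fixed-point percentage, where DM_PERCENT_1 represents one percent, and never lets the threshold reach a value that usage could not exceed. A small standalone sketch of that stepping; the fixed-point unit is re-declared locally so it runs without libdm headers, and the rounding follows the newer "(p / STEP + 1) * STEP" form:

#include <stdio.h>

#define PERCENT_1     1000000			/* local stand-in for DM_PERCENT_1 */
#define PERCENT_100   (100 * PERCENT_1)
#define CHECK_STEP    (5 * PERCENT_1)		/* act on every additional 5% */
#define CHECK_MINIMUM (50 * PERCENT_1)		/* ignore pools below 50% */

/* Given current usage, return the threshold for the next action */
static int next_check(int percent)
{
	int check;

	if (percent <= CHECK_MINIMUM)
		return CHECK_MINIMUM;

	check = (percent / CHECK_STEP + 1) * CHECK_STEP;
	if (check >= PERCENT_100)
		check = PERCENT_100 - 1;	/* 100% can never be exceeded */

	return check;
}

int main(void)
{
	int usage[] = { 42, 57, 80, 96, 100 };

	for (unsigned i = 0; i < sizeof(usage) / sizeof(usage[0]); i++) {
		int p = usage[i] * PERCENT_1;
		printf("%3d%% used -> next action above %d%%\n",
		       usage[i], next_check(p) / PERCENT_1);
	}
	return 0;
}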
/* Handle SIGCHLD for a thread */
static void _sig_child(int signum __attribute__((unused)))
{
/* empty SIG_IGN */;
}
/* Set up a handler for SIGCHLD when executing an external command
* to get a quick 'waitpid()' reaction.
* It will interrupt a syscall just like SIGALRM and
* invoke process_event().
*/
static void _init_thread_signals(struct dso_state *state)
{
struct sigaction act = { .sa_handler = _sig_child };
sigset_t my_sigset;
sigemptyset(&my_sigset);
if (sigaction(SIGCHLD, &act, NULL))
log_warn("WARNING: Failed to set SIGCHLD action.");
else if (sigaddset(&my_sigset, SIGCHLD))
log_warn("WARNING: Failed to add SIGCHLD to set.");
else if (pthread_sigmask(SIG_UNBLOCK, &my_sigset, &state->old_sigset))
log_warn("WARNING: Failed to unblock SIGCHLD.");
else
state->restore_sigset = 1;
}
static void _restore_thread_signals(struct dso_state *state)
{
if (state->restore_sigset &&
pthread_sigmask(SIG_SETMASK, &state->old_sigset, NULL))
log_warn("WARNING: Failed to block SIGCHLD.");
if (state->fails >= MAX_FAILS) {
log_warn("WARNING: Dropping monitoring of %s. "
"lvm2 command fails too often (%u times in a row).",
device, state->fails);
pthread_kill(pthread_self(), SIGALRM);
}
}
int register_device(const char *device,
@@ -346,55 +442,27 @@ int register_device(const char *device,
void **user)
{
struct dso_state *state;
char *str;
char cmd_str[PATH_MAX + 128 + 2]; /* cmd ' ' vg/lv \0 */
if (!dmeventd_lvm2_init_with_pool("thin_pool_state", state))
goto_bad;
if (!dmeventd_lvm2_command(state->mem, cmd_str, sizeof(cmd_str),
"_dmeventd_thin_command", device))
if (!dmeventd_lvm2_command(state->mem, state->cmd_str,
sizeof(state->cmd_str),
"lvextend --use-policies",
device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}
if (strncmp(cmd_str, "lvm ", 4) == 0) {
if (!(state->cmd_str = dm_pool_strdup(state->mem, cmd_str + 4))) {
log_error("Failed to copy lvm command.");
goto bad;
}
} else if (cmd_str[0] == '/') {
if (!(state->cmd_str = dm_pool_strdup(state->mem, cmd_str))) {
log_error("Failed to copy thin command.");
goto bad;
}
/* Find last space before 'vg/lv' */
if (!(str = strrchr(state->cmd_str, ' ')))
goto inval;
if (!(state->argv[0] = dm_pool_strndup(state->mem, state->cmd_str,
str - state->cmd_str))) {
log_error("Failed to copy command.");
goto bad;
}
state->argv[1] = str + 1; /* 1 argument - vg/lv */
_init_thread_signals(state);
} else /* Unsupported command format */
goto inval;
state->pid = -1;
state->metadata_percent_check = CHECK_MINIMUM;
state->data_percent_check = CHECK_MINIMUM;
*user = state;
log_info("Monitoring thin pool %s.", device);
log_info("Monitoring thin %s.", device);
return 1;
inval:
log_error("Invalid command for monitoring: %s.", cmd_str);
bad:
log_error("Failed to monitor thin pool %s.", device);
if (state)
dmeventd_lvm2_exit_with_pool(state);
log_error("Failed to monitor thin %s.", device);
return 0;
}
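In the newer variant, register_device() above accepts either an "lvm …" command that dmeventd runs internally or an absolute path to an external program; in the external case it splits the configured string at the last space so that the final word (vg/lv) becomes the program's single argument. A rough standalone sketch of that split using POSIX strdup/strndup; the hook path and LV name are made up:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split "prog vg/lv" into argv[0] = program and argv[1] = "vg/lv".
 * Returns 0 when the string has no space to split at or on allocation failure. */
static int split_command(const char *cmd_str, char *argv[3])
{
	const char *last;

	if (!(last = strrchr(cmd_str, ' ')))
		return 0;				/* nothing to split */

	if (!(argv[0] = strndup(cmd_str, (size_t)(last - cmd_str))))
		return 0;

	if (!(argv[1] = strdup(last + 1))) {
		free(argv[0]);
		return 0;
	}

	argv[2] = NULL;					/* execvp-style termination */
	return 1;
}

int main(void)
{
	char *argv[3];

	if (!split_command("/usr/local/bin/my-thin-hook vg00/pool0", argv))
		return 1;

	printf("program        : '%s'\n", argv[0]);
	printf("vg/lv argument : '%s'\n", argv[1]);
	free(argv[0]);
	free(argv[1]);
	return 0;
}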
@@ -406,31 +474,9 @@ int unregister_device(const char *device,
void **user)
{
struct dso_state *state = *user;
int i;
for (i = 0; !_wait_for_pid(state) && (i < 6); ++i) {
if (i == 0)
/* Give it 2 seconds, then try to terminate & kill it */
log_verbose("Child %d still not finished (%s) waiting.",
state->pid, state->cmd_str);
else if (i == 3) {
log_warn("WARNING: Terminating child %d.", state->pid);
kill(state->pid, SIGINT);
kill(state->pid, SIGTERM);
} else if (i == 5) {
log_warn("WARNING: Killing child %d.", state->pid);
kill(state->pid, SIGKILL);
}
sleep(1);
}
if (state->pid != -1)
log_warn("WARNING: Cannot kill child %d!", state->pid);
_restore_thread_signals(state);
dmeventd_lvm2_exit_with_pool(state);
log_info("No longer monitoring thin pool %s.", device);
log_info("No longer monitoring thin %s.", device);
return 1;
}

View File

@@ -1,3 +0,0 @@
process_event
register_device
unregister_device

View File

@@ -1,419 +0,0 @@
/*
* Copyright (C) 2018 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib.h"
#include "dmeventd_lvm.h"
#include "libdevmapper-event.h"
#include <sys/wait.h>
#include <stdarg.h>
/* First warning when VDO pool is 80% full. */
#define WARNING_THRESH (DM_PERCENT_1 * 80)
/* Run a check every 5%. */
#define CHECK_STEP (DM_PERCENT_1 * 5)
/* Do not bother checking VDO pool is less than 50% full. */
#define CHECK_MINIMUM (DM_PERCENT_1 * 50)
#define MAX_FAILS (256) /* ~42 mins between cmd call retry with 10s delay */
#define VDO_DEBUG 0
struct dso_state {
struct dm_pool *mem;
int percent_check;
int percent;
uint64_t known_data_size;
unsigned fails;
unsigned max_fails;
int restore_sigset;
sigset_t old_sigset;
pid_t pid;
char *argv[3];
const char *cmd_str;
const char *name;
};
struct vdo_status {
uint64_t used_blocks;
uint64_t total_blocks;
};
static int _vdo_status_parse(const char *params, struct vdo_status *status)
{
if (sscanf(params, "%*s %*s %*s %*s %*s %" PRIu64 " %" PRIu64,
&status->used_blocks,
&status->total_blocks) < 2) {
log_error("Failed to parse vdo params: %s.", params);
return 0;
}
return 1;
}
DM_EVENT_LOG_FN("vdo")
static int _run_command(struct dso_state *state)
{
char val[16];
int i;
/* Mark for possible lvm2 command we are running from dmeventd
* lvm2 will not try to talk back to dmeventd while processing it */
(void) setenv("LVM_RUN_BY_DMEVENTD", "1", 1);
if (state->percent) {
/* Prepare some known data to env vars for easy use */
if (dm_snprintf(val, sizeof(val), "%d",
state->percent / DM_PERCENT_1) != -1)
(void) setenv("DMEVENTD_VDO_POOL", val, 1);
} else {
/* For an error event it's for a user to check status and decide */
log_debug("Error event processing.");
}
log_verbose("Executing command: %s", state->cmd_str);
/* TODO:
* Support parallel runs of 'task' and its waitpid maintenance.
* ATM we can't handle signaling of SIGALRM,
* as signaling is not allowed while 'process_event()' is running.
*/
if (!(state->pid = fork())) {
/* child */
(void) close(0);
for (i = 3; i < 255; ++i) (void) close(i);
execvp(state->argv[0], state->argv);
_exit(errno);
} else if (state->pid == -1) {
log_error("Can't fork command %s.", state->cmd_str);
state->fails = 1;
return 0;
}
return 1;
}
static int _use_policy(struct dm_task *dmt, struct dso_state *state)
{
#if VDO_DEBUG
log_debug("dmeventd executes: %s.", state->cmd_str);
#endif
if (state->argv[0])
return _run_command(state);
if (!dmeventd_lvm2_run_with_lock(state->cmd_str)) {
log_error("Failed command for %s.", dm_task_get_name(dmt));
state->fails = 1;
return 0;
}
state->fails = 0;
return 1;
}
/* Check whether the executed command has finished.
* Only one command may run at a time. */
static int _wait_for_pid(struct dso_state *state)
{
int status = 0;
if (state->pid == -1)
return 1;
if (!waitpid(state->pid, &status, WNOHANG))
return 0;
/* Wait for finish */
if (WIFEXITED(status)) {
log_verbose("Child %d exited with status %d.",
state->pid, WEXITSTATUS(status));
state->fails = WEXITSTATUS(status) ? 1 : 0;
} else {
if (WIFSIGNALED(status))
log_verbose("Child %d was terminated with status %d.",
state->pid, WTERMSIG(status));
state->fails = 1;
}
state->pid = -1;
return 1;
}
void process_event(struct dm_task *dmt,
enum dm_event_mask event __attribute__((unused)),
void **user)
{
const char *device = dm_task_get_name(dmt);
struct dso_state *state = *user;
void *next = NULL;
uint64_t start, length;
char *target_type = NULL;
char *params;
int needs_policy = 0;
struct dm_task *new_dmt = NULL;
struct vdo_status status;
#if VDO_DEBUG
log_debug("Watch for VDO %s:%.2f%%.", state->name,
dm_percent_to_round_float(state->percent_check, 2));
#endif
if (!_wait_for_pid(state)) {
log_warn("WARNING: Skipping event, child %d is still running (%s).",
state->pid, state->cmd_str);
return;
}
if (event & DM_EVENT_DEVICE_ERROR) {
#if VDO_DEBUG
log_debug("VDO event error.");
#endif
/* Error -> no need to check and do instant resize */
state->percent = 0;
if (_use_policy(dmt, state))
goto out;
stack;
if (!(new_dmt = dm_task_create(DM_DEVICE_STATUS)))
goto_out;
if (!dm_task_set_uuid(new_dmt, dm_task_get_uuid(dmt)))
goto_out;
/* Non-blocking status read */
if (!dm_task_no_flush(new_dmt))
log_warn("WARNING: Can't set no_flush for dm status.");
if (!dm_task_run(new_dmt))
goto_out;
dmt = new_dmt;
}
dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
if (!target_type || (strcmp(target_type, "vdo") != 0)) {
log_error("Invalid target type.");
goto out;
}
if (!_vdo_status_parse(params, &status)) {
log_error("Failed to parse status.");
goto out;
}
state->percent = dm_make_percent(status.used_blocks,
status.total_blocks);
#if VDO_DEBUG
log_debug("VDO %s status %.2f%% " FMTu64 "/" FMTu64 ".",
state->name, dm_percent_to_round_float(state->percent, 2),
status.used_blocks, status.total_blocks);
#endif
/* VDO pool size had changed. Clear the threshold. */
if (state->known_data_size != status.total_blocks) {
state->percent_check = CHECK_MINIMUM;
state->known_data_size = status.total_blocks;
state->fails = 0;
}
/*
* Trigger action when threshold boundary is exceeded.
* Report 80% threshold warning when it's used above 80%.
* Only 100% is an exception, as it cannot be surpassed, so the policy
* action is called for: >50%, >55% ... >95%, 100%
*/
if ((state->percent > WARNING_THRESH) &&
(state->percent > state->percent_check))
log_warn("WARNING: VDO %s %s is now %.2f%% full.",
state->name, device,
dm_percent_to_round_float(state->percent, 2));
if (state->percent > CHECK_MINIMUM) {
/* Run action when usage raised more than CHECK_STEP since the last time */
if (state->percent > state->percent_check)
needs_policy = 1;
state->percent_check = (state->percent / CHECK_STEP + 1) * CHECK_STEP;
if (state->percent_check == DM_PERCENT_100)
state->percent_check--; /* Can't get bigger than 100% */
} else
state->percent_check = CHECK_MINIMUM;
/* Reduce the number of _use_policy() calls by a power-of-2 factor until the frequency of MAX_FAILS is reached.
* This avoids an excessive number of error retries, yet still shows some status messages in the log regularly,
* e.g. the PV could have been pvmoved and the VG/LV locked for a while...
*/
if (state->fails) {
if (state->fails++ <= state->max_fails) {
log_debug("Postponing frequently failing policy (%u <= %u).",
state->fails - 1, state->max_fails);
goto out;
}
if (state->max_fails < MAX_FAILS)
state->max_fails <<= 1;
state->fails = needs_policy = 1; /* Retry failing command */
} else
state->max_fails = 1; /* Reset on success */
/* FIXME: ATM nothing can be done, drop 0, once it becomes useful */
if (0 && needs_policy)
_use_policy(dmt, state);
out:
if (new_dmt)
dm_task_destroy(new_dmt);
}
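Both the thin and vdo plugins throttle a repeatedly failing policy command with the fails/max_fails counters: each retry doubles the allowed gap between attempts (capped at MAX_FAILS), so errors still surface in the log without hammering lvm2. A minimal standalone sketch of that power-of-two backoff; thresholds are illustrative, and the demo simply assumes every retry keeps failing:

#include <stdio.h>

#define MAX_FAILS 256

struct backoff {
	unsigned fails;		/* events since the last attempt (0 = healthy) */
	unsigned max_fails;	/* current gap between attempts */
};

/* Returns 1 when the failing action should be retried on this event */
static int should_retry(struct backoff *b)
{
	if (!b->fails) {
		b->max_fails = 1;	/* last attempt succeeded: reset */
		return 1;		/* and act normally */
	}

	if (b->fails++ <= b->max_fails)
		return 0;		/* still postponing */

	if (b->max_fails < MAX_FAILS)
		b->max_fails <<= 1;	/* double the gap for next time */

	b->fails = 1;			/* pretend the retry failed again */
	return 1;
}

int main(void)
{
	struct backoff b = { 1, 1 };	/* first attempt already failed */
	unsigned event;

	for (event = 1; event <= 40; event++)
		if (should_retry(&b))
			printf("event %2u: retrying (next gap %u events)\n",
			       event, b.max_fails);
	return 0;
}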
/* Handle SIGCHLD for a thread */
static void _sig_child(int signum __attribute__((unused)))
{
/* empty SIG_IGN */;
}
/* Set up a handler for SIGCHLD when executing an external command
* to get a quick 'waitpid()' reaction.
* It will interrupt a syscall just like SIGALRM and
* invoke process_event().
*/
static void _init_thread_signals(struct dso_state *state)
{
struct sigaction act = { .sa_handler = _sig_child };
sigset_t my_sigset;
sigemptyset(&my_sigset);
if (sigaction(SIGCHLD, &act, NULL))
log_warn("WARNING: Failed to set SIGCHLD action.");
else if (sigaddset(&my_sigset, SIGCHLD))
log_warn("WARNING: Failed to add SIGCHLD to set.");
else if (pthread_sigmask(SIG_UNBLOCK, &my_sigset, &state->old_sigset))
log_warn("WARNING: Failed to unblock SIGCHLD.");
else
state->restore_sigset = 1;
}
static void _restore_thread_signals(struct dso_state *state)
{
if (state->restore_sigset &&
pthread_sigmask(SIG_SETMASK, &state->old_sigset, NULL))
log_warn("WARNING: Failed to block SIGCHLD.");
}
int register_device(const char *device,
const char *uuid,
int major __attribute__((unused)),
int minor __attribute__((unused)),
void **user)
{
struct dso_state *state;
const char *cmd;
char *str;
char cmd_str[PATH_MAX + 128 + 2]; /* cmd ' ' vg/lv \0 */
const char *name = "pool";
if (!dmeventd_lvm2_init_with_pool("vdo_pool_state", state))
goto_bad;
state->cmd_str = "";
/* Search for command for LVM- prefixed devices only */
cmd = (strncmp(uuid, "LVM-", 4) == 0) ? "_dmeventd_vdo_command" : "";
if (!dmeventd_lvm2_command(state->mem, cmd_str, sizeof(cmd_str), cmd, device))
goto_bad;
if (strncmp(cmd_str, "lvm ", 4) == 0) {
if (!(state->cmd_str = dm_pool_strdup(state->mem, cmd_str + 4))) {
log_error("Failed to copy lvm VDO command.");
goto bad;
}
} else if (cmd_str[0] == '/') {
if (!(state->cmd_str = dm_pool_strdup(state->mem, cmd_str))) {
log_error("Failed to copy VDO command.");
goto bad;
}
/* Find last space before 'vg/lv' */
if (!(str = strrchr(state->cmd_str, ' ')))
goto inval;
if (!(state->argv[0] = dm_pool_strndup(state->mem, state->cmd_str,
str - state->cmd_str))) {
log_error("Failed to copy command.");
goto bad;
}
state->argv[1] = str + 1; /* 1 argument - vg/lv */
_init_thread_signals(state);
} else if (cmd[0] == 0) {
state->name = "volume"; /* What to use with 'others?' */
} else /* Unsupported command format */
goto inval;
state->pid = -1;
state->name = name;
*user = state;
log_info("Monitoring VDO %s %s.", name, device);
return 1;
inval:
log_error("Invalid command for monitoring: %s.", cmd_str);
bad:
log_error("Failed to monitor VDO %s %s.", name, device);
if (state)
dmeventd_lvm2_exit_with_pool(state);
return 0;
}
int unregister_device(const char *device,
const char *uuid __attribute__((unused)),
int major __attribute__((unused)),
int minor __attribute__((unused)),
void **user)
{
struct dso_state *state = *user;
const char *name = state->name;
int i;
for (i = 0; !_wait_for_pid(state) && (i < 6); ++i) {
if (i == 0)
/* Give it 2 seconds, then try to terminate & kill it */
log_verbose("Child %d still not finished (%s) waiting.",
state->pid, state->cmd_str);
else if (i == 3) {
log_warn("WARNING: Terminating child %d.", state->pid);
kill(state->pid, SIGINT);
kill(state->pid, SIGTERM);
} else if (i == 5) {
log_warn("WARNING: Killing child %d.", state->pid);
kill(state->pid, SIGKILL);
}
sleep(1);
}
if (state->pid != -1)
log_warn("WARNING: Cannot kill child %d!", state->pid);
_restore_thread_signals(state);
dmeventd_lvm2_exit_with_pool(state);
log_info("No longer monitoring VDO %s %s.", name, device);
return 1;
}

View File

@@ -1 +0,0 @@
dmfilemapd

View File

@@ -1,66 +0,0 @@
#
# Copyright (C) 2016 Red Hat, Inc. All rights reserved.
#
# This file is part of the device-mapper userspace tools.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU Lesser General Public License v.2.1.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
SOURCES = dmfilemapd.c
TARGETS = dmfilemapd
.PHONY: install_dmfilemapd install_dmfilemapd_static
INSTALL_DMFILEMAPD_TARGETS = install_dmfilemapd_dynamic
CLEAN_TARGETS = dmfilemapd.static
CFLOW_LIST = $(SOURCES)
CFLOW_LIST_TARGET = $(LIB_NAME).cflow
CFLOW_TARGET = dmfilemapd
include $(top_builddir)/make.tmpl
all: device-mapper
device-mapper: $(TARGETS)
CFLAGS_dmfilemapd.o += $(EXTRA_EXEC_CFLAGS)
LIBS += -ldevmapper
dmfilemapd: $(LIB_SHARED) dmfilemapd.o
$(CC) $(CFLAGS) $(LDFLAGS) $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) \
-o $@ dmfilemapd.o $(DL_LIBS) $(LIBS)
dmfilemapd.static: $(LIB_STATIC) dmfilemapd.o $(interfacebuilddir)/libdevmapper.a
$(CC) $(CFLAGS) $(LDFLAGS) $(ELDFLAGS) -static -L$(interfacebuilddir) \
-o $@ dmfilemapd.o $(DL_LIBS) $(LIBS) $(STATIC_LIBS)
ifneq ("$(CFLOW_CMD)", "")
CFLOW_SOURCES = $(addprefix $(srcdir)/, $(SOURCES))
-include $(top_builddir)/libdm/libdevmapper.cflow
-include $(top_builddir)/lib/liblvm-internal.cflow
-include $(top_builddir)/lib/liblvm2cmd.cflow
-include $(top_builddir)/daemons/dmfilemapd/$(LIB_NAME).cflow
endif
install_dmfilemapd_dynamic: dmfilemapd
$(INSTALL_PROGRAM) -D $< $(sbindir)/$(<F)
install_dmfilemapd_static: dmfilemapd.static
$(INSTALL_PROGRAM) -D $< $(staticdir)/$(<F)
install_dmfilemapd: $(INSTALL_DMFILEMAPD_TARGETS)
install: install_dmfilemapd
install_device-mapper: install_dmfilemapd

View File

@@ -1,836 +0,0 @@
/*
* Copyright (C) 2016 Red Hat, Inc. All rights reserved.
*
* This file is part of the device-mapper userspace tools.
*
* It includes tree drawing code based on pstree: http://psmisc.sourceforge.net/
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License v.2.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "tool.h"
#include "dm-logging.h"
#include "defaults.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/inotify.h>
#include <dirent.h>
#include <ctype.h>
#ifdef __linux__
# include "kdev_t.h"
#else
# define MAJOR(x) major((x))
# define MINOR(x) minor((x))
# define MKDEV(x,y) makedev((dev_t)(x),(dev_t)(y))
#endif
/* limit to two updates/sec */
#define FILEMAPD_WAIT_USECS 500000
/* how long to wait for unlinked files */
#define FILEMAPD_NOFILE_WAIT_USECS 100000
#define FILEMAPD_NOFILE_WAIT_TRIES 10
struct filemap_monitor {
dm_filemapd_mode_t mode;
const char *program_id;
uint64_t group_id;
char *path;
int fd;
int inotify_fd;
int inotify_watch_fd;
/* monitoring heuristics */
int64_t blocks; /* allocated blocks, from stat.st_blocks */
uint64_t nr_regions;
int deleted;
};
static int _foreground;
static int _verbose;
const char *const _usage = "dmfilemapd <fd> <group_id> <abs_path> <mode> "
"[<foreground>[<log_level>]]";
/*
* Daemon logging. By default, all messages are thrown away: messages
* are only written to the terminal if the daemon is run in the foreground.
*/
__attribute__((format(printf, 5, 0)))
static void _dmfilemapd_log_line(int level,
const char *file __attribute__((unused)),
int line __attribute__((unused)),
int dm_errno_or_class,
const char *f, va_list ap)
{
static int _abort_on_internal_errors = -1;
FILE *out = log_stderr(level) ? stderr : stdout;
level = log_level(level);
if (level <= _LOG_WARN || _verbose) {
if (level < _LOG_WARN)
out = stderr;
vfprintf(out, f, ap);
fputc('\n', out);
}
if (_abort_on_internal_errors < 0)
/* Set when env DM_ABORT_ON_INTERNAL_ERRORS is not "0" */
_abort_on_internal_errors =
strcmp(getenv("DM_ABORT_ON_INTERNAL_ERRORS") ? : "0", "0");
if (_abort_on_internal_errors &&
!strncmp(f, INTERNAL_ERROR, sizeof(INTERNAL_ERROR) - 1))
abort();
}
__attribute__((format(printf, 5, 6)))
static void _dmfilemapd_log_with_errno(int level,
const char *file, int line,
int dm_errno_or_class,
const char *f, ...)
{
va_list ap;
va_start(ap, f);
_dmfilemapd_log_line(level, file, line, dm_errno_or_class, f, ap);
va_end(ap);
}
/*
* Only used for reporting errors before daemonise().
*/
__attribute__((format(printf, 1, 2)))
static void _early_log(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vfprintf(stderr, fmt, ap);
fputc('\n', stderr);
va_end(ap);
}
static void _setup_logging(void)
{
dm_log_init_verbose(_verbose - 1);
dm_log_with_errno_init(_dmfilemapd_log_with_errno);
}
#define PROC_FD_DELETED_STR "(deleted)"
/*
* Scan the /proc/<pid>/fd directory for pid and check for an fd
* symlink whose contents match path.
*/
static int _is_open_in_pid(pid_t pid, const char *path)
{
char deleted_path[PATH_MAX + sizeof(PROC_FD_DELETED_STR)];
struct dirent *pid_dp = NULL;
char path_buf[PATH_MAX];
char link_buf[PATH_MAX];
DIR *pid_d = NULL;
ssize_t len;
if (pid == getpid())
return 0;
if (dm_snprintf(path_buf, sizeof(path_buf),
DEFAULT_PROC_DIR "%d/fd", pid) < 0) {
log_error("Could not format pid path.");
return 0;
}
/*
* Test for the kernel 'file (deleted)' form when scanning.
*/
if (dm_snprintf(deleted_path, sizeof(deleted_path), "%s %s",
path, PROC_FD_DELETED_STR) < 0) {
log_error("Could not format check path.");
return 0;
}
pid_d = opendir(path_buf);
if (!pid_d) {
log_error("Could not open proc path: %s.", path_buf);
return 0;
}
while ((pid_dp = readdir(pid_d)) != NULL) {
if (pid_dp->d_name[0] == '.')
continue;
if ((len = readlinkat(dirfd(pid_d), pid_dp->d_name, link_buf,
sizeof(link_buf))) < 0) {
log_error("readlink failed for " DEFAULT_PROC_DIR
"/%d/fd/.", pid);
goto bad;
}
link_buf[len] = '\0';
if (!strcmp(deleted_path, link_buf)) {
if (closedir(pid_d))
log_sys_error("closedir", path_buf);
return 1;
}
}
bad:
if (closedir(pid_d))
log_sys_error("closedir", path_buf);
return 0;
}
/*
* Attempt to determine whether a file is open by any process by
* scanning symbolic links in /proc/<pid>/fd.
*
* This is a heuristic since it cannot guarantee to detect brief
* access in all cases: a process that opens and then closes the
* file rapidly may never be seen by the scan.
*
* The method will also give false positives if a process exists
* that has a deleted file open with the same path as the file
* being monitored, but a different inode number.
*
* For this reason the daemon only uses _is_open() for unlinked
* files when the mode is DM_FILEMAPD_FOLLOW_INODE, since these
* files can no longer be newly opened by processes.
*
* In this situation !is_open(path) provides an indication that
* the daemon should shut down: the file has been unlinked from
* the file system and we appear to hold the final reference.
*/
static int _is_open(const char *path)
{
struct dirent *proc_dp = NULL;
DIR *proc_d = NULL;
pid_t pid;
proc_d = opendir(DEFAULT_PROC_DIR);
if (!proc_d)
return 0;
while ((proc_dp = readdir(proc_d)) != NULL) {
if (!isdigit(proc_dp->d_name[0]))
continue;
errno = 0;
pid = (pid_t) strtol(proc_dp->d_name, NULL, 10);
if (errno || !pid)
continue;
if (_is_open_in_pid(pid, path)) {
if (closedir(proc_d))
log_sys_error("closedir", DEFAULT_PROC_DIR);
return 1;
}
}
if (closedir(proc_d))
log_sys_error("closedir", DEFAULT_PROC_DIR);
return 0;
}
static void _filemap_monitor_wait(uint64_t usecs)
{
if (_verbose) {
if (usecs == FILEMAPD_WAIT_USECS)
log_very_verbose("Waiting for check interval");
if (usecs == FILEMAPD_NOFILE_WAIT_USECS)
log_very_verbose("Waiting for unlinked path");
}
usleep((useconds_t) usecs);
}
static int _parse_args(int argc, char **argv, struct filemap_monitor *fm)
{
char *endptr;
/* we don't care what is in argv[0]. */
argc--;
argv++;
if (argc < 5) {
_early_log("Wrong number of arguments.");
_early_log("usage: %s", _usage);
return 0;
}
/*
* We don't know the true nr_regions at daemon start time,
* and it is not worth a dm_stats_list()/group walk to count:
* we can assume that there is at least one region or the
* daemon would not have been started.
*
* A correct value will be obtained following the first update
* of the group's regions.
*/
fm->nr_regions = 1;
/* parse <fd> */
errno = 0;
fm->fd = (int) strtol(argv[0], &endptr, 10);
if (errno || *endptr) {
_early_log("Could not parse file descriptor: %s", argv[0]);
return 0;
}
argc--;
argv++;
/* parse <group_id> */
errno = 0;
fm->group_id = strtoull(argv[0], &endptr, 10);
if (*endptr || errno) {
_early_log("Could not parse group identifier: %s", argv[0]);
return 0;
}
argc--;
argv++;
/* parse <path> */
if (!argv[0] || !strlen(argv[0])) {
_early_log("Path argument is required.");
return 0;
}
if (*argv[0] != '/') {
_early_log("Path argument must specify an absolute path.");
return 0;
}
fm->path = dm_strdup(argv[0]);
if (!fm->path) {
_early_log("Could not allocate memory for path argument.");
return 0;
}
argc--;
argv++;
/* parse <mode> */
if (!argv[0] || !strlen(argv[0])) {
_early_log("Mode argument is required.");
return 0;
}
fm->mode = dm_filemapd_mode_from_string(argv[0]);
if (fm->mode == DM_FILEMAPD_FOLLOW_NONE)
return 0;
argc--;
argv++;
/* parse [<foreground>[<verbose>]] */
if (argc) {
errno = 0;
_foreground = (int) strtol(argv[0], &endptr, 10);
if (errno || *endptr) {
_early_log("Could not parse debug argument: %s.",
argv[0]);
return 0;
}
argc--;
argv++;
if (argc) {
errno = 0;
_verbose = (int) strtol(argv[0], &endptr, 10);
if (errno || *endptr) {
_early_log("Could not parse verbose "
"argument: %s", argv[0]);
return 0;
}
if (_verbose < 0 || _verbose > 3) {
_early_log("Verbose argument out of range: %d.",
_verbose);
return 0;
}
}
}
return 1;
}
static int _filemap_fd_update_blocks(struct filemap_monitor *fm)
{
struct stat buf;
if (fm->fd < 0) {
log_error("Filemap fd is not open.");
return 0;
}
if (fstat(fm->fd, &buf)) {
log_error("Failed to fstat filemap file descriptor.");
return 0;
}
fm->blocks = buf.st_blocks;
return 1;
}
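/*
 * Compare the file's current st_blocks count with the last recorded
 * value as a cheap indication that its allocated extents may have
 * changed and the stats regions should be re-mapped. Returns -1 on
 * error, 0 if unchanged and 1 if the block count differs.
 */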
static int _filemap_fd_check_changed(struct filemap_monitor *fm)
{
int64_t old_blocks;
old_blocks = fm->blocks;
if (!_filemap_fd_update_blocks(fm))
return -1;
return (fm->blocks != old_blocks);
}
static void _filemap_monitor_close_fd(struct filemap_monitor *fm)
{
if (close(fm->fd))
log_error("Error closing file descriptor.");
fm->fd = -1;
}
static void _filemap_monitor_end_notify(struct filemap_monitor *fm)
{
inotify_rm_watch(fm->inotify_fd, fm->inotify_watch_fd);
}
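/*
 * Create an inotify instance watching the monitored path for IN_MODIFY
 * and IN_DELETE_SELF events and store the descriptors in the monitor.
 */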
static int _filemap_monitor_set_notify(struct filemap_monitor *fm)
{
int inotify_fd, watch_fd;
/*
* Set IN_NONBLOCK since we do not want to block in event read()
* calls. Do not set IN_CLOEXEC as dmfilemapd is single-threaded
* and does not fork or exec.
*/
if ((inotify_fd = inotify_init1(IN_NONBLOCK)) < 0) {
log_sys_error("inotify_init1", "IN_NONBLOCK");
return 0;
}
if ((watch_fd = inotify_add_watch(inotify_fd, fm->path,
IN_MODIFY | IN_DELETE_SELF)) < 0) {
log_sys_error("inotify_add_watch", fm->path);
return 0;
}
fm->inotify_fd = inotify_fd;
fm->inotify_watch_fd = watch_fd;
return 1;
}
static int _filemap_monitor_reopen_fd(struct filemap_monitor *fm)
{
int tries = FILEMAPD_NOFILE_WAIT_TRIES;
/*
* In DM_FILEMAPD_FOLLOW_PATH mode, inotify watches must be
* re-established whenever the file at the watched path is
* changed.
*
* FIXME: stat file and skip if inode is unchanged.
*/
if (fm->fd > 0)
log_error("Filemap file descriptor already open.");
while ((fm->fd < 0) && --tries)
if (((fm->fd = open(fm->path, O_RDONLY)) < 0) && tries)
_filemap_monitor_wait(FILEMAPD_NOFILE_WAIT_USECS);
if (!tries && (fm->fd < 0)) {
log_error("Could not re-open file descriptor.");
return 0;
}
return _filemap_monitor_set_notify(fm);
}
static int _filemap_monitor_get_events(struct filemap_monitor *fm)
{
/* alignment as per man(7) inotify */
char buf[sizeof(struct inotify_event) + NAME_MAX + 1]
__attribute__ ((aligned(__alignof__(struct inotify_event))));
struct inotify_event *event;
int check = 0;
ssize_t len;
char *ptr;
/*
* Close the file descriptor for the file being monitored here
* when mode=path: this will allow the inode to be de-allocated,
	 * and an IN_DELETE_SELF event to be generated if the daemon
	 * holds the last open reference to the file.
*/
if (fm->mode == DM_FILEMAPD_FOLLOW_PATH) {
_filemap_monitor_end_notify(fm);
_filemap_monitor_close_fd(fm);
}
len = read(fm->inotify_fd, (void *) &buf, sizeof(buf));
/* no events to read? */
if (len < 0 && (errno == EAGAIN))
goto out;
/* interrupted by signal? */
if (len < 0 && (errno == EINTR))
goto out;
if (len < 0)
return -1;
if (!len)
goto out;
for (ptr = buf; ptr < buf + len; ptr += sizeof(*event) + event->len) {
event = (struct inotify_event *) ptr;
if (event->mask & IN_DELETE_SELF)
fm->deleted = 1;
if (event->mask & IN_MODIFY)
check = 1;
/*
		 * An IN_IGNORED event is generated after a file has been
		 * deleted and IN_DELETE_SELF raised; it indicates that the
		 * file watch has been automatically removed.
*
* This can only happen for the DM_FILEMAPD_FOLLOW_PATH mode,
* since inotify IN_DELETE events are generated at the time
* the inode is destroyed: DM_FILEMAPD_FOLLOW_INODE will hold
* the file descriptor open, meaning that the event will not
* be generated until after the daemon closes the file.
*
* The event is ignored here since inotify monitoring will
* be reestablished (or the daemon will terminate) following
* deletion of a DM_FILEMAPD_FOLLOW_PATH monitored file.
*/
if (event->mask & IN_IGNORED)
log_very_verbose("Inotify watch removed: IN_IGNORED "
"in event->mask");
}
out:
/*
* Re-open file descriptor if required and log disposition.
*/
if (fm->mode == DM_FILEMAPD_FOLLOW_PATH)
if (!_filemap_monitor_reopen_fd(fm))
return -1;
log_very_verbose("exiting _filemap_monitor_get_events() with "
"deleted=%d, check=%d", fm->deleted, check);
return check;
}
static void _filemap_monitor_destroy(struct filemap_monitor *fm)
{
if (fm->fd > 0) {
_filemap_monitor_end_notify(fm);
_filemap_monitor_close_fd(fm);
}
dm_free((void *) fm->program_id);
dm_free(fm->path);
}
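/*
 * Compare two open file descriptors by (st_dev, st_ino) to decide
 * whether they refer to the same inode: returns 1 if they match,
 * 0 if not and -1 if either fstat() fails.
 */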
static int _filemap_monitor_check_same_file(int fd1, int fd2)
{
struct stat buf1, buf2;
if ((fd1 < 0) || (fd2 < 0))
return 0;
if (fstat(fd1, &buf1)) {
log_error("Failed to fstat file descriptor %d", fd1);
return -1;
}
if (fstat(fd2, &buf2)) {
log_error("Failed to fstat file descriptor %d", fd2);
return -1;
}
return ((buf1.st_dev == buf2.st_dev) && (buf1.st_ino == buf2.st_ino));
}
static int _filemap_monitor_check_file_unlinked(struct filemap_monitor *fm)
{
char path_buf[PATH_MAX];
char link_buf[PATH_MAX];
int same, fd;
ssize_t len;
fm->deleted = 0;
same = 0;
if ((fd = open(fm->path, O_RDONLY)) < 0)
goto check_unlinked;
same = _filemap_monitor_check_same_file(fm->fd, fd);
if (close(fd))
log_error("Error closing fd %d", fd);
if (same < 0)
return 0;
if (same)
return 1;
check_unlinked:
/*
* The file has been unlinked from its original location: test
* whether it is still reachable in the filesystem, or if it is
* unlinked and anonymous.
*/
if (dm_snprintf(path_buf, sizeof(path_buf), DEFAULT_PROC_DIR
"/%d/fd/%d", getpid(), fm->fd) < 0) {
log_error("Could not format pid path.");
return 0;
}
if ((len = readlink(path_buf, link_buf, sizeof(link_buf) - 1)) < 0) {
log_error("readlink failed for " DEFAULT_PROC_DIR "/%d/fd/%d.",
getpid(), fm->fd);
return 0;
}
link_buf[len] = '\0';
/*
* Try to re-open the file, from the path now reported in /proc/pid/fd.
*/
if ((fd = open(link_buf, O_RDONLY)) < 0)
fm->deleted = 1;
else
same = _filemap_monitor_check_same_file(fm->fd, fd);
if ((fd >= 0) && close(fd))
log_error("Error closing fd %d", fd);
if (same < 0)
return 0;
/* Should not happen with normal /proc. */
if ((fd > 0) && !same) {
log_error("File descriptor mismatch: %d and %s (read from %s) "
"are not the same file!", fm->fd, link_buf, path_buf);
return 0;
}
return 1;
}
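/*
 * Detach from the invoking environment: create a new session, fork and
 * exit in the parent, chdir to "/", close or redirect the standard
 * streams unless verbose logging is enabled, and close every other
 * inherited descriptor except the monitored file's fd.
 */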
static int _daemonise(struct filemap_monitor *fm)
{
pid_t pid = 0;
int fd;
	if (setsid() < 0) {
_early_log("setsid failed.");
return 0;
}
if ((pid = fork()) < 0) {
_early_log("Failed to fork daemon process.");
return 0;
}
if (pid > 0) {
if (_verbose)
_early_log("Started dmfilemapd with pid=%d", pid);
exit(0);
}
if (chdir("/")) {
_early_log("Failed to change directory.");
return 0;
}
if (!_verbose) {
if (close(STDIN_FILENO))
_early_log("Error closing stdin");
if (close(STDOUT_FILENO))
_early_log("Error closing stdout");
if (close(STDERR_FILENO))
_early_log("Error closing stderr");
if ((open("/dev/null", O_RDONLY) < 0) ||
(open("/dev/null", O_WRONLY) < 0) ||
(open("/dev/null", O_WRONLY) < 0)) {
_early_log("Error opening stdio streams.");
return 0;
}
}
/* TODO: Use libdaemon/server/daemon-server.c _daemonise() */
for (fd = (int) sysconf(_SC_OPEN_MAX) - 1; fd > STDERR_FILENO; fd--) {
if (fd == fm->fd)
continue;
(void) close(fd);
}
return 1;
}
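/*
 * Re-map the file's extents with dm_stats_update_regions_from_fd() and
 * record the new region count; if region 0 was re-created the group_id
 * is updated to follow the new group leader.
 */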
static int _update_regions(struct dm_stats *dms, struct filemap_monitor *fm)
{
uint64_t *regions = NULL, *region, nr_regions = 0;
regions = dm_stats_update_regions_from_fd(dms, fm->fd, fm->group_id);
if (!regions) {
log_error("Failed to update filemap regions for group_id="
FMTu64 ".", fm->group_id);
return 0;
}
for (region = regions; *region != DM_STATS_REGIONS_ALL; region++)
nr_regions++;
if (!nr_regions)
log_warn("File contains no extents: exiting.");
if (nr_regions && (regions[0] != fm->group_id)) {
log_warn("group_id changed from " FMTu64 " to " FMTu64,
fm->group_id, regions[0]);
fm->group_id = regions[0];
}
dm_free(regions);
fm->nr_regions = nr_regions;
return 1;
}
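/*
 * Main monitoring loop: bind a dm_stats handle to the monitored fd, set
 * up inotify, then repeatedly wait for events, re-check the file's
 * block count, update the regions when it changes, and exit once the
 * group is removed, the file has no extents or, in inode mode, the file
 * has been unlinked and is no longer open by any process.
 */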
static int _dmfilemapd(struct filemap_monitor *fm)
{
int running = 1, check = 0, open = 0;
const char *program_id;
struct dm_stats *dms;
/*
* The correct program_id is retrieved from the group leader
* following the call to dm_stats_list().
*/
if (!(dms = dm_stats_create(NULL)))
goto_bad;
if (!dm_stats_bind_from_fd(dms, fm->fd)) {
log_error("Could not bind dm_stats handle to file descriptor "
"%d", fm->fd);
goto bad;
}
if (!_filemap_monitor_set_notify(fm))
goto bad;
if (!_filemap_fd_update_blocks(fm))
goto bad;
if (!dm_stats_list(dms, DM_STATS_ALL_PROGRAMS)) {
log_error("Failed to list stats handle.");
goto bad;
}
/*
* Take the program_id for new regions (created by calls to
* dm_stats_update_regions_from_fd()) from the value used by
* the group leader.
*/
program_id = dm_stats_get_region_program_id(dms, fm->group_id);
if (program_id)
fm->program_id = dm_strdup(program_id);
else
fm->program_id = NULL;
dm_stats_set_program_id(dms, 1, program_id);
do {
if (!dm_stats_group_present(dms, fm->group_id)) {
log_info("Filemap group removed: exiting.");
running = 0;
continue;
}
if ((check = _filemap_monitor_get_events(fm)) < 0)
goto bad;
if (!check)
goto wait;
if ((check = _filemap_fd_check_changed(fm)) < 0)
goto bad;
if (check && !_update_regions(dms, fm))
goto bad;
running = !!fm->nr_regions;
if (!running)
continue;
wait:
_filemap_monitor_wait(FILEMAPD_WAIT_USECS);
		/* mode=inode termination conditions */
if (fm->mode == DM_FILEMAPD_FOLLOW_INODE) {
if (!_filemap_monitor_check_file_unlinked(fm))
goto bad;
if (fm->deleted && !(open = _is_open(fm->path))) {
log_info("File unlinked and closed: exiting.");
running = 0;
} else if (fm->deleted && open)
log_verbose("File unlinked and open: "
"continuing.");
}
if (!dm_stats_list(dms, NULL)) {
log_error("Failed to list stats handle.");
goto bad;
}
} while (running);
_filemap_monitor_destroy(fm);
dm_stats_destroy(dms);
return 0;
bad:
_filemap_monitor_destroy(fm);
dm_stats_destroy(dms);
log_error("Exiting");
return 1;
}
static const char * const _mode_names[] = {
"inode",
"path"
};
/*
 * dmfilemapd <fd> <group_id> <path> <mode> [<foreground>[<verbose>]]
*/
int main(int argc, char **argv)
{
struct filemap_monitor fm;
memset(&fm, 0, sizeof(fm));
if (!_parse_args(argc, argv, &fm)) {
dm_free(fm.path);
return 1;
}
_setup_logging();
log_info("Starting dmfilemapd with fd=%d, group_id=" FMTu64 " "
"mode=%s, path=%s", fm.fd, fm.group_id,
_mode_names[fm.mode], fm.path);
if (!_foreground && !_daemonise(&fm)) {
dm_free(fm.path);
return 1;
}
return _dmfilemapd(&fm);
}
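/*
 * Illustrative invocation only: dmfilemapd is not normally run by hand
 * (it is typically spawned for a filemap group by the dmstats tooling
 * with <fd> already open on the mapped file), and the path below is
 * hypothetical:
 *
 *   dmfilemapd 3 0 /var/tmp/data.img inode 1 3
 *
 * i.e. monitor group_id 0 through open fd 3, follow the inode, stay in
 * the foreground and log at verbosity 3.
 */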

View File

@@ -1,4 +0,0 @@
path.py
lvmdbusd
lvmdb.py
lvm_shell_proxy.py

View File

@@ -26,11 +26,14 @@ LVMDBUS_SRCDIR_FILES = \
__init__.py \
job.py \
loader.py \
lvmdb.py \
main.py \
lvm_shell_proxy.py \
lv.py \
manager.py \
objectmanager.py \
pv.py \
refresh.py \
request.py \
state.py \
udevwatch.py \
@@ -38,21 +41,14 @@ LVMDBUS_SRCDIR_FILES = \
vg.py
LVMDBUS_BUILDDIR_FILES = \
lvmdb.py \
lvm_shell_proxy.py \
path.py
LVMDBUSD = lvmdbusd
CLEAN_DIRS += __pycache__
LVMDBUSD = $(srcdir)/lvmdbusd
include $(top_builddir)/make.tmpl
.PHONY: install_lvmdbusd
all:
test -x $(LVMDBUSD) || chmod 755 $(LVMDBUSD)
install_lvmdbusd:
$(INSTALL_DIR) $(sbindir)
$(INSTALL_SCRIPT) $(LVMDBUSD) $(sbindir)
@@ -68,5 +64,4 @@ install_lvm2: install_lvmdbusd
install: install_lvm2
DISTCLEAN_TARGETS+= \
$(LVMDBUS_BUILDDIR_FILES) \
$(LVMDBUSD)
$(LVMDBUS_BUILDDIR_FILES)

View File

@@ -8,7 +8,6 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import dbus
import dbus.service
from . import cfg
from .utils import get_properties, add_properties, get_object_property_diff, \
log_debug
@@ -38,7 +37,7 @@ class AutomatedProperties(dbus.service.Object):
props = {}
for i in self.interface():
props[i] = AutomatedProperties._get_all_prop(self, i)
props[i] = self.GetAll(i)
return self._ap_o_path, props
@@ -65,52 +64,31 @@ class AutomatedProperties(dbus.service.Object):
return self._ap_interface
@staticmethod
def _get_prop(obj, interface_name, property_name):
value = getattr(obj, property_name)
# Properties
# noinspection PyUnusedLocal
@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
in_signature='ss', out_signature='v')
def Get(self, interface_name, property_name):
value = getattr(self, property_name)
# Note: If we get an exception in this handler we won't know about it,
# only the side effect of no returned value!
log_debug('Get (%s), type (%s), value(%s)' %
(property_name, str(type(value)), str(value)))
return value
# Properties
# noinspection PyUnusedLocal
@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
in_signature='ss', out_signature='v',
async_callbacks=('cb', 'cbe'))
def Get(self, interface_name, property_name, cb, cbe):
# Note: If we get an exception in this handler we won't know about it,
# only the side effect of no returned value!
r = cfg.create_request_entry(
-1, AutomatedProperties._get_prop,
(self, interface_name, property_name),
cb, cbe, False)
cfg.worker_q.put(r)
@staticmethod
def _get_all_prop(obj, interface_name):
if interface_name in obj.interface(True):
in_signature='s', out_signature='a{sv}')
def GetAll(self, interface_name):
if interface_name in self.interface(True):
# Using introspection, lets build this dynamically
properties = get_properties(obj)
properties = get_properties(self)
if interface_name in properties:
return properties[interface_name][1]
return {}
raise dbus.exceptions.DBusException(
obj._ap_interface,
self._ap_interface,
'The object %s does not implement the %s interface'
% (obj.__class__, interface_name))
@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
in_signature='s', out_signature='a{sv}',
async_callbacks=('cb', 'cbe'))
def GetAll(self, interface_name, cb, cbe):
r = cfg.create_request_entry(
-1, AutomatedProperties._get_all_prop,
(self, interface_name),
cb, cbe, False)
cfg.worker_q.put(r)
% (self.__class__, interface_name))
@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
in_signature='ssv')
@@ -180,7 +158,10 @@ class AutomatedProperties(dbus.service.Object):
cfg.om.lookup_update(self, new_id[0], new_id[1])
# Grab the properties values, then replace the state of the object
# and retrieve the new values.
# and retrieve the new values
# TODO: We need to add locking to prevent concurrent access to the
# properties so that a client is not accessing while we are
# replacing.
o_prop = get_properties(self)
self.state = new_state
n_prop = get_properties(self)

View File

@@ -7,15 +7,17 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
import subprocess
from . import cfg
from .cmdhandler import options_to_cli_args, LvmExecutionMeta
import dbus
from .utils import pv_range_append, pv_dest_ranges, log_error, log_debug,\
add_no_notify
import os
import threading
import time
from .cmdhandler import options_to_cli_args
import dbus
from .utils import pv_range_append, pv_dest_ranges, log_error, log_debug
import traceback
_rlock = threading.RLock()
_thread_list = list()
def pv_move_lv_cmd(move_options, lv_full_name,
@@ -39,58 +41,15 @@ def lv_merge_cmd(merge_options, lv_full_name):
return cmd
def _move_merge(interface_name, command, job_state):
	# We always need to execute these commands stand-alone by forking &
	# exec'ing, as we will be getting periodic output from them on the
	# status of the long-running operation.
command.insert(0, cfg.LVM_CMD)
def _move_merge(interface_name, cmd, job_state):
add(cmd, job_state)
# Instruct lvm to not register an event with us
command = add_no_notify(command)
#(self, start, ended, cmd, ec, stdout_txt, stderr_txt)
meta = LvmExecutionMeta(time.time(), 0, command, -1000, None, None)
cfg.blackbox.add(meta)
process = subprocess.Popen(command, stdout=subprocess.PIPE,
env=os.environ,
stderr=subprocess.PIPE, close_fds=True)
log_debug("Background process for %s is %d" %
(str(command), process.pid))
lines_iterator = iter(process.stdout.readline, b"")
for line in lines_iterator:
line_str = line.decode("utf-8")
# Check to see if the line has the correct number of separators
try:
if line_str.count(':') == 2:
(device, ignore, percentage) = line_str.split(':')
job_state.Percent = round(
float(percentage.strip()[:-1]), 1)
# While the move is in progress we need to periodically update
# the state to reflect where everything is at.
cfg.load()
except ValueError:
log_error("Trying to parse percentage which failed for %s" %
line_str)
out = process.communicate()
with meta.lock:
meta.ended = time.time()
meta.ec = process.returncode
meta.stderr_txt = out[1]
if process.returncode == 0:
job_state.Percent = 100
else:
done = job_state.Wait(-1)
if not done:
ec, err_msg = job_state.GetError
raise dbus.exceptions.DBusException(
interface_name,
'Exit code %s, stderr = %s' % (str(process.returncode), out[1]))
'Exit code %s, stderr = %s' % (str(ec), err_msg))
cfg.load()
return '/'
@@ -125,6 +84,8 @@ def move(interface_name, lv_name, pv_src_obj, pv_source_range,
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
# Generate the command line for this command, but don't
# execute it.
cmd = pv_move_lv_cmd(move_options,
lv_name,
pv_src.lvm_id,
@@ -149,15 +110,92 @@ def merge(interface_name, lv_uuid, lv_name, merge_options, job_state):
'LV with uuid %s and name %s not present!' % (lv_uuid, lv_name))
def _run_cmd(req):
log_debug(
"_run_cmd: Running method: %s with args %s" %
(str(req.method), str(req.arguments)))
req.run_cmd()
log_debug("_run_cmd: complete!")
def background_reaper():
while cfg.run.value != 0:
with _rlock:
num_threads = len(_thread_list) - 1
if num_threads >= 0:
for i in range(num_threads, -1, -1):
_thread_list[i].join(0)
if not _thread_list[i].is_alive():
log_debug("Removing thread: %s" % _thread_list[i].name)
_thread_list.pop(i)
time.sleep(3)
def cmd_runner(request):
t = threading.Thread(target=_run_cmd, args=(request,),
name="cmd_runner %s" % str(request.method))
def background_execute(command, background_job):
# Wrap this whole operation in an exception handler, otherwise if we
# hit a code bug we will silently exit this thread without anyone being
# the wiser.
try:
process = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
lines_iterator = iter(process.stdout.readline, b"")
for line in lines_iterator:
line_str = line.decode("utf-8")
# Check to see if the line has the correct number of separators
try:
if line_str.count(':') == 2:
(device, ignore, percentage) = line_str.split(':')
background_job.Percent = \
round(float(percentage.strip()[:-1]), 1)
except ValueError:
log_error("Trying to parse percentage which failed for %s" %
line_str)
out = process.communicate()
if process.returncode == 0:
background_job.Percent = 100
background_job.set_result(process.returncode, out[1])
except Exception:
		# In the unlikely event that we blow up, we need to unblock the
		# caller which is waiting on an answer.
st = traceback.format_exc()
error = "Exception in background thread: \n%s" % st
log_error(error)
background_job.set_result(1, error)
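# Start background_execute() for the given command on its own thread and
# register the thread with the reaper's list.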
def add(command, reporting_job):
# Create the thread, get it running and then add it to the list
t = threading.Thread(
target=background_execute,
name="thread: " + ' '.join(command),
args=(command, reporting_job))
t.start()
with _rlock:
_thread_list.append(t)
def wait_thread(job, timeout, cb, cbe):
	# We need to put the wait on its own thread, so that we don't block the
	# entire dbus queue processing thread.
try:
cb(job.state.Wait(timeout))
except Exception as e:
cbe("Wait exception: %s" % str(e))
return 0
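# A zero timeout is treated as a poll: answer immediately with the job's
# Complete state. Otherwise the wait runs on its own thread so the dbus
# request queue is never blocked.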
def add_wait(job, timeout, cb, cbe):
if timeout == 0:
# Users are basically polling, do not create thread
cb(job.Complete)
else:
t = threading.Thread(
target=wait_thread,
name="thread job.Wait: %s" % job.dbus_object_path(),
args=(job, timeout, cb, cbe)
)
t.start()
with _rlock:
_thread_list.append(t)

View File

@@ -11,8 +11,10 @@ import os
import multiprocessing
import queue
import itertools
from lvmdbusd import path
try:
from . import path
except SystemError:
import path
LVM_CMD = os.getenv('LVM_BINARY', path.LVM_BINARY)
@@ -22,28 +24,24 @@ om = None
# This is the global bus connection
bus = None
# Command line args
args = None
# Set to true if we are depending on external events for updates
got_external_event = False
# Shared state variable across all processes
run = multiprocessing.Value('i', 1)
# If this is set to true, the current setup support lvm shell and we are
# running in that mode of operation
SHELL_IN_USE = None
# Debug
DEBUG = True
# Use lvm shell
USE_SHELL = False
# Lock used by pprint
stdout_lock = multiprocessing.Lock()
kick_q = multiprocessing.Queue()
worker_q = queue.Queue()
# Main event loop
loop = None
BUS_NAME = os.getenv('LVM_DBUS_NAME', 'com.redhat.lvmdbus1')
BASE_INTERFACE = 'com.redhat.lvmdbus1'
PV_INTERFACE = BASE_INTERFACE + '.Pv'
VG_INTERFACE = BASE_INTERFACE + '.Vg'
@@ -77,13 +75,9 @@ hidden_lv = itertools.count()
# Used to prevent circular imports...
load = None
event = None
# Global cached state
db = None
# lvm flight recorder
blackbox = None
# RequestEntry ctor
create_request_entry = None

View File

@@ -12,12 +12,15 @@ import time
import threading
from itertools import chain
import collections
import traceback
import os
from lvmdbusd import cfg
from lvmdbusd.utils import pv_dest_ranges, log_debug, log_error, add_no_notify
from lvmdbusd.lvm_shell_proxy import LVMShellProxy
try:
from . import cfg
from .utils import pv_dest_ranges, log_debug, log_error
from .lvm_shell_proxy import LVMShellProxy
except SystemError:
import cfg
from utils import pv_dest_ranges, log_debug, log_error
from lvm_shell_proxy import LVMShellProxy
try:
import simplejson as json
@@ -33,11 +36,14 @@ total_count = 0
# at the same time.
cmd_lock = threading.RLock()
# The actual method which gets called to invoke the lvm command, can vary
# from forking a new process to using lvm shell
_t_call = None
class LvmExecutionMeta(object):
def __init__(self, start, ended, cmd, ec, stdout_txt, stderr_txt):
self.lock = threading.RLock()
self.start = start
self.ended = ended
self.cmd = cmd
@@ -46,30 +52,28 @@ class LvmExecutionMeta(object):
self.stderr_txt = stderr_txt
def __str__(self):
with self.lock:
return "EC= %d for %s\n" \
"STARTED: %f, ENDED: %f\n" \
"STDOUT=%s\n" \
"STDERR=%s\n" % \
(self.ec, str(self.cmd), self.start, self.ended, self.stdout_txt,
self.stderr_txt)
return "EC= %d for %s\n" \
"STARTED: %f, ENDED: %f\n" \
"STDOUT=%s\n" \
"STDERR=%s\n" % \
(self.ec, str(self.cmd), self.start, self.ended, self.stdout_txt,
self.stderr_txt)
class LvmFlightRecorder(object):
def __init__(self, size=16):
self.queue = collections.deque(maxlen=size)
def __init__(self):
self.queue = collections.deque(maxlen=16)
def add(self, lvm_exec_meta):
self.queue.append(lvm_exec_meta)
def dump(self):
with cmd_lock:
if len(self.queue):
log_error("LVM dbus flight recorder START")
for c in self.queue:
log_error(str(c))
log_error("LVM dbus flight recorder END")
log_error("LVM dbus flight recorder START")
for c in self.queue:
log_error(str(c))
log_error("LVM dbus flight recorder END")
cfg.blackbox = LvmFlightRecorder()
@@ -95,10 +99,8 @@ def call_lvm(command, debug=False):
# Prepend the full lvm executable so that we can run different versions
# in different locations on the same box
command.insert(0, cfg.LVM_CMD)
command = add_no_notify(command)
process = Popen(command, stdout=PIPE, stderr=PIPE, close_fds=True,
env=os.environ)
process = Popen(command, stdout=PIPE, stderr=PIPE, close_fds=True)
out = process.communicate()
stdout_text = bytes(out[0]).decode("utf-8")
@@ -107,48 +109,37 @@ def call_lvm(command, debug=False):
if debug or process.returncode != 0:
_debug_c(command, process.returncode, (stdout_text, stderr_text))
return process.returncode, stdout_text, stderr_text
if process.returncode == 0:
if cfg.DEBUG and out[1] and len(out[1]) and 'help' not in command:
			log_error('WARNING: lvm is outputting text to STDERR on success!')
_debug_c(command, process.returncode, (stdout_text, stderr_text))
# The actual method which gets called to invoke the lvm command, can vary
# from forking a new process to using lvm shell
_t_call = call_lvm
return process.returncode, stdout_text, stderr_text
def _shell_cfg():
global _t_call
# noinspection PyBroadException
try:
lvm_shell = LVMShellProxy()
_t_call = lvm_shell.call_lvm
cfg.SHELL_IN_USE = lvm_shell
return True
except Exception:
_t_call = call_lvm
cfg.SHELL_IN_USE = None
log_error(traceback.format_exc())
log_error("Unable to utilize lvm shell, dropping back to fork & exec")
return False
log_debug('Using lvm shell!')
lvm_shell = LVMShellProxy()
_t_call = lvm_shell.call_lvm
if cfg.USE_SHELL:
_shell_cfg()
else:
_t_call = call_lvm
def set_execution(shell):
global _t_call
with cmd_lock:
# If the user requested lvm shell and we are currently setup that
# way, just return
if cfg.SHELL_IN_USE and shell:
return True
else:
if not shell and cfg.SHELL_IN_USE:
cfg.SHELL_IN_USE.exit_shell()
cfg.SHELL_IN_USE = None
_t_call = call_lvm
_t_call = None
if shell:
if cfg.args.use_json:
return _shell_cfg()
else:
return False
return True
log_debug('Using lvm shell!')
lvm_shell = LVMShellProxy()
_t_call = lvm_shell.call_lvm
else:
_t_call = call_lvm
def time_wrapper(command, debug=False):
@@ -228,10 +219,6 @@ def pv_remove(device, remove_options):
return call(cmd)
def _qt(tag_name):
return '@%s' % tag_name
def _tag(operation, what, add, rm, tag_options):
cmd = [operation]
cmd.extend(options_to_cli_args(tag_options))
@@ -242,11 +229,9 @@ def _tag(operation, what, add, rm, tag_options):
cmd.append(what)
if add:
cmd.extend(list(chain.from_iterable(
('--addtag', _qt(x)) for x in add)))
cmd.extend(list(chain.from_iterable(('--addtag', x) for x in add)))
if rm:
cmd.extend(list(chain.from_iterable(
('--deltag', _qt(x)) for x in rm)))
cmd.extend(list(chain.from_iterable(('--deltag', x) for x in rm)))
return call(cmd, False)
@@ -281,7 +266,7 @@ def vg_lv_create(vg_name, create_options, name, size_bytes, pv_dests):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name, '--yes'])
cmd.extend(['--name', name, vg_name])
pv_dest_ranges(cmd, pv_dests)
return call(cmd)
@@ -298,7 +283,20 @@ def vg_lv_snapshot(vg_name, snapshot_options, name, size_bytes):
return call(cmd)
def _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool):
def vg_lv_create_linear(vg_name, create_options, name, size_bytes, thin_pool):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
if not thin_pool:
cmd.extend(['--size', str(size_bytes) + 'B'])
else:
cmd.extend(['--thin', '--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name])
return call(cmd)
def vg_lv_create_striped(vg_name, create_options, name, size_bytes,
num_stripes, stripe_size_kb, thin_pool):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
@@ -307,19 +305,6 @@ def _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool):
else:
cmd.extend(['--thin', '--size', str(size_bytes) + 'B'])
cmd.extend(['--yes'])
return cmd
def vg_lv_create_linear(vg_name, create_options, name, size_bytes, thin_pool):
cmd = _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool)
cmd.extend(['--name', name, vg_name])
return call(cmd)
def vg_lv_create_striped(vg_name, create_options, name, size_bytes,
num_stripes, stripe_size_kb, thin_pool):
cmd = _vg_lv_create_common_cmd(create_options, size_bytes, thin_pool)
cmd.extend(['--stripes', str(num_stripes)])
if stripe_size_kb != 0:
@@ -344,7 +329,7 @@ def _vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes,
if stripe_size_kb != 0:
cmd.extend(['--stripesize', str(stripe_size_kb)])
cmd.extend(['--name', name, vg_name, '--yes'])
cmd.extend(['--name', name, vg_name])
return call(cmd)
@@ -357,15 +342,14 @@ def vg_lv_create_raid(vg_name, create_options, name, raid_type, size_bytes,
size_bytes, num_stripes, stripe_size_kb)
def vg_lv_create_mirror(
vg_name, create_options, name, size_bytes, num_copies):
def vg_lv_create_mirror(vg_name, create_options, name, size_bytes, num_copies):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--type', 'mirror'])
cmd.extend(['--mirrors', str(num_copies)])
cmd.extend(['--size', str(size_bytes) + 'B'])
cmd.extend(['--name', name, vg_name, '--yes'])
cmd.extend(['--name', name, vg_name])
return call(cmd)
@@ -419,7 +403,7 @@ def lv_lv_create(lv_full_name, create_options, name, size_bytes):
cmd = ['lvcreate']
cmd.extend(options_to_cli_args(create_options))
cmd.extend(['--virtualsize', str(size_bytes) + 'B', '-T'])
cmd.extend(['--name', name, lv_full_name, '--yes'])
cmd.extend(['--name', name, lv_full_name])
return call(cmd)
@@ -451,11 +435,8 @@ def supports_json():
cmd = ['help']
rc, out, err = call(cmd)
if rc == 0:
if cfg.SHELL_IN_USE:
if 'fullreport' in err:
return True
else:
if 'fullreport' in err:
return True
return False
@@ -464,7 +445,7 @@ def lvm_full_report_json():
'pv_used', 'dev_size', 'pv_mda_size', 'pv_mda_free',
'pv_ba_start', 'pv_ba_size', 'pe_start', 'pv_pe_count',
'pv_pe_alloc_count', 'pv_attr', 'pv_tags', 'vg_name',
'vg_uuid', 'pv_missing']
'vg_uuid']
pv_seg_columns = ['pvseg_start', 'pvseg_size', 'segtype',
'pv_uuid', 'lv_uuid', 'pv_name']
@@ -480,9 +461,7 @@ def lvm_full_report_json():
'vg_name', 'pool_lv_uuid', 'pool_lv', 'origin_uuid',
'origin', 'data_percent',
'lv_attr', 'lv_tags', 'vg_uuid', 'lv_active', 'data_lv',
'metadata_lv', 'lv_parent', 'lv_role', 'lv_layout',
'snap_percent', 'metadata_percent', 'copy_percent',
'sync_percent', 'lv_metadata_size', 'move_pv', 'move_pv_uuid']
'metadata_lv', 'lv_parent', 'lv_role', 'lv_layout']
lv_seg_columns = ['seg_pe_ranges', 'segtype', 'lv_uuid']
@@ -498,14 +477,7 @@ def lvm_full_report_json():
rc, out, err = call(cmd)
if rc == 0:
# With the current implementation, if we are using the shell then we
# are using JSON and JSON is returned back to us as it was parsed to
# figure out if we completed OK or not
if cfg.SHELL_IN_USE:
assert(type(out) == dict)
return out
else:
return json.loads(out)
return json.loads(out)
return None
@@ -519,7 +491,7 @@ def pv_retrieve_with_segs(device=None):
'pv_used', 'dev_size', 'pv_mda_size', 'pv_mda_free',
'pv_ba_start', 'pv_ba_size', 'pe_start', 'pv_pe_count',
'pv_pe_alloc_count', 'pv_attr', 'pv_tags', 'vg_name',
'vg_uuid', 'pvseg_start', 'pvseg_size', 'segtype', 'pv_missing']
'vg_uuid', 'pvseg_start', 'pvseg_size', 'segtype']
# Lvm has some issues where it returns failure when querying pvs when other
# operations are in process, see:
@@ -555,7 +527,7 @@ def pv_resize(device, size_bytes, create_options):
cmd.extend(options_to_cli_args(create_options))
if size_bytes != 0:
cmd.extend(['--yes', '--setphysicalvolumesize', str(size_bytes) + 'B'])
cmd.extend(['--setphysicalvolumesize', str(size_bytes) + 'B'])
cmd.extend([device])
return call(cmd)
@@ -620,10 +592,10 @@ def vg_reduce(vg_name, missing, pv_devices, reduce_options):
cmd = ['vgreduce']
cmd.extend(options_to_cli_args(reduce_options))
if len(pv_devices) == 0:
cmd.append('--all')
if missing:
cmd.append('--removemissing')
elif len(pv_devices) == 0:
cmd.append('--all')
cmd.append(vg_name)
cmd.extend(pv_devices)
@@ -732,9 +704,7 @@ def lv_retrieve_with_segments():
'origin', 'data_percent',
'lv_attr', 'lv_tags', 'vg_uuid', 'lv_active', 'data_lv',
'metadata_lv', 'seg_pe_ranges', 'segtype', 'lv_parent',
'lv_role', 'lv_layout',
'snap_percent', 'metadata_percent', 'copy_percent',
'sync_percent', 'lv_metadata_size', 'move_pv', 'move_pv_uuid']
'lv_role', 'lv_layout']
cmd = _dc('lvs', ['-a', '-o', ','.join(columns)])
rc, out, err = call(cmd)
@@ -751,4 +721,4 @@ if __name__ == '__main__':
pv_data = pv_retrieve_with_segs()
for p in pv_data:
print(str(p))
log_debug(str(p))

View File

@@ -11,158 +11,20 @@ from .pv import load_pvs
from .vg import load_vgs
from .lv import load_lvs
from . import cfg
from .utils import MThreadRunner, log_debug, log_error
import threading
import queue
import traceback
def _main_thread_load(refresh=True, emit_signal=True):
def load(refresh=True, emit_signal=True, cache_refresh=True, log=True):
num_total_changes = 0
num_total_changes += load_pvs(
refresh=refresh,
emit_signal=emit_signal,
cache_refresh=False)[1]
num_total_changes += load_vgs(
refresh=refresh,
emit_signal=emit_signal,
cache_refresh=False)[1]
num_total_changes += load_lvs(
refresh=refresh,
emit_signal=emit_signal,
cache_refresh=False)[1]
return num_total_changes
def load(refresh=True, emit_signal=True, cache_refresh=True, log=True,
need_main_thread=True):
# Go through and load all the PVs, VGs and LVs
if cache_refresh:
cfg.db.refresh(log)
if need_main_thread:
rc = MThreadRunner(_main_thread_load, refresh, emit_signal).done()
else:
rc = _main_thread_load(refresh, emit_signal)
num_total_changes += load_pvs(refresh=refresh, emit_signal=emit_signal,
cache_refresh=False)[1]
num_total_changes += load_vgs(refresh=refresh, emit_signal=emit_signal,
cache_refresh=False)[1]
num_total_changes += load_lvs(refresh=refresh, emit_signal=emit_signal,
cache_refresh=False)[1]
return rc
# Even though lvm can handle multiple changes concurrently, it really doesn't
# make sense to do a 1:1 fetch of data for each lvm change, because a single
# fetch reflects all of the changes made up to that point.
class StateUpdate(object):
class UpdateRequest(object):
def __init__(self, refresh, emit_signal, cache_refresh, log,
need_main_thread):
self.is_done = False
self.refresh = refresh
self.emit_signal = emit_signal
self.cache_refresh = cache_refresh
self.log = log
self.need_main_thread = need_main_thread
self.result = None
self.cond = threading.Condition(threading.Lock())
def done(self):
with self.cond:
if not self.is_done:
self.cond.wait()
return self.result
def set_result(self, result):
with self.cond:
self.result = result
self.is_done = True
self.cond.notify_all()
@staticmethod
def update_thread(obj):
queued_requests = []
while cfg.run.value != 0:
# noinspection PyBroadException
try:
refresh = True
emit_signal = True
cache_refresh = True
log = True
need_main_thread = True
with obj.lock:
wait = not obj.deferred
obj.deferred = False
if len(queued_requests) == 0 and wait:
queued_requests.append(obj.queue.get(True, 2))
# Ok we have one or the deferred queue has some,
# check if any others
try:
while True:
queued_requests.append(obj.queue.get(False))
except queue.Empty:
pass
if len(queued_requests) > 1:
log_debug("Processing %d updates!" % len(queued_requests),
'bg_black', 'fg_light_green')
# We have what we can, run the update with the needed options
for i in queued_requests:
if not i.refresh:
refresh = False
if not i.emit_signal:
emit_signal = False
if not i.cache_refresh:
cache_refresh = False
if not i.log:
log = False
if not i.need_main_thread:
need_main_thread = False
num_changes = load(refresh, emit_signal, cache_refresh, log,
need_main_thread)
# Update is done, let everyone know!
for i in queued_requests:
i.set_result(num_changes)
# Only clear out the requests after we have given them a result
# otherwise we can orphan the waiting threads and they never
# wake up if we get an exception
queued_requests = []
except queue.Empty:
pass
except Exception:
st = traceback.format_exc()
log_error("update_thread exception: \n%s" % st)
cfg.blackbox.dump()
def __init__(self):
self.lock = threading.RLock()
self.queue = queue.Queue()
self.deferred = False
# Do initial load
load(refresh=False, emit_signal=False, need_main_thread=False)
self.thread = threading.Thread(target=StateUpdate.update_thread,
args=(self,),
name="StateUpdate.update_thread")
def load(self, refresh=True, emit_signal=True, cache_refresh=True,
log=True, need_main_thread=True):
# Place this request on the queue and wait for it to be completed
req = StateUpdate.UpdateRequest(refresh, emit_signal, cache_refresh,
log, need_main_thread)
self.queue.put(req)
return req.done()
def event(self):
with self.lock:
self.deferred = True
return num_total_changes

View File

@@ -8,55 +8,12 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .automatedproperties import AutomatedProperties
from .utils import job_obj_path_generate, mt_async_call
from .utils import job_obj_path_generate
from . import cfg
from .cfg import JOB_INTERFACE
import dbus
import threading
# noinspection PyUnresolvedReferences
from gi.repository import GLib
# Class that handles a client waiting for something to be complete. We either
# get a timeout or the operation is done.
class WaitingClient(object):
# A timeout occurred
@staticmethod
def _timeout(wc):
with wc.rlock:
if wc.in_use:
wc.in_use = False
# Remove ourselves from waiting client
wc.job_state.remove_waiting_client(wc)
wc.timer_id = -1
mt_async_call(wc.cb, wc.job_state.Complete)
wc.job_state = None
def __init__(self, job_state, tmo, cb, cbe):
self.rlock = threading.RLock()
self.job_state = job_state
self.cb = cb
self.cbe = cbe
self.in_use = True # Indicates if object is in play
self.timer_id = -1
if tmo > 0:
self.timer_id = GLib.timeout_add_seconds(
tmo, WaitingClient._timeout, self)
# The job finished before the timer popped and we are being notified that
# it's done
def notify(self):
with self.rlock:
if self.in_use:
self.in_use = False
# Clear timer
if self.timer_id != -1:
GLib.source_remove(self.timer_id)
self.timer_id = -1
mt_async_call(self.cb, self.job_state.Complete)
self.job_state = None
from . import background
# noinspection PyPep8Naming
@@ -67,9 +24,9 @@ class JobState(object):
self._percent = 0
self._complete = False
self._request = request
self._cond = threading.Condition(self.rlock)
self._ec = 0
self._stderr = ''
self._waiting_clients = []
# This is an lvm command that is just taking too long and doesn't
# support background operation
@@ -92,6 +49,8 @@ class JobState(object):
with self.rlock:
if self._request:
self._complete = self._request.is_done()
if self._complete:
self._percent = 100
return self._complete
@@ -99,8 +58,7 @@ class JobState(object):
def Complete(self, value):
with self.rlock:
self._complete = value
self._percent = 100
self.notify_waiting_clients()
self._cond.notify_all()
@property
def GetError(self):
@@ -114,10 +72,29 @@ class JobState(object):
else:
return (-1, 'Job is not complete!')
def set_result(self, ec, msg):
with self.rlock:
self.Complete = True
self._ec = ec
self._stderr = msg
def dtor(self):
with self.rlock:
self._request = None
def Wait(self, timeout):
try:
with self._cond:
# Check to see if we are done, before we wait
if not self.Complete:
if timeout != -1:
self._cond.wait(timeout)
else:
self._cond.wait()
return self.Complete
except RuntimeError:
return False
@property
def Result(self):
with self.rlock:
@@ -125,40 +102,10 @@ class JobState(object):
return self._request.result()
return '/'
def add_waiting_client(self, client):
with self.rlock:
# Avoid race condition where it goes complete before we get added
# to the list of waiting clients
if self.Complete:
client.notify()
else:
self._waiting_clients.append(client)
def remove_waiting_client(self, client):
		# If a waiting client's timer pops before the job is done we allow
		# the client to remove itself from the list. As we have a lock here
		# and a lock in the waiting client too, and they can be obtained in
		# different orders, a deadlock could occur.
		# As this remove is really optional, we try to acquire the lock and
		# remove. If we are unsuccessful it's not fatal; we just delay the
		# time when the objects can be garbage collected by python.
if self.rlock.acquire(False):
try:
self._waiting_clients.remove(client)
finally:
self.rlock.release()
def notify_waiting_clients(self):
with self.rlock:
for c in self._waiting_clients:
c.notify()
self._waiting_clients = []
# noinspection PyPep8Naming
class Job(AutomatedProperties):
_Percent_meta = ('d', JOB_INTERFACE)
_Percent_meta = ('y', JOB_INTERFACE)
_Complete_meta = ('b', JOB_INTERFACE)
_Result_meta = ('o', JOB_INTERFACE)
_GetError_meta = ('(is)', JOB_INTERFACE)
@@ -174,25 +121,26 @@ class Job(AutomatedProperties):
@property
def Percent(self):
return dbus.Double(float(self.state.Percent))
return self.state.Percent
@Percent.setter
def Percent(self, value):
self.state.Percent = value
@property
def Complete(self):
return dbus.Boolean(self.state.Complete)
@staticmethod
def _signal_complete(obj):
obj.PropertiesChanged(
JOB_INTERFACE, dict(Complete=dbus.Boolean(obj.state.Complete)), [])
return self.state.Complete
@Complete.setter
def Complete(self, value):
self.state.Complete = value
mt_async_call(Job._signal_complete, self)
@property
def GetError(self):
return dbus.Struct(self.state.GetError, signature="(is)")
return self.state.GetError
def set_result(self, ec, msg):
self.state.set_result(ec, msg)
@dbus.service.method(dbus_interface=JOB_INTERFACE)
def Remove(self):
@@ -208,15 +156,11 @@ class Job(AutomatedProperties):
out_signature='b',
async_callbacks=('cb', 'cbe'))
def Wait(self, timeout, cb, cbe):
if timeout == 0 or self.state.Complete:
cb(dbus.Boolean(self.state.Complete))
else:
self.state.add_waiting_client(
WaitingClient(self.state, timeout, cb, cbe))
background.add_wait(self, timeout, cb, cbe)
@property
def Result(self):
return dbus.ObjectPath(self.state.Result)
return self.state.Result
@property
def lvm_id(self):

View File

@@ -21,7 +21,7 @@ from .utils import n, n32
from .loader import common
from .state import State
from . import background
from .utils import round_size, mt_remove_dbus_objects
from .utils import round_size
from .job import JobState
@@ -81,14 +81,7 @@ def lvs_state_retrieve(selection, cache_refresh=True):
n32(l['data_percent']), l['lv_attr'],
l['lv_tags'], l['lv_active'], l['data_lv'],
l['metadata_lv'], l['segtype'], l['lv_role'],
l['lv_layout'],
n32(l['snap_percent']),
n32(l['metadata_percent']),
n32(l['copy_percent']),
n32(l['sync_percent']),
n(l['lv_metadata_size']),
l['move_pv'],
l['move_pv_uuid']))
l['lv_layout']))
return rc
@@ -108,15 +101,9 @@ class LvState(State):
rc = []
for pv in sorted(cfg.db.lv_contained_pv(uuid)):
(pv_uuid, pv_name, pv_segs) = pv
pv_obj = cfg.om.get_object_path_by_uuid_lvm_id(pv_uuid, pv_name)
segs_decorate = []
for i in pv_segs:
segs_decorate.append((dbus.UInt64(i[0]),
dbus.UInt64(i[1]),
dbus.String(i[2])))
rc.append((dbus.ObjectPath(pv_obj), segs_decorate))
pv_obj = cfg.om.get_object_path_by_uuid_lvm_id(
pv_uuid, pv_name, gen_new=False)
rc.append((pv_obj, pv_segs))
return dbus.Array(rc, signature="(oa(tts))")
@@ -137,26 +124,25 @@ class LvState(State):
for l in cfg.db.hidden_lvs(self.Uuid):
full_name = "%s/%s" % (vg_name, l[1])
op = cfg.om.get_object_path_by_uuid_lvm_id(l[0], full_name)
op = cfg.om.get_object_path_by_uuid_lvm_id(
l[0], full_name, gen_new=False)
assert op
rc.append(dbus.ObjectPath(op))
rc.append(op)
return rc
def __init__(self, Uuid, Name, Path, SizeBytes,
vg_name, vg_uuid, pool_lv_uuid, PoolLv,
origin_uuid, OriginLv, DataPercent, Attr, Tags, active,
data_lv, metadata_lv, segtypes, role, layout, SnapPercent,
MetaDataPercent, CopyPercent, SyncPercent, MetaDataSizeBytes,
move_pv, move_pv_uuid):
data_lv, metadata_lv, segtypes, role, layout):
utils.init_class_from_arguments(self)
# The segtypes is possibly an array with potentially dupes or a single
# value
self._segs = dbus.Array([], signature='s')
if not isinstance(segtypes, list):
self._segs.append(dbus.String(segtypes))
self._segs.append(segtypes)
else:
self._segs.extend([dbus.String(x) for x in set(segtypes)])
self._segs.extend(set(segtypes))
self.Vg = cfg.om.get_object_path_by_uuid_lvm_id(
vg_uuid, vg_name, vg_obj_path_generate)
@@ -167,7 +153,8 @@ class LvState(State):
gen = utils.lv_object_path_method(Name, (Attr, layout, role))
self.PoolLv = cfg.om.get_object_path_by_uuid_lvm_id(
pool_lv_uuid, '%s/%s' % (vg_name, PoolLv), gen)
pool_lv_uuid, '%s/%s' % (vg_name, PoolLv),
gen)
else:
self.PoolLv = '/'
@@ -223,19 +210,13 @@ class LvState(State):
@utils.dbus_property(LV_COMMON_INTERFACE, 'Name', 's')
@utils.dbus_property(LV_COMMON_INTERFACE, 'Path', 's')
@utils.dbus_property(LV_COMMON_INTERFACE, 'SizeBytes', 't')
@utils.dbus_property(LV_COMMON_INTERFACE, 'DataPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'SegType', 'as')
@utils.dbus_property(LV_COMMON_INTERFACE, 'Vg', 'o')
@utils.dbus_property(LV_COMMON_INTERFACE, 'OriginLv', 'o')
@utils.dbus_property(LV_COMMON_INTERFACE, 'PoolLv', 'o')
@utils.dbus_property(LV_COMMON_INTERFACE, 'Devices', "a(oa(tts))")
@utils.dbus_property(LV_COMMON_INTERFACE, 'HiddenLvs', "ao")
@utils.dbus_property(LV_COMMON_INTERFACE, 'Attr', 's')
@utils.dbus_property(LV_COMMON_INTERFACE, 'DataPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'SnapPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'MetaDataPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'CopyPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'SyncPercent', 'u')
@utils.dbus_property(LV_COMMON_INTERFACE, 'MetaDataSizeBytes', 't')
class LvCommon(AutomatedProperties):
_Tags_meta = ("as", LV_COMMON_INTERFACE)
_Roles_meta = ("as", LV_COMMON_INTERFACE)
@@ -251,45 +232,12 @@ class LvCommon(AutomatedProperties):
_FixedMinor_meta = ('b', LV_COMMON_INTERFACE)
_ZeroBlocks_meta = ('b', LV_COMMON_INTERFACE)
_SkipActivation_meta = ('b', LV_COMMON_INTERFACE)
_MovePv_meta = ('o', LV_COMMON_INTERFACE)
def _get_move_pv(self):
path = None
# It's likely that the move_pv is empty
if self.state.move_pv_uuid and self.state.move_pv:
path = cfg.om.get_object_path_by_uuid_lvm_id(
self.state.move_pv_uuid, self.state.move_pv)
if not path:
path = '/'
return path
# noinspection PyUnusedLocal,PyPep8Naming
def __init__(self, object_path, object_state):
super(LvCommon, self).__init__(object_path, lvs_state_retrieve)
self.set_interface(LV_COMMON_INTERFACE)
self.state = object_state
self._move_pv = self._get_move_pv()
@staticmethod
def handle_execute(rc, out, err):
if rc == 0:
cfg.load()
else:
# Need to work on error handling, need consistent
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
@staticmethod
def validate_dbus_object(lv_uuid, lv_name):
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
if not dbo:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(lv_uuid, lv_name))
return dbo
@property
def VolumeType(self):
@@ -304,16 +252,14 @@ class LvCommon(AutomatedProperties):
'V': 'thin Volume', 't': 'thin pool', 'T': 'Thin pool data',
'e': 'raid or pool metadata or pool metadata spare',
'-': 'Unspecified'}
return dbus.Struct((self.state.Attr[0], type_map[self.state.Attr[0]]),
signature="as")
return (self.state.Attr[0], type_map[self.state.Attr[0]])
@property
def Permissions(self):
type_map = {'w': 'writable', 'r': 'read-only',
'R': 'Read-only activation of non-read-only volume',
'-': 'Unspecified'}
return dbus.Struct((self.state.Attr[1], type_map[self.state.Attr[1]]),
signature="(ss)")
return (self.state.Attr[1], type_map[self.state.Attr[1]])
@property
def AllocationPolicy(self):
@@ -322,12 +268,11 @@ class LvCommon(AutomatedProperties):
'i': 'inherited', 'I': 'inherited locked',
'l': 'cling', 'L': 'cling locked',
'n': 'normal', 'N': 'normal locked', '-': 'Unspecified'}
return dbus.Struct((self.state.Attr[2], type_map[self.state.Attr[2]]),
signature="(ss)")
return (self.state.Attr[2], type_map[self.state.Attr[2]])
@property
def FixedMinor(self):
return dbus.Boolean(self.state.Attr[3] == 'm')
return self.state.Attr[3] == 'm'
@property
def State(self):
@@ -338,32 +283,29 @@ class LvCommon(AutomatedProperties):
'd': 'mapped device present without tables',
'i': 'mapped device present with inactive table',
'X': 'unknown', '-': 'Unspecified'}
return dbus.Struct((self.state.Attr[4], type_map[self.state.Attr[4]]),
signature="(ss)")
return (self.state.Attr[4], type_map[self.state.Attr[4]])
@property
def TargetType(self):
type_map = {'C': 'Cache', 'm': 'mirror', 'r': 'raid',
's': 'snapshot', 't': 'thin', 'u': 'unknown',
'v': 'virtual', '-': 'Unspecified'}
return dbus.Struct((self.state.Attr[6], type_map[self.state.Attr[6]]),
signature="(ss)")
return (self.state.Attr[6], type_map[self.state.Attr[6]])
@property
def ZeroBlocks(self):
return dbus.Boolean(self.state.Attr[7] == 'z')
return self.state.Attr[7] == 'z'
@property
def Health(self):
type_map = {'p': 'partial', 'r': 'refresh',
'm': 'mismatches', 'w': 'writemostly',
'X': 'X unknown', '-': 'Unspecified'}
return dbus.Struct((self.state.Attr[8], type_map[self.state.Attr[8]]),
signature="(ss)")
return (self.state.Attr[8], type_map[self.state.Attr[8]])
@property
def SkipActivation(self):
return dbus.Boolean(self.state.Attr[9] == 'k')
return self.state.Attr[9] == 'k'
def vg_name_lookup(self):
return self.state.vg_name_lookup()
@@ -389,19 +331,22 @@ class LvCommon(AutomatedProperties):
@property
def IsThinVolume(self):
return dbus.Boolean(self.state.Attr[0] == 'V')
return self.state.Attr[0] == 'V'
@property
def IsThinPool(self):
return dbus.Boolean(self.state.Attr[0] == 't')
return self.state.Attr[0] == 't'
@property
def Active(self):
return dbus.Boolean(self.state.active == "active")
return self.state.active == "active"
@property
def MovePv(self):
return dbus.ObjectPath(self._move_pv)
@dbus.service.method(
dbus_interface=LV_COMMON_INTERFACE,
in_signature='ia{sv}',
out_signature='o')
def _Future(self, tmo, open_options):
raise dbus.exceptions.DBusException(LV_COMMON_INTERFACE, 'Do not use!')
# noinspection PyPep8Naming
@@ -427,10 +372,25 @@ class Lv(LvCommon):
@staticmethod
def _remove(lv_uuid, lv_name, remove_options):
# Make sure we have a dbus object representing it
LvCommon.validate_dbus_object(lv_uuid, lv_name)
# Remove the LV, if successful then remove from the model
rc, out, err = cmdhandler.lv_remove(lv_name, remove_options)
LvCommon.handle_execute(rc, out, err)
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
if dbo:
# Remove the LV, if successful then remove from the model
rc, out, err = cmdhandler.lv_remove(lv_name, remove_options)
if rc == 0:
cfg.om.remove_object(dbo, True)
cfg.load()
else:
# Need to work on error handling, need consistent
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(lv_uuid, lv_name))
return '/'
@dbus.service.method(
@@ -448,11 +408,24 @@ class Lv(LvCommon):
@staticmethod
def _rename(lv_uuid, lv_name, new_name, rename_options):
# Make sure we have a dbus object representing it
LvCommon.validate_dbus_object(lv_uuid, lv_name)
# Rename the logical volume
rc, out, err = cmdhandler.lv_rename(lv_name, new_name,
rename_options)
LvCommon.handle_execute(rc, out, err)
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
if dbo:
# Rename the logical volume
rc, out, err = cmdhandler.lv_rename(lv_name, new_name,
rename_options)
if rc == 0:
cfg.load()
else:
# Need to work on error handling, need consistent
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(lv_uuid, lv_name))
return '/'
@dbus.service.method(
@@ -486,27 +459,44 @@ class Lv(LvCommon):
pv_dests_and_ranges, move_options, job_state), cb, cbe, False,
job_state)
background.cmd_runner(r)
cfg.worker_q.put(r)
@staticmethod
def _snap_shot(lv_uuid, lv_name, name, optional_size,
snapshot_options):
# Make sure we have a dbus object representing it
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
# If you specify a size you get a 'thick' snapshot even if
# it is a thin lv
if not dbo.IsThinVolume:
if optional_size == 0:
space = dbo.SizeBytes // 80
remainder = space % 512
optional_size = space + 512 - remainder
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
rc, out, err = cmdhandler.vg_lv_snapshot(
lv_name, snapshot_options, name, optional_size)
LvCommon.handle_execute(rc, out, err)
full_name = "%s/%s" % (dbo.vg_name_lookup(), name)
return cfg.om.get_object_path_by_lvm_id(full_name)
if dbo:
# If you specify a size you get a 'thick' snapshot even if
# it is a thin lv
if not dbo.IsThinVolume:
if optional_size == 0:
space = dbo.SizeBytes / 80
remainder = space % 512
optional_size = space + 512 - remainder
rc, out, err = cmdhandler.vg_lv_snapshot(
lv_name, snapshot_options, name, optional_size)
if rc == 0:
return_path = '/'
full_name = "%s/%s" % (dbo.vg_name_lookup(), name)
lvs = load_lvs([full_name], emit_signal=True)[0]
for l in lvs:
return_path = l.dbus_object_path()
# Refresh self and all included PVs
cfg.load(cache_refresh=False)
return return_path
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(lv_uuid, lv_name))
@dbus.service.method(
dbus_interface=LV_INTERFACE,
@@ -529,24 +519,38 @@ class Lv(LvCommon):
resize_options):
# Make sure we have a dbus object representing it
pv_dests = []
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
# If we have PVs, verify them
if len(pv_dests_and_ranges):
for pr in pv_dests_and_ranges:
pv_dbus_obj = cfg.om.get_object_by_path(pr[0])
if not pv_dbus_obj:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'PV Destination (%s) not found' % pr[0])
if dbo:
# If we have PVs, verify them
if len(pv_dests_and_ranges):
for pr in pv_dests_and_ranges:
pv_dbus_obj = cfg.om.get_object_by_path(pr[0])
if not pv_dbus_obj:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'PV Destination (%s) not found' % pr[0])
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
size_change = new_size_bytes - dbo.SizeBytes
rc, out, err = cmdhandler.lv_resize(dbo.lvm_id, size_change,
pv_dests, resize_options)
LvCommon.handle_execute(rc, out, err)
return "/"
size_change = new_size_bytes - dbo.SizeBytes
rc, out, err = cmdhandler.lv_resize(dbo.lvm_id, size_change,
pv_dests, resize_options)
if rc == 0:
# Refresh what's changed
cfg.load()
return "/"
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(lv_uuid, lv_name))
@dbus.service.method(
dbus_interface=LV_INTERFACE,
@@ -579,11 +583,23 @@ class Lv(LvCommon):
def _lv_activate_deactivate(uuid, lv_name, activate, control_flags,
options):
# Make sure we have a dbus object representing it
LvCommon.validate_dbus_object(uuid, lv_name)
rc, out, err = cmdhandler.activate_deactivate(
'lvchange', lv_name, activate, control_flags, options)
LvCommon.handle_execute(rc, out, err)
return '/'
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, lv_name)
if dbo:
rc, out, err = cmdhandler.activate_deactivate(
'lvchange', lv_name, activate, control_flags, options)
if rc == 0:
dbo.refresh()
return '/'
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(uuid, lv_name))
@dbus.service.method(
dbus_interface=LV_INTERFACE,
@@ -615,11 +631,25 @@ class Lv(LvCommon):
@staticmethod
def _add_rm_tags(uuid, lv_name, tags_add, tags_del, tag_options):
# Make sure we have a dbus object representing it
LvCommon.validate_dbus_object(uuid, lv_name)
rc, out, err = cmdhandler.lv_tag(
lv_name, tags_add, tags_del, tag_options)
LvCommon.handle_execute(rc, out, err)
return '/'
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, lv_name)
if dbo:
rc, out, err = cmdhandler.lv_tag(
lv_name, tags_add, tags_del, tag_options)
if rc == 0:
dbo.refresh()
return '/'
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(uuid, lv_name))
@dbus.service.method(
dbus_interface=LV_INTERFACE,
@@ -668,22 +698,37 @@ class LvThinPool(Lv):
@property
def DataLv(self):
return dbus.ObjectPath(self._data_lv)
return self._data_lv
@property
def MetaDataLv(self):
return dbus.ObjectPath(self._metadata_lv)
return self._metadata_lv
@staticmethod
def _lv_create(lv_uuid, lv_name, name, size_bytes, create_options):
# Make sure we have a dbus object representing it
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
rc, out, err = cmdhandler.lv_lv_create(
lv_name, create_options, name, size_bytes)
LvCommon.handle_execute(rc, out, err)
full_name = "%s/%s" % (dbo.vg_name_lookup(), name)
return cfg.om.get_object_path_by_lvm_id(full_name)
lv_created = '/'
if dbo:
rc, out, err = cmdhandler.lv_lv_create(
lv_name, create_options, name, size_bytes)
if rc == 0:
full_name = "%s/%s" % (dbo.vg_name_lookup(), name)
lvs = load_lvs([full_name], emit_signal=True)[0]
for l in lvs:
lv_created = l.dbus_object_path()
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'LV with uuid %s and name %s not present!' %
(lv_uuid, lv_name))
return lv_created
@dbus.service.method(
dbus_interface=THIN_POOL_INTERFACE,
@@ -712,21 +757,22 @@ class LvCachePool(Lv):
@property
def DataLv(self):
return dbus.ObjectPath(self._data_lv)
return self._data_lv
@property
def MetaDataLv(self):
return dbus.ObjectPath(self._metadata_lv)
return self._metadata_lv
@staticmethod
def _cache_lv(lv_uuid, lv_name, lv_object_path, cache_options):
# Make sure we have a dbus object representing cache pool
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
# Make sure we have dbus object representing lv to cache
lv_to_cache = cfg.om.get_object_by_path(lv_object_path)
if lv_to_cache:
if dbo and lv_to_cache:
fcn = lv_to_cache.lv_full_name()
rc, out, err = cmdhandler.lv_cache_lv(
dbo.lv_full_name(), fcn, cache_options)
@@ -734,18 +780,27 @@ class LvCachePool(Lv):
# When we cache an LV, the cache pool and the lv that is getting
# cached need to be removed from the object manager and
# re-created as their interfaces have changed!
mt_remove_dbus_objects((dbo, lv_to_cache))
cfg.om.remove_object(dbo, emit_signal=True)
cfg.om.remove_object(lv_to_cache, emit_signal=True)
cfg.load()
lv_converted = cfg.om.get_object_path_by_lvm_id(fcn)
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE, 'LV to cache with object path %s not present!' %
lv_object_path)
msg = ""
if not dbo:
msg += 'CachePool LV with uuid %s and name %s not present!' % \
(lv_uuid, lv_name)
if not lv_to_cache:
msg += 'LV to cache with object path %s not present!' % \
(lv_object_path)
raise dbus.exceptions.DBusException(LV_INTERFACE, msg)
return lv_converted
@dbus.service.method(
@@ -771,30 +826,37 @@ class LvCacheLv(Lv):
@property
def CachePool(self):
return dbus.ObjectPath(self.state.PoolLv)
return self.state.PoolLv
@staticmethod
def _detach_lv(lv_uuid, lv_name, detach_options, destroy_cache):
# Make sure we have a dbus object representing cache pool
dbo = LvCommon.validate_dbus_object(lv_uuid, lv_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(lv_uuid, lv_name)
# Get current cache name
cache_pool = cfg.om.get_object_by_path(dbo.CachePool)
if dbo:
rc, out, err = cmdhandler.lv_detach_cache(
dbo.lv_full_name(), detach_options, destroy_cache)
if rc == 0:
# The cache pool gets removed as hidden and put back to
# visible, so let's delete
mt_remove_dbus_objects((cache_pool, dbo))
cfg.load()
# Get current cache name
cache_pool = cfg.om.get_object_by_path(dbo.CachePool)
uncached_lv_path = cfg.om.get_object_path_by_lvm_id(lv_name)
rc, out, err = cmdhandler.lv_detach_cache(
dbo.lv_full_name(), detach_options, destroy_cache)
if rc == 0:
# The cache pool gets removed as hidden and put back to
# visible, so let's delete
cfg.om.remove_object(cache_pool, emit_signal=True)
cfg.om.remove_object(dbo, emit_signal=True)
cfg.load()
uncached_lv_path = cfg.om.get_object_path_by_lvm_id(lv_name)
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
LV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
'LV with uuid %s and name %s not present!' %
(lv_uuid, lv_name))
return uncached_lv_path
@dbus.service.method(
@@ -828,4 +890,4 @@ class LvSnapShot(Lv):
(SNAPSHOT_INTERFACE, self.Uuid, self.lvm_id,
merge_options, job_state), cb, cbe, False,
job_state)
background.cmd_runner(r)
cfg.worker_q.put(r)

View File

@@ -0,0 +1,184 @@
#!/usr/bin/env python3
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2015-2016, Vratislav Podzimek <vpodzime@redhat.com>
import subprocess
import shlex
from fcntl import fcntl, F_GETFL, F_SETFL
from os import O_NONBLOCK
import traceback
import sys
import re
try:
from .cfg import LVM_CMD
from .utils import log_debug, log_error
except:
from cfg import LVM_CMD
from utils import log_debug, log_error
SHELL_PROMPT = "lvm> "
def _quote_arg(arg):
if len(shlex.split(arg)) > 1:
return '"%s"' % arg
else:
return arg
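# Illustrative sketch (not part of the original commit): _quote_arg() only
# wraps an argument in double quotes when shlex would split it into more
# than one token, so embedded spaces survive the trip through the shell
# command line built in call_lvm(). Example:
#   _quote_arg('vg00/lv0')      -> 'vg00/lv0'
#   _quote_arg('my tag value')  -> '"my tag value"'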
class LVMShellProxy(object):
def _read_until_prompt(self):
prev_ec = None
stdout = ""
while not stdout.endswith(SHELL_PROMPT):
try:
tmp = self.lvm_shell.stdout.read()
if tmp:
stdout += tmp.decode("utf-8")
except IOError:
# nothing written yet
pass
# strip the prompt from the STDOUT before returning and grab the exit
# code if it's available
m = self.re.match(stdout)
if m:
prev_ec = int(m.group(2))
strip_idx = -1 * len(m.group(1))
else:
strip_idx = -1 * len(SHELL_PROMPT)
return stdout[:strip_idx], prev_ec
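# Illustrative sketch (not part of the original commit): the prompt regex
# compiled in __init__ below captures the exit code lvm embeds in its prompt.
# For example, with stdout ending in '...successfully created\n[0] lvm> ':
#   m = re.compile(".*(\[(-?[0-9]+)\] lvm> $)", re.DOTALL).match(stdout)
#   m.group(2)  -> '0'          (previous exit code)
#   m.group(1)  -> '[0] lvm> '  (the text stripped off the returned stdout)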
def _read_line(self):
while True:
try:
tmp = self.lvm_shell.stdout.readline()
if tmp:
return tmp.decode("utf-8")
except IOError:
pass
def _discard_echo(self, expected):
line = ""
while line != expected:
# GNU readline inserts some interesting characters at times...
line += self._read_line().replace(' \r', '')
def _write_cmd(self, cmd):
cmd_bytes = bytes(cmd, "utf-8")
num_written = self.lvm_shell.stdin.write(cmd_bytes)
assert (num_written == len(cmd_bytes))
self.lvm_shell.stdin.flush()
def _lvm_echos(self):
echo = False
cmd = "version\n"
self._write_cmd(cmd)
line = self._read_line()
if line == cmd:
echo = True
self._read_until_prompt()
return echo
def __init__(self):
self.re = re.compile(".*(\[(-?[0-9]+)\] lvm> $)", re.DOTALL)
# run the lvm shell
self.lvm_shell = subprocess.Popen(
[LVM_CMD], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
flags = fcntl(self.lvm_shell.stdout, F_GETFL)
fcntl(self.lvm_shell.stdout, F_SETFL, flags | O_NONBLOCK)
flags = fcntl(self.lvm_shell.stderr, F_GETFL)
fcntl(self.lvm_shell.stderr, F_SETFL, flags | O_NONBLOCK)
# wait for the first prompt
self._read_until_prompt()
# Check to see if the version of LVM we are using is running with
# gnu readline which will echo our writes from stdin to stdout
self.echo = self._lvm_echos()
def call_lvm(self, argv, debug=False):
# create the command string
cmd = " ".join(_quote_arg(arg) for arg in argv)
cmd += "\n"
# run the command by writing it to the shell's STDIN
self._write_cmd(cmd)
# If lvm is utilizing gnu readline, it echoes stdin to stdout
if self.echo:
self._discard_echo(cmd)
# read everything from the STDOUT to the next prompt
stdout, exit_code = self._read_until_prompt()
# read everything from STDERR if there's something (we waited for the
# prompt on STDOUT so there should be all or nothing at this point on
# STDERR)
stderr = None
try:
t_error = self.lvm_shell.stderr.read()
if t_error:
stderr = t_error.decode("utf-8")
except IOError:
# nothing on STDERR
pass
if exit_code is not None:
rc = exit_code
else:
# LVM writes to stderr even when it completes successfully, so without
# the exit code in the prompt we can never be sure.
if stderr:
rc = 1
else:
rc = 0
if debug or rc != 0:
log_error(('CMD: %s' % cmd))
log_error(("EC = %d" % rc))
log_error(("STDOUT=\n %s\n" % stdout))
log_error(("STDERR=\n %s\n" % stderr))
return (rc, stdout, stderr)
def __del__(self):
self.lvm_shell.terminate()
if __name__ == "__main__":
shell = LVMShellProxy()
in_line = "start"
try:
while in_line:
in_line = input("lvm> ")
if in_line:
ret, out, err, = shell.call_lvm(in_line.split())
print(("RET: %d" % ret))
print(("OUT:\n%s" % out))
print(("ERR:\n%s" % err))
except KeyboardInterrupt:
pass
except EOFError:
pass
except Exception:
traceback.print_exc(file=sys.stdout)
finally:
print()

View File

@@ -1,269 +0,0 @@
#!@PYTHON3@
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2015-2016, Vratislav Podzimek <vpodzime@redhat.com>
import subprocess
import shlex
from fcntl import fcntl, F_GETFL, F_SETFL
import os
import traceback
import sys
import tempfile
import time
import select
import copy
try:
import simplejson as json
except ImportError:
import json
from lvmdbusd.cfg import LVM_CMD
from lvmdbusd.utils import log_debug, log_error, add_no_notify
SHELL_PROMPT = "lvm> "
def _quote_arg(arg):
if len(shlex.split(arg)) > 1:
return '"%s"' % arg
else:
return arg
class LVMShellProxy(object):
@staticmethod
def _read(stream):
tmp = stream.read()
if tmp:
return tmp.decode("utf-8")
return ''
# Read until we get prompt back and a result
# @param: no_output Caller expects no output to report FD
# Returns stdout, report, stderr (report is JSON!)
def _read_until_prompt(self, no_output=False):
stdout = ""
report = ""
stderr = ""
keep_reading = True
extra_passes = 3
report_json = {}
prev_report_len = 0
# Try reading from all FDs to prevent one from filling up and causing
# a hang. Keep reading until we get the prompt back on stdout and the
# report FD has delivered valid JSON (or we run out of extra passes).
while keep_reading:
try:
rd_fd = [
self.lvm_shell.stdout.fileno(),
self.report_stream.fileno(),
self.lvm_shell.stderr.fileno()]
ready = select.select(rd_fd, [], [], 2)
for r in ready[0]:
if r == self.lvm_shell.stdout.fileno():
stdout += LVMShellProxy._read(self.lvm_shell.stdout)
elif r == self.report_stream.fileno():
report += LVMShellProxy._read(self.report_stream)
elif r == self.lvm_shell.stderr.fileno():
stderr += LVMShellProxy._read(self.lvm_shell.stderr)
# Check to see if the lvm process died on us
if self.lvm_shell.poll():
raise Exception(self.lvm_shell.returncode, "%s" % stderr)
if stdout.endswith(SHELL_PROMPT):
if no_output:
keep_reading = False
else:
cur_report_len = len(report)
if cur_report_len != 0:
# Only bother to parse if we have more data
if prev_report_len != cur_report_len:
prev_report_len = cur_report_len
# Parse the JSON if it's good we are done,
# if not we will try to read some more.
try:
report_json = json.loads(report)
keep_reading = False
except ValueError:
pass
if keep_reading:
extra_passes -= 1
if extra_passes <= 0:
if len(report):
raise ValueError("Invalid json: %s" %
report)
else:
raise ValueError(
"lvm returned no JSON output!")
except IOError as ioe:
log_debug(str(ioe))
pass
return stdout, report_json, stderr
def _write_cmd(self, cmd):
cmd_bytes = bytes(cmd, "utf-8")
num_written = self.lvm_shell.stdin.write(cmd_bytes)
assert (num_written == len(cmd_bytes))
self.lvm_shell.stdin.flush()
@staticmethod
def _make_non_block(stream):
flags = fcntl(stream, F_GETFL)
fcntl(stream, F_SETFL, flags | os.O_NONBLOCK)
def __init__(self):
# Create a temp directory
tmp_dir = tempfile.mkdtemp(prefix="lvmdbus_")
tmp_file = "%s/lvmdbus_report" % (tmp_dir)
try:
# Let's create a fifo for the report output
os.mkfifo(tmp_file, 0o600)
except FileExistsError:
pass
# We have to open non-blocking as the other side isn't open until
# we actually fork the process.
self.report_fd = os.open(tmp_file, os.O_NONBLOCK)
self.report_stream = os.fdopen(self.report_fd, 'rb', 0)
# Setup the environment for using our own socket for reporting
local_env = copy.deepcopy(os.environ)
local_env["LVM_REPORT_FD"] = "32"
local_env["LVM_COMMAND_PROFILE"] = "lvmdbusd"
# Disable the logic that makes lvm abort when it logs too much, which
# easily happens when utilizing the lvm shell.
local_env["LVM_LOG_FILE_MAX_LINES"] = "0"
# run the lvm shell
self.lvm_shell = subprocess.Popen(
[LVM_CMD + " 32>%s" % tmp_file],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=local_env,
stderr=subprocess.PIPE, close_fds=True, shell=True)
try:
LVMShellProxy._make_non_block(self.lvm_shell.stdout)
LVMShellProxy._make_non_block(self.lvm_shell.stderr)
# wait for the first prompt
errors = self._read_until_prompt(no_output=True)[2]
if errors and len(errors):
raise RuntimeError(errors)
except:
raise
finally:
# These will get deleted when the FD count goes to zero so we
# can be sure to clean up correctly no matter how we finish
os.unlink(tmp_file)
os.rmdir(tmp_dir)
def get_error_msg(self):
# We got an error, lets go fetch the error message
self._write_cmd('lastlog\n')
# read everything from the STDOUT to the next prompt
stdout, report_json, stderr = self._read_until_prompt()
if 'log' in report_json:
error_msg = ""
# Walk the entire log array and build an error string
for log_entry in report_json['log']:
if log_entry['log_type'] == "error":
if error_msg:
error_msg += ', ' + log_entry['log_message']
else:
error_msg = log_entry['log_message']
return error_msg
return 'No error reason provided! (missing "log" section)'
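# Illustrative sketch (not part of the original commit): an abbreviated view
# of the 'lastlog' report shape the loop above expects. Real reports carry
# more fields per entry; only the keys used here are shown, and the message
# text is made up:
#   report_json = {"log": [
#       {"log_type": "error",
#        "log_message": 'Volume group "vg00" not found'}]}
# get_error_msg() would return 'Volume group "vg00" not found' for this input.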
def call_lvm(self, argv, debug=False):
rc = 1
error_msg = ""
if self.lvm_shell.poll():
raise Exception(
self.lvm_shell.returncode,
"Underlying lvm shell process is not present!")
argv = add_no_notify(argv)
# create the command string
cmd = " ".join(_quote_arg(arg) for arg in argv)
cmd += "\n"
# run the command by writing it to the shell's STDIN
self._write_cmd(cmd)
# read everything from the STDOUT to the next prompt
stdout, report_json, stderr = self._read_until_prompt()
# Parse the report to see what happened
if 'log' in report_json:
if report_json['log'][-1:][0]['log_ret_code'] == '1':
rc = 0
else:
error_msg = self.get_error_msg()
if debug or rc != 0:
log_error(('CMD: %s' % cmd))
log_error(("EC = %d" % rc))
log_error(("ERROR_MSG=\n %s\n" % error_msg))
return rc, report_json, error_msg
def exit_shell(self):
try:
self._write_cmd('exit\n')
except Exception as e:
log_error(str(e))
def __del__(self):
try:
self.lvm_shell.terminate()
except:
pass
if __name__ == "__main__":
shell = LVMShellProxy()
in_line = "start"
try:
while in_line:
in_line = input("lvm> ")
if in_line:
start = time.time()
ret, out, err = shell.call_lvm(in_line.split())
end = time.time()
print(("RC: %d" % ret))
print(("OUT:\n%s" % out))
print(("ERR:\n%s" % err))
print("Command = %f seconds" % (end - start))
except KeyboardInterrupt:
pass
except EOFError:
pass
except Exception:
traceback.print_exc(file=sys.stdout)

View File

@@ -1,4 +1,4 @@
#!@PYTHON3@
#!/usr/bin/env python3
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
@@ -15,12 +15,16 @@ import pprint as prettyprint
import os
import sys
from lvmdbusd import cmdhandler
from lvmdbusd.utils import log_debug, log_error
try:
from . import cmdhandler
from .utils import log_debug, log_error
except SystemError:
import cmdhandler
from utils import log_debug
class DataStore(object):
def __init__(self, usejson=True):
def __init__(self, usejson=None):
self.pvs = {}
self.vgs = {}
self.lvs = {}
@@ -38,7 +42,7 @@ class DataStore(object):
# self.refresh()
self.num_refreshes = 0
if usejson:
if usejson is None:
self.json = cmdhandler.supports_json()
else:
self.json = usejson
@@ -68,20 +72,6 @@ class DataStore(object):
else:
table[key] = record
@staticmethod
def _pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup):
for p in c_pvs.values():
# Capture which PVs are associated with which VG
if p['vg_uuid'] not in c_pvs_in_vgs:
c_pvs_in_vgs[p['vg_uuid']] = []
if p['vg_name']:
c_pvs_in_vgs[p['vg_uuid']].append(
(p['pv_name'], p['pv_uuid']))
# Lookup for translating between /dev/<name> and pv uuid
c_lookup[p['pv_name']] = p['pv_uuid']
@staticmethod
def _parse_pvs(_pvs):
pvs = sorted(_pvs, key=lambda pk: pk['pv_name'])
@@ -95,7 +85,18 @@ class DataStore(object):
c_pvs, p['pv_uuid'], p,
['pvseg_start', 'pvseg_size', 'segtype'])
DataStore._pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup)
for p in c_pvs.values():
# Capture which PVs are associated with which VG
if p['vg_uuid'] not in c_pvs_in_vgs:
c_pvs_in_vgs[p['vg_uuid']] = []
if p['vg_name']:
c_pvs_in_vgs[p['vg_uuid']].append(
(p['pv_name'], p['pv_uuid']))
# Lookup for translating between /dev/<name> and pv uuid
c_lookup[p['pv_name']] = p['pv_uuid']
return c_pvs, c_lookup, c_pvs_in_vgs
@staticmethod
@@ -135,7 +136,17 @@ class DataStore(object):
i['pvseg_size'] = i['pv_pe_count']
i['segtype'] = 'free'
DataStore._pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup)
for p in c_pvs.values():
# Capture which PVs are associated with which VG
if p['vg_uuid'] not in c_pvs_in_vgs:
c_pvs_in_vgs[p['vg_uuid']] = []
if p['vg_name']:
c_pvs_in_vgs[p['vg_uuid']].append(
(p['pv_name'], p['pv_uuid']))
# Lookup for translating between /dev/<name> and pv uuid
c_lookup[p['pv_name']] = p['pv_uuid']
return c_pvs, c_lookup, c_pvs_in_vgs
@@ -426,12 +437,6 @@ class DataStore(object):
rc.append(self.pvs[self.pv_path_to_uuid[s]])
return rc
def pv_missing(self, pv_uuid):
if pv_uuid in self.pvs:
if self.pvs[pv_uuid]['pv_missing'] == '':
return False
return True
def fetch_vgs(self, vg_name):
if not vg_name:
return self.vgs.values()
@@ -515,7 +520,6 @@ if __name__ == "__main__":
print("PVS")
for v in ds.pvs.values():
pp.pprint(v)
print('PV missing is %s' % ds.pv_missing(v['pv_uuid']))
print("VGS")
for v in ds.vgs.values():

View File

@@ -1,4 +1,4 @@
#!@PYTHON3@
#!/usr/bin/env python3
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
@@ -10,7 +10,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from lvmdbusd import main
import lvmdbusd
if __name__ == '__main__':
sys.exit(main())
sys.exit(lvmdbusd.main())

View File

@@ -10,27 +10,25 @@
from . import cfg
from . import objectmanager
from . import utils
from .cfg import BUS_NAME, BASE_INTERFACE, BASE_OBJ_PATH, MANAGER_OBJ_PATH
from .cfg import BASE_INTERFACE, BASE_OBJ_PATH, MANAGER_OBJ_PATH
import threading
from . import cmdhandler
import time
import signal
import dbus
import dbus.mainloop.glib
from . import lvmdb
# noinspection PyUnresolvedReferences
from gi.repository import GLib
from .fetch import StateUpdate
from .fetch import load
from .manager import Manager
from .background import background_reaper
import traceback
import queue
from . import udevwatch
from .utils import log_debug, log_error
from .utils import log_debug
import argparse
import os
import sys
from .cmdhandler import LvmFlightRecorder
from .request import RequestEntry
from .refresh import handle_external_event, event_complete
class Lvm(objectmanager.ObjectManager):
@@ -38,16 +36,54 @@ class Lvm(objectmanager.ObjectManager):
super(Lvm, self).__init__(object_path, BASE_INTERFACE)
def _discard_pending_refreshes():
# We just handled a refresh; any refreshes still in the queue can be
# removed because by definition they are older than the one we just did.
# As we limit the number of refreshes getting into the queue,
# we should only ever have one to remove.
requests = []
while not cfg.worker_q.empty():
try:
r = cfg.worker_q.get(block=False)
if r.method != handle_external_event:
requests.append(r)
else:
# Make sure we mark this event complete even though it didn't
# run; otherwise no other events will get processed
event_complete()
break
except queue.Empty:
break
# Any requests we removed, but did not discard need to be re-queued
for r in requests:
cfg.worker_q.put(r)
def process_request():
while cfg.run.value != 0:
# noinspection PyBroadException
try:
req = cfg.worker_q.get(True, 5)
start = cfg.db.num_refreshes
log_debug(
"Running method: %s with args %s" %
(str(req.method), str(req.arguments)))
req.run_cmd()
log_debug("Method complete ")
end = cfg.db.num_refreshes
num_refreshes = end - start
if num_refreshes > 0:
_discard_pending_refreshes()
if num_refreshes > 1:
log_debug(
"Inspect method %s for too many refreshes" %
(str(req.method)))
log_debug("Complete ")
except queue.Empty:
pass
except Exception:
@@ -55,142 +91,92 @@ def process_request():
utils.log_error("process_request exception: \n%s" % st)
def check_bb_size(value):
v = int(value)
if v < 0:
raise argparse.ArgumentTypeError(
"positive integers only ('%s' invalid)" % value)
return v
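# Illustrative sketch (not part of the original commit): check_bb_size() is
# intended as an argparse type= callback, as used for --blackboxsize below:
#   parser.add_argument('--blackboxsize', type=check_bb_size, default=10)
# so a value like '-5' is rejected by argparse before main() ever sees it.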
def install_signal_handlers():
# Because of the glib main loop, the python signal handler code is not
# usable here and we need to use the glib calls instead
signal_add = None
if hasattr(GLib, 'unix_signal_add'):
signal_add = GLib.unix_signal_add
elif hasattr(GLib, 'unix_signal_add_full'):
signal_add = GLib.unix_signal_add_full
if signal_add:
signal_add(GLib.PRIORITY_HIGH, signal.SIGHUP, utils.handler, signal.SIGHUP)
signal_add(GLib.PRIORITY_HIGH, signal.SIGINT, utils.handler, signal.SIGINT)
signal_add(GLib.PRIORITY_HIGH, signal.SIGUSR1, utils.handler, signal.SIGUSR1)
else:
log_error("GLib.unix_signal_[add|add_full] are NOT available!")
def main():
start = time.time()
# Add simple command line handling
parser = argparse.ArgumentParser()
parser.add_argument(
"--udev", action='store_true',
help="Use udev for updating state",
default=False,
dest='use_udev')
parser.add_argument(
"--debug", action='store_true',
help="Dump debug messages", default=False,
dest='debug')
parser.add_argument(
"--nojson", action='store_false',
help="Do not use LVM JSON output (disables lvmshell)", default=True,
dest='use_json')
parser.add_argument(
"--lvmshell", action='store_true',
help="Use the lvm shell, not fork & exec lvm",
default=False,
dest='use_lvm_shell')
parser.add_argument(
"--blackboxsize",
help="Size of the black box flight recorder, 0 to disable",
default=10,
type=check_bb_size,
dest='bb_size')
parser.add_argument("--udev", action='store_true',
help="Use udev for updating state", default=False,
dest='use_udev')
parser.add_argument("--debug", action='store_true',
help="Dump debug messages", default=False,
dest='debug')
parser.add_argument("--nojson", action='store_false',
help="Do not use LVM JSON output", default=None,
dest='use_json')
use_session = os.getenv('LVMDBUSD_USE_SESSION', False)
# Ensure that we get consistent output for parsing stdout/stderr
os.environ["LC_ALL"] = "C"
cfg.args = parser.parse_args()
cfg.create_request_entry = RequestEntry
args = parser.parse_args()
# We create a flight recorder in cmdhandler too, but we replace it here
# as the user may be specifying a different size. The default one in
# cmdhandler is for when we are running other code with a different main.
cfg.blackbox = LvmFlightRecorder(cfg.args.bb_size)
if cfg.args.use_lvm_shell and not cfg.args.use_json:
log_error("You cannot specify --lvmshell and --nojson")
sys.exit(1)
cfg.DEBUG = args.debug
# List of threads that we start up
thread_list = []
install_signal_handlers()
start = time.time()
# Install signal handlers
for s in [signal.SIGHUP, signal.SIGINT]:
try:
signal.signal(s, utils.handler)
except RuntimeError:
pass
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
dbus.mainloop.glib.threads_init()
cmdhandler.set_execution(cfg.args.use_lvm_shell)
if use_session:
cfg.bus = dbus.SessionBus()
else:
cfg.bus = dbus.SystemBus()
# The base name variable needs to stay referenced for the bus name to remain claimed.
# noinspection PyUnusedLocal
base_name = dbus.service.BusName(BUS_NAME, cfg.bus)
base_name = dbus.service.BusName(BASE_INTERFACE, cfg.bus)
cfg.om = Lvm(BASE_OBJ_PATH)
cfg.om.register_object(Manager(MANAGER_OBJ_PATH))
cfg.db = lvmdb.DataStore(cfg.args.use_json)
cfg.load = load
# Using a thread to process requests, we cannot hang the dbus library
# thread that is handling the dbus interface
thread_list.append(threading.Thread(target=process_request,
name='process_request'))
cfg.db = lvmdb.DataStore(args.use_json)
# Have a single thread handle updating lvm and the dbus model so we
# don't have multiple threads doing this at the same time
updater = StateUpdate()
thread_list.append(updater.thread)
# Start up thread to monitor pv moves
thread_list.append(
threading.Thread(target=background_reaper, name="pv_move_reaper"))
cfg.load = updater.load
# Using a thread to process requests.
thread_list.append(threading.Thread(target=process_request))
cfg.load(refresh=False, emit_signal=False)
cfg.loop = GLib.MainLoop()
for thread in thread_list:
thread.daemon = True
thread.start()
# Add udev watching
if cfg.args.use_udev:
log_debug('Utilizing udev to trigger updates')
# In all cases we are going to monitor for udev until we get an
# ExternalEvent. In the case where we get an external event and the user
# didn't specify --udev we will stop monitoring udev
udevwatch.add()
for process in thread_list:
process.daemon = True
process.start()
end = time.time()
log_debug(
'Service ready! total time= %.4f, lvm time= %.4f count= %d' %
'Service ready! total time= %.2f, lvm time= %.2f count= %d' %
(end - start, cmdhandler.total_time, cmdhandler.total_count),
'bg_black', 'fg_light_green')
# Add udev watching
if args.use_udev:
log_debug('Utilizing udev to trigger updates')
udevwatch.add()
try:
if cfg.run.value != 0:
cfg.loop.run()
udevwatch.remove()
for thread in thread_list:
thread.join()
if args.use_udev:
udevwatch.remove()
for process in thread_list:
process.join()
except KeyboardInterrupt:
# If we are unable to register a signal handler, we will end up here when
# the service gets a ^C or a kill -2 <parent pid>
utils.handler(signal.SIGINT)
utils.handler(signal.SIGINT, None)
return 0

View File

@@ -6,6 +6,7 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .automatedproperties import AutomatedProperties
from . import utils
@@ -13,13 +14,14 @@ from .cfg import MANAGER_INTERFACE
import dbus
from . import cfg
from . import cmdhandler
from .fetch import load_pvs, load_vgs
from .request import RequestEntry
from . import udevwatch
from .refresh import event_add
# noinspection PyPep8Naming
class Manager(AutomatedProperties):
_Version_meta = ("s", MANAGER_INTERFACE)
_Version_meta = ("t", MANAGER_INTERFACE)
def __init__(self, object_path):
super(Manager, self).__init__(object_path)
@@ -27,31 +29,31 @@ class Manager(AutomatedProperties):
@property
def Version(self):
return dbus.String('1.0.0')
@staticmethod
def handle_execute(rc, out, err):
if rc == 0:
cfg.load()
else:
# Need to work on error handling, need consistent
raise dbus.exceptions.DBusException(
MANAGER_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
return '1.0.0'
@staticmethod
def _pv_create(device, create_options):
# Check to see if a PV already exists for this device before
# trying to create a new one
pv = cfg.om.get_object_path_by_uuid_lvm_id(device, device)
pv = cfg.om.get_object_path_by_uuid_lvm_id(
device, device, None, False)
if pv:
raise dbus.exceptions.DBusException(
MANAGER_INTERFACE, "PV %s Already exists!" % device)
MANAGER_INTERFACE, "PV Already exists!")
created_pv = []
rc, out, err = cmdhandler.pv_create(create_options, [device])
Manager.handle_execute(rc, out, err)
return cfg.om.get_object_path_by_lvm_id(device)
if rc == 0:
pvs = load_pvs([device], emit_signal=True)[0]
for p in pvs:
created_pv = p.dbus_object_path()
else:
raise dbus.exceptions.DBusException(
MANAGER_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
return created_pv
@dbus.service.method(
dbus_interface=MANAGER_INTERFACE,
@@ -78,8 +80,20 @@ class Manager(AutomatedProperties):
MANAGER_INTERFACE, 'object path = %s not found' % p)
rc, out, err = cmdhandler.vg_create(create_options, pv_devices, name)
Manager.handle_execute(rc, out, err)
return cfg.om.get_object_path_by_lvm_id(name)
created_vg = "/"
if rc == 0:
vgs = load_vgs([name], emit_signal=True)[0]
for v in vgs:
created_vg = v.dbus_object_path()
# Update the PVS
load_pvs(refresh=True, emit_signal=True, cache_refresh=False)
else:
raise dbus.exceptions.DBusException(
MANAGER_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
return created_vg
@dbus.service.method(
dbus_interface=MANAGER_INTERFACE,
@@ -100,10 +114,6 @@ class Manager(AutomatedProperties):
# This is a diagnostic and should not be run in normal operation, so
# let's remove the log entries for refresh as it's implied.
# Run an internal diagnostic on the object manager lookup tables
lc = cfg.om.validate_lookups()
rc = cfg.load(log=False)
if rc != 0:
@@ -111,7 +121,7 @@ class Manager(AutomatedProperties):
'bg_black', 'fg_light_red')
else:
utils.log_debug('Manager.Refresh - exit %d' % (rc))
return rc + lc
return rc
@dbus.service.method(
dbus_interface=MANAGER_INTERFACE,
@@ -131,28 +141,11 @@ class Manager(AutomatedProperties):
r = RequestEntry(-1, Manager._refresh, (), cb, cbe, False)
cfg.worker_q.put(r)
@dbus.service.method(
dbus_interface=MANAGER_INTERFACE)
def FlightRecorderDump(self):
"""
Dump the flight recorder to syslog
"""
cfg.blackbox.dump()
@staticmethod
def _lookup_by_lvm_id(key):
p = cfg.om.get_object_path_by_uuid_lvm_id(key, key)
if not p:
p = '/'
utils.log_debug('LookUpByLvmId: key = %s, result = %s' % (key, p))
return p
@dbus.service.method(
dbus_interface=MANAGER_INTERFACE,
in_signature='s',
out_signature='o',
async_callbacks=('cb', 'cbe'))
def LookUpByLvmId(self, key, cb, cbe):
out_signature='o')
def LookUpByLvmId(self, key):
"""
Given a lvm id in one of the forms:
@@ -166,50 +159,29 @@ class Manager(AutomatedProperties):
:param key: The lookup value
:return: Return the object path. If object not found you will get '/'
"""
r = RequestEntry(-1, Manager._lookup_by_lvm_id, (key,), cb, cbe, False)
cfg.worker_q.put(r)
@staticmethod
def _use_lvm_shell(yes_no):
return dbus.Boolean(cmdhandler.set_execution(yes_no))
p = cfg.om.get_object_path_by_uuid_lvm_id(
key, key, gen_new=False)
if p:
return p
return '/'
@dbus.service.method(
dbus_interface=MANAGER_INTERFACE,
in_signature='b', out_signature='b',
async_callbacks=('cb', 'cbe'))
def UseLvmShell(self, yes_no, cb, cbe):
in_signature='b')
def UseLvmShell(self, yes_no):
"""
Allow the client to enable/disable lvm shell, used for testing
:param yes_no:
:param cb: dbus python call back parameter, not client visible
:param cbe: dbus python error call back parameter, not client visible
:return: Boolean
:return: Nothing
"""
r = RequestEntry(-1, Manager._use_lvm_shell, (yes_no,), cb, cbe, False)
cfg.worker_q.put(r)
@staticmethod
def _external_event(command):
utils.log_debug("Processing _external_event= %s" % command,
'bg_black', 'fg_orange')
cfg.load()
cmdhandler.set_execution(yes_no)
@dbus.service.method(
dbus_interface=MANAGER_INTERFACE,
in_signature='s', out_signature='i')
def ExternalEvent(self, command):
utils.log_debug("ExternalEvent %s" % command)
# If a user didn't explicitly specify udev, we will turn it off now.
if not cfg.args.use_udev:
if udevwatch.remove():
utils.log_debug("ExternalEvent received, disabling "
"udev monitoring")
# We are dependent on external events now to stay current!
cfg.got_external_event = True
r = RequestEntry(
-1, Manager._external_event, (command,), None, None, False)
cfg.worker_q.put(r)
event_add((command,))
return dbus.Int32(0)
@staticmethod
@@ -219,8 +191,15 @@ class Manager(AutomatedProperties):
activate, cache, device_path,
major_minor, scan_options)
Manager.handle_execute(rc, out, err)
return '/'
if rc == 0:
# This could potentially change the state quite a bit, so let's
# update everything to be safe
cfg.load()
return '/'
else:
raise dbus.exceptions.DBusException(
MANAGER_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
@dbus.service.method(
dbus_interface=MANAGER_INTERFACE,

View File

@@ -12,9 +12,8 @@ import threading
import traceback
import dbus
import os
import copy
from . import cfg
from .utils import log_debug, pv_obj_path_generate, log_error
from .utils import log_debug
from .automatedproperties import AutomatedProperties
@@ -32,12 +31,14 @@ class ObjectManager(AutomatedProperties):
self._id_to_object_path = {}
self.rlock = threading.RLock()
@staticmethod
def _get_managed_objects(obj):
with obj.rlock:
@dbus.service.method(
dbus_interface="org.freedesktop.DBus.ObjectManager",
out_signature='a{oa{sa{sv}}}')
def GetManagedObjects(self):
with self.rlock:
rc = {}
try:
for k, v in list(obj._objects.items()):
for k, v in list(self._objects.items()):
path, props = v[0].emit_data()
rc[path] = props
except Exception:
@@ -45,14 +46,6 @@ class ObjectManager(AutomatedProperties):
sys.exit(1)
return rc
@dbus.service.method(
dbus_interface="org.freedesktop.DBus.ObjectManager",
out_signature='a{oa{sa{sv}}}', async_callbacks=('cb', 'cbe'))
def GetManagedObjects(self, cb, cbe):
r = cfg.create_request_entry(-1, ObjectManager._get_managed_objects,
(self, ), cb, cbe, False)
cfg.worker_q.put(r)
def locked(self):
"""
If some external code need to run across a number of different
@@ -77,37 +70,12 @@ class ObjectManager(AutomatedProperties):
log_debug(('SIGNAL: InterfacesRemoved(%s, %s)' %
(str(object_path), str(interface_list))))
def validate_lookups(self):
with self.rlock:
tmp_lookups = copy.deepcopy(self._id_to_object_path)
# iterate over all we know, removing from the copy. If all is well
# we will have zero items left over
for path, md in self._objects.items():
obj, lvm_id, uuid = md
if lvm_id:
assert path == tmp_lookups[lvm_id]
del tmp_lookups[lvm_id]
if uuid:
assert path == tmp_lookups[uuid]
del tmp_lookups[uuid]
rc = len(tmp_lookups)
if rc:
# Error condition
log_error("_id_to_object_path has extraneous lookups!")
for key, path in tmp_lookups.items():
log_error("Key= %s, path= %s" % (key, path))
return rc
def _lookup_add(self, obj, path, lvm_id, uuid):
"""
Store information about what we added to the caches so that we
can remove it cleanly
:param obj: The dbus object we are storing
:param lvm_id: The lvm id for the asset
:param lvm_id: The user name for the asset
:param uuid: The uuid for the asset
:return:
"""
@@ -117,12 +85,7 @@ class ObjectManager(AutomatedProperties):
self._lookup_remove(path)
self._objects[path] = (obj, lvm_id, uuid)
# Make sure we have one or the other
assert lvm_id or uuid
if lvm_id:
self._id_to_object_path[lvm_id] = path
self._id_to_object_path[lvm_id] = path
if uuid:
self._id_to_object_path[uuid] = path
@@ -131,13 +94,8 @@ class ObjectManager(AutomatedProperties):
# Note: Only called internally, lock implied
if obj_path in self._objects:
(obj, lvm_id, uuid) = self._objects[obj_path]
if lvm_id in self._id_to_object_path:
del self._id_to_object_path[lvm_id]
if uuid in self._id_to_object_path:
del self._id_to_object_path[uuid]
del self._id_to_object_path[lvm_id]
del self._id_to_object_path[uuid]
del self._objects[obj_path]
def lookup_update(self, dbus_obj, new_uuid, new_lvm_id):
@@ -167,7 +125,7 @@ class ObjectManager(AutomatedProperties):
path, props = dbus_object.emit_data()
# print('Registering object path %s for %s' %
# (path, dbus_object.lvm_id))
# (path, dbus_object.lvm_id))
# We want fast access to the object in a number of different ways,
# so we use multiple hashes with different keys
@@ -215,7 +173,7 @@ class ObjectManager(AutomatedProperties):
def get_object_by_uuid_lvm_id(self, uuid, lvm_id):
with self.rlock:
return self.get_object_by_path(
self.get_object_path_by_uuid_lvm_id(uuid, lvm_id))
self.get_object_path_by_uuid_lvm_id(uuid, lvm_id, None, False))
def get_object_by_lvm_id(self, lvm_id):
"""
@@ -223,9 +181,8 @@ class ObjectManager(AutomatedProperties):
:param lvm_id: The lvm identifier
"""
with self.rlock:
lookup_rc = self._id_lookup(lvm_id)
if lookup_rc:
return self.get_object_by_path(lookup_rc)
if lvm_id in self._id_to_object_path:
return self.get_object_by_path(self._id_to_object_path[lvm_id])
return None
def get_object_path_by_lvm_id(self, lvm_id):
@@ -235,9 +192,8 @@ class ObjectManager(AutomatedProperties):
:return: Object path or '/' if not found
"""
with self.rlock:
lookup_rc = self._id_lookup(lvm_id)
if lookup_rc:
return lookup_rc
if lvm_id in self._id_to_object_path:
return self._id_to_object_path[lvm_id]
return '/'
def _uuid_verify(self, path, uuid, lvm_id):
@@ -250,111 +206,78 @@ class ObjectManager(AutomatedProperties):
:return: None
"""
# This gets called when we found an object based on lvm_id, ensure
# uuid is correct too, as they can change. There is no durable
# non-changeable name in lvm
# uuid is correct too, as they can change
if lvm_id != uuid:
if uuid and uuid not in self._id_to_object_path:
if uuid not in self._id_to_object_path:
obj = self.get_object_by_path(path)
self._lookup_add(obj, path, lvm_id, uuid)
def _lvm_id_verify(self, path, uuid, lvm_id):
def _return_lookup(self, uuid, lvm_identifier):
"""
Ensure lvm_id is present for a successful uuid lookup
NOTE: Internal call, assumes under object manager lock
:param path: Path to object we looked up
:param uuid: uuid used to find object
:param lvm_id: lvm_id to verify
:return: None
We found an identifier, so let's return the path to the found object
:param uuid: The lvm uuid
:param lvm_identifier: The lvm_id used to find object
:return:
"""
# This gets called when we found an object based on uuid, ensure
# lvm_id is correct too, as they can change. There is no durable
# non-changeable name in lvm
if lvm_id != uuid:
if lvm_id and lvm_id not in self._id_to_object_path:
obj = self.get_object_by_path(path)
self._lookup_add(obj, path, lvm_id, uuid)
def _id_lookup(self, the_id):
path = None
if the_id:
# The _id_to_object_path contains hash keys for everything, so
# uuid and lvm_id
if the_id in self._id_to_object_path:
path = self._id_to_object_path[the_id]
else:
if "/" in the_id:
if the_id.startswith('/'):
# We could have a pv device path lookup that failed,
# let's try the canonical form and try again.
canonical = os.path.realpath(the_id)
if canonical in self._id_to_object_path:
path = self._id_to_object_path[canonical]
else:
vg, lv = the_id.split("/", 1)
int_lvm_id = vg + "/" + ("[%s]" % lv)
if int_lvm_id in self._id_to_object_path:
path = self._id_to_object_path[int_lvm_id]
path = self._id_to_object_path[lvm_identifier]
self._uuid_verify(path, uuid, lvm_identifier)
return path
def get_object_path_by_uuid_lvm_id(self, uuid, lvm_id, path_create=None):
def get_object_path_by_uuid_lvm_id(self, uuid, lvm_id, path_create=None,
gen_new=True):
"""
For a given lvm asset return the dbus object path registered for it.
This method first looks up by uuid and then by lvm_id. You
can search by just one by setting uuid == lvm_id (uuid or lvm_id).
If the object is not found and path_create is a not None, the
path_create function will be called to create a new object path and
register it with the object manager for the specified uuid & lvm_id.
Note: If path create is not None, uuid and lvm_id cannot be equal
:param uuid: The uuid for the lvm object we are searching for
:param lvm_id: The lvm name (eg. pv device path, vg name, lv full name)
:param path_create: If not None, create the path using this function if
we fail to find the object by uuid or lvm_id.
:returns: None if the lvm asset is not found and path_create == None, otherwise
a valid dbus object path
For a given lvm asset return the dbus object registered to it. If the
object is not found and gen_new == True and path_create is a valid
function we will create a new path, register it and return it.
:param uuid: The uuid for the lvm object
:param lvm_id: The lvm name
:param path_create: The function used to create and register a new object path
:param gen_new: If True, create an object path when the lookup fails
"""
with self.rlock:
assert lvm_id
assert uuid
if path_create:
assert uuid != lvm_id
if gen_new:
assert path_create
# Check for Manager.LookUpByLvmId query, we cannot
# check/verify/update the uuid and lvm_id lookups so don't!
if uuid == lvm_id:
path = self._id_lookup(lvm_id)
path = None
if lvm_id in self._id_to_object_path:
self._return_lookup(uuid, lvm_id)
if "/" in lvm_id:
vg, lv = lvm_id.split("/", 1)
int_lvm_id = vg + "/" + ("[%s]" % lv)
if int_lvm_id in self._id_to_object_path:
self._return_lookup(uuid, int_lvm_id)
elif lvm_id.startswith('/'):
# We could have a pv device path lookup that failed,
# let's try the canonical form and try again.
canonical = os.path.realpath(lvm_id)
if canonical in self._id_to_object_path:
self._return_lookup(uuid, canonical)
if uuid and uuid in self._id_to_object_path:
# If we get here it indicates that we found the object, but
# the lvm_id lookup failed. In the case of a rename, the uuid
# will be correct, but the lvm_id will be wrong and vice versa.
# If the lvm_id does not equal the uuid, let's fix up the table
# so that lookups will be handled correctly.
path = self._id_to_object_path[uuid]
# In some cases we are looking up by one or the other, don't
# update when they are the same.
if uuid != lvm_id:
obj = self.get_object_by_path(path)
self._lookup_add(obj, path, lvm_id, uuid)
else:
# We have a uuid and a lvm_id we can do sanity checks to ensure
# that they are consistent
if gen_new:
path = path_create()
self._lookup_add(None, path, lvm_id, uuid)
# If a PV is missing its device path is '[unknown]' or some
# other text derivation of unknown. When we find that a PV is
# missing we will clear out the lvm_id as it's likely not unique
# and thus not useful and potentially harmful for lookups.
if path_create == pv_obj_path_generate and \
cfg.db.pv_missing(uuid):
lvm_id = None
# Let's check for the uuid first
path = self._id_lookup(uuid)
if path:
# Verify the lvm_id is sane
self._lvm_id_verify(path, uuid, lvm_id)
else:
# Unable to find by UUID, lets lookup by lvm_id
path = self._id_lookup(lvm_id)
if path:
# Verify the uuid is sane
self._uuid_verify(path, uuid, lvm_id)
else:
# We have exhausted all lookups, let's create if we can
if path_create:
path = path_create()
self._lookup_add(None, path, lvm_id, uuid)
# print('get_object_path_by_lvm_id(%s, %s, %s, %s: return %s' %
# (uuid, lvm_id, str(path_create), str(gen_new), path))
# pprint('get_object_path_by_lvm_id(%s, %s, %s, %s: return %s' %
# (uuid, lvm_id, str(path_create), str(gen_new), path))
return path
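# Illustrative sketch (not part of the original commit), using hypothetical
# names and object paths: after registration the lookup table can hold
# several keys for one object path, e.g.
#   self._id_to_object_path == {
#       'tWk3...lv-uuid...': '/com/redhat/lvmdbus1/Lv/5',
#       'vg00/[pool_tdata]': '/com/redhat/lvmdbus1/Lv/5'}
# and the fall-back logic above also resolves the caller-facing spelling
# 'vg00/pool_tdata' and canonicalized PV device paths to the same entry.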

View File

@@ -55,6 +55,9 @@ class PvState(State):
return self.lvm_path
def _lv_object_list(self, vg_name):
# Note we are returning "a(oa(tts))"
rc = []
if vg_name:
for lv in sorted(cfg.db.pv_contained_lv(self.lvm_id)):
@@ -66,7 +69,7 @@ class PvState(State):
lv_uuid, full_name, path_create)
rc.append((lv_path, segs))
return rc
return dbus.Array(rc, signature="(oa(tts))")
# noinspection PyUnusedLocal,PyPep8Naming
def __init__(self, lvm_path, Uuid, Name,
@@ -79,9 +82,7 @@ class PvState(State):
self.lv = self._lv_object_list(vg_name)
# It's possible to have a vg_name and no uuid with the main example
# being when the vg_name == '[unknown]'
if vg_uuid and vg_name:
if vg_name:
self.vg_path = cfg.om.get_object_path_by_uuid_lvm_id(
vg_uuid, vg_name, vg_obj_path_generate)
else:
@@ -137,30 +138,23 @@ class Pv(AutomatedProperties):
def _remove(pv_uuid, pv_name, remove_options):
# Remove the PV, if successful then remove from the model
# Make sure we have a dbus object representing it
Pv.validate_dbus_object(pv_uuid, pv_name)
rc, out, err = cmdhandler.pv_remove(pv_name, remove_options)
Pv.handle_execute(rc, out, err)
return '/'
@staticmethod
def handle_execute(rc, out, err):
if rc == 0:
cfg.load()
else:
# Need to work on error handling, need consistent
raise dbus.exceptions.DBusException(
PV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
@staticmethod
def validate_dbus_object(pv_uuid, pv_name):
dbo = cfg.om.get_object_by_uuid_lvm_id(pv_uuid, pv_name)
if not dbo:
if dbo:
rc, out, err = cmdhandler.pv_remove(pv_name, remove_options)
if rc == 0:
cfg.om.remove_object(dbo, True)
else:
# Need to work on error handling, need consistent
raise dbus.exceptions.DBusException(
PV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
PV_INTERFACE,
'PV with uuid %s and name %s not present!' %
(pv_uuid, pv_name))
return dbo
return '/'
@dbus.service.method(
dbus_interface=PV_INTERFACE,
@@ -177,11 +171,22 @@ class Pv(AutomatedProperties):
@staticmethod
def _resize(pv_uuid, pv_name, new_size_bytes, resize_options):
# Make sure we have a dbus object representing it
Pv.validate_dbus_object(pv_uuid, pv_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(pv_uuid, pv_name)
rc, out, err = cmdhandler.pv_resize(pv_name, new_size_bytes,
if dbo:
rc, out, err = cmdhandler.pv_resize(pv_name, new_size_bytes,
resize_options)
Pv.handle_execute(rc, out, err)
if rc == 0:
dbo.refresh()
else:
raise dbus.exceptions.DBusException(
PV_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
PV_INTERFACE,
'PV with uuid %s and name %s not present!' %
(pv_uuid, pv_name))
return '/'
@dbus.service.method(
@@ -199,10 +204,21 @@ class Pv(AutomatedProperties):
@staticmethod
def _allocation_enabled(pv_uuid, pv_name, yes_no, allocation_options):
# Make sure we have a dbus object representing it
Pv.validate_dbus_object(pv_uuid, pv_name)
rc, out, err = cmdhandler.pv_allocatable(
pv_name, yes_no, allocation_options)
Pv.handle_execute(rc, out, err)
dbo = cfg.om.get_object_by_uuid_lvm_id(pv_uuid, pv_name)
if dbo:
rc, out, err = cmdhandler.pv_allocatable(
pv_name, yes_no, allocation_options)
if rc == 0:
cfg.load()
else:
raise dbus.exceptions.DBusException(
PV_INTERFACE, 'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
PV_INTERFACE,
'PV with uuid %s and name %s not present!' %
(pv_uuid, pv_name))
return '/'
@dbus.service.method(
@@ -225,20 +241,26 @@ class Pv(AutomatedProperties):
@property
def PeSegments(self):
if len(self.state.pe_segments):
return dbus.Array(self.state.pe_segments, signature='(tt)')
return self.state.pe_segments
return dbus.Array([], '(tt)')
@property
def Exportable(self):
return dbus.Boolean(self.state.attr[1] == 'x')
if self.state.attr[1] == 'x':
return True
return False
@property
def Allocatable(self):
return dbus.Boolean(self.state.attr[0] == 'a')
if self.state.attr[0] == 'a':
return True
return False
@property
def Missing(self):
return dbus.Boolean(self.state.attr[2] == 'm')
if self.state.attr[2] == 'm':
return True
return False
def object_path(self):
return self._object_path
@@ -253,8 +275,8 @@ class Pv(AutomatedProperties):
@property
def Lv(self):
return dbus.Array(self.state.lv, signature="(oa(tts))")
return self.state.lv
@property
def Vg(self):
return dbus.ObjectPath(self.state.vg_path)
return self.state.vg_path

View File

@@ -0,0 +1,45 @@
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Try and minimize the refreshes we do.
import threading
from .request import RequestEntry
from . import cfg
from . import utils
_rlock = threading.RLock()
_count = 0
def handle_external_event(command):
utils.log_debug("External event: '%s'" % command)
event_complete()
cfg.load()
def event_add(params):
global _rlock
global _count
with _rlock:
if _count == 0:
_count += 1
r = RequestEntry(
-1, handle_external_event,
params, None, None, False)
cfg.worker_q.put(r)
def event_complete():
global _rlock
global _count
with _rlock:
if _count > 0:
_count -= 1
return _count
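# Illustrative sketch (not part of the original commit): the _count gate
# above coalesces bursts of events into a single queued refresh. Assuming an
# idle daemon:
#   event_add(('udev',))    # _count 0 -> 1, one handle_external_event queued
#   event_add(('udev',))    # _count is already 1, nothing new is queued
#   # worker runs handle_external_event -> event_complete() -> _count back to 0
#   event_add(('lvcreate',))  # queues a fresh refresh request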

View File

@@ -13,12 +13,13 @@ from gi.repository import GLib
from .job import Job
from . import cfg
import traceback
from .utils import log_error, mt_async_call
from .utils import log_error
class RequestEntry(object):
def __init__(self, tmo, method, arguments, cb, cb_error,
return_tuple=True, job_state=None):
self.tmo = tmo
self.method = method
self.arguments = arguments
self.cb = cb
@@ -34,32 +35,25 @@ class RequestEntry(object):
self._return_tuple = return_tuple
self._job_state = job_state
if tmo < 0:
if self.tmo < 0:
# Client is willing to block forever
pass
elif tmo == 0:
self._return_job()
else:
# Note: using 990 instead of 1000 for second to ms conversion to
# account for overhead. Goal is to return just before the
# timeout amount has expired. Better to be a little early than
# late.
self.timer_id = GLib.timeout_add(
tmo * 990, RequestEntry._request_timeout, self)
self.timer_id = GLib.timeout_add_seconds(
tmo, RequestEntry._request_timeout, self)
@staticmethod
def _request_timeout(r):
"""
Method which gets called when the timer runs out!
:param r: RequestEntry which timed out
:return: Result of timer_expired
:return: Nothing
"""
return r.timer_expired()
r.timer_expired()
def _return_job(self):
# Return job is only called when we create a request object or when
# we pop a timer. In both cases we are running in the correct context
# and do not need to schedule the call back in main context.
self._job = Job(self, self._job_state)
cfg.om.register_object(self._job, True)
if self._return_tuple:
@@ -116,9 +110,9 @@ class RequestEntry(object):
if error_rc == 0:
if self.cb:
if self._return_tuple:
mt_async_call(self.cb, (result, '/'))
self.cb((result, '/'))
else:
mt_async_call(self.cb, result)
self.cb(result)
else:
if self.cb_error:
if not error_exception:
@@ -129,14 +123,15 @@ class RequestEntry(object):
else:
error_exception = Exception(error_msg)
mt_async_call(self.cb_error, error_exception)
self.cb_error(error_exception)
else:
# We have a job and it's complete, indicate that it's done.
# TODO: We need to signal the job is done too.
self._job.Complete = True
self._job = None
def register_error(self, error_rc, error_message, error_exception):
self._reg_ending('/', error_rc, error_message, error_exception)
self._reg_ending(None, error_rc, error_message, error_exception)
def register_result(self, result):
self._reg_ending(result)

View File

@@ -8,42 +8,10 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pyudev
import threading
from .refresh import event_add
from . import cfg
from .request import RequestEntry
from . import utils
observer = None
observer_lock = threading.RLock()
_udev_lock = threading.RLock()
_udev_count = 0
def udev_add():
global _udev_count
with _udev_lock:
if _udev_count == 0:
_udev_count += 1
# Place this on the queue so any other operations will sequence
# behind it
r = RequestEntry(
-1, _udev_event, (), None, None, False)
cfg.worker_q.put(r)
def udev_complete():
global _udev_count
with _udev_lock:
if _udev_count > 0:
_udev_count -= 1
def _udev_event():
utils.log_debug("Processing udev event")
udev_complete()
cfg.load()
# noinspection PyUnusedLocal
@@ -68,24 +36,19 @@ def filter_event(action, device):
refresh = True
if refresh:
udev_add()
event_add(('udev',))
def add():
with observer_lock:
global observer
context = pyudev.Context()
monitor = pyudev.Monitor.from_netlink(context)
monitor.filter_by('block')
observer = pyudev.MonitorObserver(monitor, filter_event)
observer.start()
global observer
context = pyudev.Context()
monitor = pyudev.Monitor.from_netlink(context)
monitor.filter_by('block')
observer = pyudev.MonitorObserver(monitor, filter_event)
observer.start()
def remove():
with observer_lock:
global observer
if observer:
observer.stop()
observer = None
return True
return False
global observer
observer.stop()
observer = None

View File

@@ -13,15 +13,15 @@ import inspect
import ctypes
import os
import string
import datetime
import dbus
from lvmdbusd import cfg
# noinspection PyUnresolvedReferences
from gi.repository import GLib
import threading
import traceback
import signal
import dbus.service
import dbus.mainloop.glib
try:
from . import cfg
except SystemError:
import cfg
STDOUT_TTY = os.isatty(sys.stdout.fileno())
@@ -149,27 +149,17 @@ def add_properties(xml, interface, props):
:param props: Output from get_properties
:return: updated XML string
"""
root = Et.fromstring(xml)
if props:
root = Et.fromstring(xml)
interface_element = None
# Check to see if interface is present
for c in root:
# print c.attrib['name']
if c.attrib['name'] == interface:
interface_element = c
break
# Interface is not present, lets create it so we have something to
# attach the properties too
if interface_element is None:
interface_element = Et.Element("interface", name=interface)
root.append(interface_element)
# Add the properties
for p in props:
temp = '<property type="%s" name="%s" access="%s"/>\n' % \
(p['p_t'], p['p_name'], p['p_access'])
interface_element.append(Et.fromstring(temp))
for p in props:
temp = '<property type="%s" name="%s" access="%s"/>\n' % \
(p['p_t'], p['p_name'], p['p_access'])
c.append(Et.fromstring(temp))
return Et.tostring(root, encoding='utf8')
return xml
@@ -245,7 +235,7 @@ def parse_tags(tags):
if len(tags):
if ',' in tags:
return tags.split(',')
return dbus.Array(sorted([tags]), signature='s')
return sorted([tags])
return dbus.Array([], signature='s')
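# Illustrative sketch (not part of the original commit): parse_tags() turns
# the comma separated tag column from lvm into something the dbus layer can
# emit directly, e.g.
#   parse_tags('tag_a,tag_b') -> ['tag_a', 'tag_b']
#   parse_tags('')            -> dbus.Array([], signature='s')
# A single tag comes back as a one-element list (wrapped in a dbus.Array in
# the older variant shown above).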
@@ -253,13 +243,7 @@ def _common_log(msg, *attributes):
cfg.stdout_lock.acquire()
tid = ctypes.CDLL('libc.so.6').syscall(186)
if STDOUT_TTY:
msg = "%s: %d:%d - %s" % \
(datetime.datetime.now().strftime("%b %d %H:%M:%S.%f"),
os.getpid(), tid, msg)
else:
msg = "%d:%d - %s" % (os.getpid(), tid, msg)
msg = "%d:%d - %s" % (os.getpid(), tid, msg)
if STDOUT_TTY and attributes:
print(color(msg, *attributes))
@@ -274,7 +258,7 @@ def _common_log(msg, *attributes):
# @param msg Message to output to stdout
# @return None
def log_debug(msg, *attributes):
if cfg.args and cfg.args.debug:
if cfg.DEBUG:
_common_log(msg, *attributes)
@@ -282,47 +266,12 @@ def log_error(msg, *attributes):
_common_log(msg, *attributes)
def dump_threads_stackframe():
ident_to_name = {}
for thread_object in threading.enumerate():
ident_to_name[thread_object.ident] = thread_object
stacks = []
for thread_ident, frame in sys._current_frames().items():
stack = traceback.format_list(traceback.extract_stack(frame))
# There is a possibility that a thread gets created after we have
# enumerated all threads, so this lookup table may be incomplete;
# account for this
if thread_ident in ident_to_name:
thread_name = ident_to_name[thread_ident].name
else:
thread_name = "unknown"
stacks.append("Thread: %s" % (thread_name))
stacks.append("".join(stack))
log_error("Dumping thread stack frames!\n" + "\n".join(stacks))
# noinspection PyUnusedLocal
def handler(signum):
try:
if signum == signal.SIGUSR1:
dump_threads_stackframe()
else:
cfg.run.value = 0
log_debug('Exiting daemon with signal %d' % signum)
if cfg.loop is not None:
cfg.loop.quit()
except:
st = traceback.format_exc()
log_error("signal handler: exception (logged, not reported!) \n %s" % st)
# It's important we report that we handled the exception for the exception
# handler to continue to work, especially for signal 10 (SIGUSR1)
return True
def handler(signum, frame):
cfg.run.value = 0
log_debug('Signal handler called with signal %d' % signum)
if cfg.loop is not None:
cfg.loop.quit()
def pv_obj_path_generate():
@@ -533,115 +482,3 @@ def validate_tag(interface, tag):
raise dbus.exceptions.DBusException(
interface, 'tag (%s) contains invalid character, allowable set(%s)'
% (tag, _ALLOWABLE_TAG_CH))
def add_no_notify(cmdline):
"""
Given a command line to execute, we check whether `--config` is present; if it
is, we add global/notify_dbus=0 to its value, otherwise we append the option
to the end of the list.
:param: cmdline: The command line to inspect
:type: cmdline: list
:return: cmdline with notify_dbus config option present
:rtype: list
"""
# Only after we have seen an external event will we disable lvm from sending
# us one when we call lvm
if cfg.got_external_event:
if 'help' in cmdline:
return cmdline
if '--config' in cmdline:
for i, arg in enumerate(cmdline):
if arg == '--config':
if len(cmdline) <= i+1:
raise dbus.exceptions.DBusException("Missing value for --config option.")
cmdline[i+1] += " global/notify_dbus=0"
break
else:
cmdline.extend(['--config', 'global/notify_dbus=0'])
return cmdline
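# Illustrative sketch (not part of the original commit), assuming an external
# event has already been seen (cfg.got_external_event is True):
#   add_no_notify(['pvs', '--config', 'global/use_lvmetad=0'])
#     -> ['pvs', '--config', 'global/use_lvmetad=0 global/notify_dbus=0']
#   add_no_notify(['vgs'])
#     -> ['vgs', '--config', 'global/notify_dbus=0']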
# The methods below which start with mt_* are used to execute the desired code
# on the main thread of execution to alleviate any issues the dbus-python
# library has with multi-threaded access. Essentially, we are trying to
# ensure all dbus library interaction is done from the same thread!
def _async_handler(call_back, parameters):
params_str = ", ".join(str(x) for x in parameters)
log_debug('Main thread execution, callback = %s, parameters = (%s)' %
(str(call_back), params_str))
try:
if parameters:
call_back(*parameters)
else:
call_back()
except:
st = traceback.format_exc()
log_error("mt_async_call: exception (logged, not reported!) \n %s" % st)
# Execute the function on the main thread with the provided parameters, do
# not return *any* value or wait for the execution to complete!
def mt_async_call(function_call_back, *parameters):
GLib.idle_add(_async_handler, function_call_back, parameters)
# Run the supplied function and arguments on the main thread and wait for them
# to complete while allowing the ability to get the return value too.
#
# Example:
# result = MThreadRunner(foo, arg1, arg2).done()
#
class MThreadRunner(object):
@staticmethod
def runner(obj):
# noinspection PyProtectedMember
obj._run()
with obj.cond:
obj.function_complete = True
obj.cond.notify_all()
def __init__(self, function, *args):
self.f = function
self.rc = None
self.exception = None
self.args = args
self.function_complete = False
self.cond = threading.Condition(threading.Lock())
def done(self):
GLib.idle_add(MThreadRunner.runner, self)
with self.cond:
if not self.function_complete:
self.cond.wait()
if self.exception:
raise self.exception
return self.rc
def _run(self):
try:
if self.args:
self.rc = self.f(*self.args)
else:
self.rc = self.f()
except BaseException as be:
self.exception = be
st = traceback.format_exc()
log_error("MThreadRunner: exception \n %s" % st)
log_error("Exception will be raised in calling thread!")
def _remove_objects(dbus_objects_rm):
for o in dbus_objects_rm:
cfg.om.remove_object(o, emit_signal=True)
# Remove dbus objects from main thread
def mt_remove_dbus_objects(objs):
MThreadRunner(_remove_objects, objs).done()
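# A minimal usage sketch (illustrative only; 'stale_paths' is an assumed
# variable name): drop dbus objects from a worker thread by marshalling the
# removal onto the main thread and waiting for it to finish:
#
#   objs = [cfg.om.get_object_by_path(p) for p in stale_paths]
#   mt_remove_dbus_objects([o for o in objs if o])
#   # blocks until the GLib main loop has removed them and emitted the signals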
@@ -19,7 +19,7 @@ from .request import RequestEntry
from .loader import common
from .state import State
from . import background
from .utils import round_size, mt_remove_dbus_objects
from .utils import round_size
from .job import JobState
@@ -78,7 +78,7 @@ class VgState(State):
(pv_name, pv_uuid) = p
rc.append(cfg.om.get_object_path_by_uuid_lvm_id(
pv_uuid, pv_name, pv_obj_path_generate))
return rc
return dbus.Array(rc, signature='o')
def __init__(self, Uuid, Name, Fmt,
SizeBytes, FreeBytes, SysId, ExtentSizeBytes,
@@ -145,35 +145,29 @@ class Vg(AutomatedProperties):
@staticmethod
def fetch_new_lv(vg_name, lv_name):
return cfg.om.get_object_path_by_lvm_id("%s/%s" % (vg_name, lv_name))
@staticmethod
def handle_execute(rc, out, err):
if rc == 0:
cfg.load()
else:
# Need to work on error handling, need consistent
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
@staticmethod
def validate_dbus_object(vg_uuid, vg_name):
dbo = cfg.om.get_object_by_uuid_lvm_id(vg_uuid, vg_name)
if not dbo:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(vg_uuid, vg_name))
return dbo
cfg.load()
return cfg.om.get_object_by_lvm_id("%s/%s" % (vg_name, lv_name))
@staticmethod
def _rename(uuid, vg_name, new_name, rename_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
rc, out, err = cmdhandler.vg_rename(
vg_name, new_name, rename_options)
Vg.handle_execute(rc, out, err)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
rc, out, err = cmdhandler.vg_rename(vg_name, new_name,
rename_options)
if rc == 0:
cfg.load()
else:
# Need to work on error handling, need consistent
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return '/'
@dbus.service.method(
@@ -190,10 +184,31 @@ class Vg(AutomatedProperties):
@staticmethod
def _remove(uuid, vg_name, remove_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
# Remove the VG, if successful then remove from the model
rc, out, err = cmdhandler.vg_remove(vg_name, remove_options)
Vg.handle_execute(rc, out, err)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
# Remove the VG, if successful then remove from the model
rc, out, err = cmdhandler.vg_remove(vg_name, remove_options)
if rc == 0:
# Remove the VG
cfg.om.remove_object(dbo, True)
# If an LV has hidden LVs, things can get quite involved,
# especially if it's the last thin pool to get removed, so
# let's refresh all
cfg.load()
else:
# Need to work on error handling, need consistent
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return '/'
@dbus.service.method(
@@ -208,9 +223,26 @@ class Vg(AutomatedProperties):
@staticmethod
def _change(uuid, vg_name, change_options):
Vg.validate_dbus_object(uuid, vg_name)
rc, out, err = cmdhandler.vg_change(change_options, vg_name)
Vg.handle_execute(rc, out, err)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
rc, out, err = cmdhandler.vg_change(change_options, vg_name)
# To use an example with d-feet (Method input)
# {"activate": __import__('gi.repository.GLib', globals(),
# locals(), ['Variant']).Variant("s", "n")}
if rc == 0:
cfg.load()
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return '/'
# TODO: This should be broken into a number of different methods
@@ -231,24 +263,34 @@ class Vg(AutomatedProperties):
@staticmethod
def _reduce(uuid, vg_name, missing, pv_object_paths, reduce_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
pv_devices = []
if dbo:
pv_devices = []
# If pv_object_paths is not empty, then get the device paths
if pv_object_paths and len(pv_object_paths) > 0:
for pv_op in pv_object_paths:
pv = cfg.om.get_object_by_path(pv_op)
if pv:
pv_devices.append(pv.lvm_id)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'PV Object path not found = %s!' % pv_op)
# If pv_object_paths is not empty, then get the device paths
if pv_object_paths and len(pv_object_paths) > 0:
for pv_op in pv_object_paths:
pv = cfg.om.get_object_by_path(pv_op)
if pv:
pv_devices.append(pv.lvm_id)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'PV Object path not found = %s!' % pv_op)
rc, out, err = cmdhandler.vg_reduce(vg_name, missing, pv_devices,
reduce_options)
Vg.handle_execute(rc, out, err)
rc, out, err = cmdhandler.vg_reduce(vg_name, missing, pv_devices,
reduce_options)
if rc == 0:
cfg.load()
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE, 'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return '/'
@dbus.service.method(
@@ -265,26 +307,36 @@ class Vg(AutomatedProperties):
@staticmethod
def _extend(uuid, vg_name, pv_object_paths, extend_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
extend_devices = []
if dbo:
extend_devices = []
for i in pv_object_paths:
pv = cfg.om.get_object_by_path(i)
if pv:
extend_devices.append(pv.lvm_id)
for i in pv_object_paths:
pv = cfg.om.get_object_by_path(i)
if pv:
extend_devices.append(pv.lvm_id)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE, 'PV Object path not found = %s!' % i)
if len(extend_devices):
rc, out, err = cmdhandler.vg_extend(vg_name, extend_devices,
extend_options)
if rc == 0:
cfg.load()
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE, 'PV Object path not found = %s!' % i)
if len(extend_devices):
rc, out, err = cmdhandler.vg_extend(vg_name, extend_devices,
extend_options)
Vg.handle_execute(rc, out, err)
VG_INTERFACE, 'No pv_object_paths provided!')
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE, 'No pv_object_paths provided!')
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return '/'
@dbus.service.method(
@@ -321,24 +373,33 @@ class Vg(AutomatedProperties):
create_options):
# Make sure we have a dbus object representing it
pv_dests = []
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
Vg.validate_dbus_object(uuid, vg_name)
if dbo:
if len(pv_dests_and_ranges):
for pr in pv_dests_and_ranges:
pv_dbus_obj = cfg.om.get_object_by_path(pr[0])
if not pv_dbus_obj:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'PV Destination (%s) not found' % pr[0])
if len(pv_dests_and_ranges):
for pr in pv_dests_and_ranges:
pv_dbus_obj = cfg.om.get_object_by_path(pr[0])
if not pv_dbus_obj:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'PV Destination (%s) not found' % pr[0])
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
pv_dests.append((pv_dbus_obj.lvm_id, pr[1], pr[2]))
rc, out, err = cmdhandler.vg_lv_create(
vg_name, create_options, name, size_bytes, pv_dests)
rc, out, err = cmdhandler.vg_lv_create(
vg_name, create_options, name, size_bytes, pv_dests)
Vg.handle_execute(rc, out, err)
return Vg.fetch_new_lv(vg_name, name)
if rc == 0:
return Vg.fetch_new_lv(vg_name, name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@@ -374,13 +435,25 @@ class Vg(AutomatedProperties):
def _lv_create_linear(uuid, vg_name, name, size_bytes,
thin_pool, create_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
rc, out, err = cmdhandler.vg_lv_create_linear(
vg_name, create_options, name, size_bytes, thin_pool)
if dbo:
rc, out, err = cmdhandler.vg_lv_create_linear(
vg_name, create_options, name, size_bytes, thin_pool)
Vg.handle_execute(rc, out, err)
return Vg.fetch_new_lv(vg_name, name)
if rc == 0:
created_lv = Vg.fetch_new_lv(vg_name, name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return created_lv
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@@ -400,12 +473,24 @@ class Vg(AutomatedProperties):
def _lv_create_striped(uuid, vg_name, name, size_bytes, num_stripes,
stripe_size_kb, thin_pool, create_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
rc, out, err = cmdhandler.vg_lv_create_striped(
vg_name, create_options, name, size_bytes,
num_stripes, stripe_size_kb, thin_pool)
Vg.handle_execute(rc, out, err)
return Vg.fetch_new_lv(vg_name, name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
rc, out, err = cmdhandler.vg_lv_create_striped(
vg_name, create_options, name, size_bytes,
num_stripes, stripe_size_kb, thin_pool)
if rc == 0:
created_lv = Vg.fetch_new_lv(vg_name, name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE, 'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return created_lv
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@@ -428,11 +513,25 @@ class Vg(AutomatedProperties):
def _lv_create_mirror(uuid, vg_name, name, size_bytes,
num_copies, create_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
rc, out, err = cmdhandler.vg_lv_create_mirror(
vg_name, create_options, name, size_bytes, num_copies)
Vg.handle_execute(rc, out, err)
return Vg.fetch_new_lv(vg_name, name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
rc, out, err = cmdhandler.vg_lv_create_mirror(
vg_name, create_options, name, size_bytes, num_copies)
if rc == 0:
created_lv = Vg.fetch_new_lv(vg_name, name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return created_lv
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@@ -453,12 +552,26 @@ class Vg(AutomatedProperties):
def _lv_create_raid(uuid, vg_name, name, raid_type, size_bytes,
num_stripes, stripe_size_kb, create_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
rc, out, err = cmdhandler.vg_lv_create_raid(
vg_name, create_options, name, raid_type, size_bytes,
num_stripes, stripe_size_kb)
Vg.handle_execute(rc, out, err)
return Vg.fetch_new_lv(vg_name, name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
rc, out, err = cmdhandler.vg_lv_create_raid(
vg_name, create_options, name, raid_type, size_bytes,
num_stripes, stripe_size_kb)
if rc == 0:
created_lv = Vg.fetch_new_lv(vg_name, name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
return created_lv
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@@ -479,27 +592,35 @@ class Vg(AutomatedProperties):
def _create_pool(uuid, vg_name, meta_data_lv, data_lv,
create_options, create_method):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
# Retrieve the full names for the metadata and data lv
md = cfg.om.get_object_by_path(meta_data_lv)
data = cfg.om.get_object_by_path(data_lv)
if md and data:
if dbo and md and data:
new_name = data.Name
rc, out, err = create_method(
md.lv_full_name(), data.lv_full_name(), create_options)
if rc == 0:
mt_remove_dbus_objects((md, data))
cfg.om.remove_object(md, emit_signal=True)
cfg.om.remove_object(data, emit_signal=True)
Vg.handle_execute(rc, out, err)
cache_pool_lv = Vg.fetch_new_lv(vg_name, new_name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
msg = ""
if not dbo:
msg += 'VG with uuid %s and name %s not present!' % \
(uuid, vg_name)
if not md:
msg += 'Meta data LV with object path %s not present!' % \
(meta_data_lv)
@@ -510,7 +631,7 @@ class Vg(AutomatedProperties):
raise dbus.exceptions.DBusException(VG_INTERFACE, msg)
return Vg.fetch_new_lv(vg_name, new_name)
return cache_pool_lv
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@@ -544,21 +665,33 @@ class Vg(AutomatedProperties):
pv_devices = []
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
# Check for existence of pv object paths
for p in pv_object_paths:
pv = cfg.om.get_object_by_path(p)
if pv:
pv_devices.append(pv.Name)
if dbo:
# Check for existence of pv object paths
for p in pv_object_paths:
pv = cfg.om.get_object_by_path(p)
if pv:
pv_devices.append(pv.Name)
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE, 'PV object path = %s not found' % p)
rc, out, err = cmdhandler.pv_tag(
pv_devices, tags_add, tags_del, tag_options)
if rc == 0:
cfg.load()
return '/'
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE, 'PV object path = %s not found' % p)
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
rc, out, err = cmdhandler.pv_tag(
pv_devices, tags_add, tags_del, tag_options)
Vg.handle_execute(rc, out, err)
return '/'
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@@ -596,12 +729,25 @@ class Vg(AutomatedProperties):
@staticmethod
def _vg_add_rm_tags(uuid, vg_name, tags_add, tags_del, tag_options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
rc, out, err = cmdhandler.vg_tag(
vg_name, tags_add, tags_del, tag_options)
Vg.handle_execute(rc, out, err)
return '/'
if dbo:
rc, out, err = cmdhandler.vg_tag(
vg_name, tags_add, tags_del, tag_options)
if rc == 0:
dbo.refresh()
return '/'
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@@ -638,10 +784,23 @@ class Vg(AutomatedProperties):
@staticmethod
def _vg_change_set(uuid, vg_name, method, value, options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
rc, out, err = method(vg_name, value, options)
Vg.handle_execute(rc, out, err)
return '/'
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
rc, out, err = method(vg_name, value, options)
if rc == 0:
dbo.refresh()
return '/'
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@@ -681,7 +840,9 @@ class Vg(AutomatedProperties):
cfg.worker_q.put(r)
def _attribute(self, pos, ch):
return dbus.Boolean(self.state.attr[pos] == ch)
if self.state.attr[pos] == ch:
return True
return False
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@@ -699,11 +860,23 @@ class Vg(AutomatedProperties):
def _vg_activate_deactivate(uuid, vg_name, activate, control_flags,
options):
# Make sure we have a dbus object representing it
Vg.validate_dbus_object(uuid, vg_name)
rc, out, err = cmdhandler.activate_deactivate(
'vgchange', vg_name, activate, control_flags, options)
Vg.handle_execute(rc, out, err)
return '/'
dbo = cfg.om.get_object_by_uuid_lvm_id(uuid, vg_name)
if dbo:
rc, out, err = cmdhandler.activate_deactivate(
'vgchange', vg_name, activate, control_flags, options)
if rc == 0:
cfg.load()
return '/'
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'Exit code %s, stderr = %s' % (str(rc), err))
else:
raise dbus.exceptions.DBusException(
VG_INTERFACE,
'VG with uuid %s and name %s not present!' %
(uuid, vg_name))
@dbus.service.method(
dbus_interface=VG_INTERFACE,
@@ -735,11 +908,11 @@ class Vg(AutomatedProperties):
@property
def Pvs(self):
return dbus.Array(self.state.Pvs, signature='o')
return self.state.Pvs
@property
def Lvs(self):
return dbus.Array(self.state.Lvs, signature='o')
return self.state.Lvs
@property
def lvm_id(self):
@@ -16,7 +16,7 @@ top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
SOURCES = lvmetad-core.c
SOURCES2 = lvmetactl.c
SOURCES2 = testclient.c
TARGETS = lvmetad lvmetactl
@@ -28,19 +28,22 @@ CFLOW_TARGET = lvmetad
include $(top_builddir)/make.tmpl
CFLAGS_lvmetactl.o += $(EXTRA_EXEC_CFLAGS)
CFLAGS_lvmetad-core.o += $(EXTRA_EXEC_CFLAGS)
INCLUDES += -I$(top_srcdir)/libdaemon/server
LDFLAGS += -L$(top_builddir)/libdaemon/server $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS)
LIBS += $(RT_LIBS) $(DAEMON_LIBS) -ldevmapper $(PTHREAD_LIBS)
LVMLIBS = -ldaemonserver $(LVMINTERNAL_LIBS) -ldevmapper
LIBS += $(PTHREAD_LIBS)
LDFLAGS += -L$(top_builddir)/libdaemon/server $(EXTRA_EXEC_LDFLAGS)
CLDFLAGS += -L$(top_builddir)/libdaemon/server
CFLAGS += $(EXTRA_EXEC_CFLAGS)
lvmetad: $(OBJECTS) $(top_builddir)/libdaemon/client/libdaemonclient.a \
$(top_builddir)/libdaemon/server/libdaemonserver.a
$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJECTS) -ldaemonserver $(LIBS)
$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJECTS) $(LVMLIBS) $(LIBS)
lvmetactl: lvmetactl.o $(top_builddir)/libdaemon/client/libdaemonclient.a \
$(top_builddir)/libdaemon/server/libdaemonserver.a
$(CC) $(CFLAGS) $(LDFLAGS) -o $@ lvmetactl.o $(LIBS)
$(CC) $(CFLAGS) $(LDFLAGS) -o $@ lvmetactl.o $(LVMLIBS)
CLEAN_TARGETS += lvmetactl.o
@@ -22,9 +22,9 @@
#define LVMETAD_TOKEN_UPDATE_IN_PROGRESS "update in progress"
#define LVMETAD_DISABLE_REASON_DIRECT "DIRECT"
#define LVMETAD_DISABLE_REASON_LVM1 "LVM1"
#define LVMETAD_DISABLE_REASON_DUPLICATES "DUPLICATES"
#define LVMETAD_DISABLE_REASON_VGRESTORE "VGRESTORE"
#define LVMETAD_DISABLE_REASON_REPAIR "REPAIR"
struct volume_group;
@@ -200,12 +200,11 @@ struct vg_info {
#define GLFL_INVALID 0x00000001
#define GLFL_DISABLE 0x00000002
#define GLFL_DISABLE_REASON_DIRECT 0x00000004
/* 0x00000008 */
#define GLFL_DISABLE_REASON_LVM1 0x00000008
#define GLFL_DISABLE_REASON_DUPLICATES 0x00000010
#define GLFL_DISABLE_REASON_VGRESTORE 0x00000020
#define GLFL_DISABLE_REASON_REPAIR 0x00000040
#define GLFL_DISABLE_REASON_ALL (GLFL_DISABLE_REASON_DIRECT | GLFL_DISABLE_REASON_REPAIR | GLFL_DISABLE_REASON_DUPLICATES | GLFL_DISABLE_REASON_VGRESTORE)
#define GLFL_DISABLE_REASON_ALL (GLFL_DISABLE_REASON_DIRECT | GLFL_DISABLE_REASON_LVM1 | GLFL_DISABLE_REASON_DUPLICATES | GLFL_DISABLE_REASON_VGRESTORE)
#define VGFL_INVALID 0x00000001
@@ -258,21 +257,6 @@ static void destroy_metadata_hashes(lvmetad_state *s)
dm_hash_iterate(n, s->pvid_to_pvmeta)
dm_config_destroy(dm_hash_get_data(s->pvid_to_pvmeta, n));
dm_hash_iterate(n, s->vgid_to_vgname)
dm_free(dm_hash_get_data(s->vgid_to_vgname, n));
dm_hash_iterate(n, s->vgname_to_vgid)
dm_free(dm_hash_get_data(s->vgname_to_vgid, n));
dm_hash_iterate(n, s->vgid_to_info)
dm_free(dm_hash_get_data(s->vgid_to_info, n));
dm_hash_iterate(n, s->device_to_pvid)
dm_free(dm_hash_get_data(s->device_to_pvid, n));
dm_hash_iterate(n, s->pvid_to_vgid)
dm_free(dm_hash_get_data(s->pvid_to_vgid, n));
dm_hash_destroy(s->pvid_to_pvmeta);
dm_hash_destroy(s->vgid_to_metadata);
dm_hash_destroy(s->vgid_to_vgname);
@@ -615,7 +599,7 @@ static void mark_outdated_pv(lvmetad_state *s, const char *vgid, const char *pvi
outdated_pvs = dm_hash_lookup(s->vgid_to_outdated_pvs, vgid);
if (!outdated_pvs) {
if (!(outdated_pvs = config_tree_from_string_without_dup_node_check("outdated_pvs/pv_list = []")) ||
if (!(outdated_pvs = dm_config_from_string("outdated_pvs/pv_list = []")) ||
!(cft_vgid = make_text_node(outdated_pvs, "vgid", dm_pool_strdup(outdated_pvs->mem, vgid),
outdated_pvs->root, NULL)))
abort();
@@ -730,7 +714,7 @@ static response vg_lookup(lvmetad_state *s, request r)
if (!(res.cft->root = n = dm_config_create_node(res.cft, "response")))
goto nomem_un;
if (!(n->v = dm_config_create_value(res.cft)))
if (!(n->v = dm_config_create_value(cft)))
goto nomem_un;
n->parent = res.cft->root;
@@ -808,8 +792,7 @@ static int _update_pvid_to_vgid(lvmetad_state *s, struct dm_config_tree *vg,
if ((mode == REMOVE_EMPTY) && vgid_old) {
/* This copies the vgid_old string, doesn't reference it. */
if ((dm_hash_lookup(to_check, vgid_old) != (void*) 1) &&
!dm_hash_insert(to_check, vgid_old, (void*) 1)) {
if (!dm_hash_insert(to_check, vgid_old, (void*) 1)) {
ERROR(s, "update_pvid_to_vgid out of memory for hash insert vgid_old %s", vgid_old);
goto abort_daemon;
}
@@ -885,13 +868,16 @@ static int remove_metadata(lvmetad_state *s, const char *vgid, int update_pvids)
/* free the unmapped data */
if (info_lookup)
dm_free(info_lookup);
if (meta_lookup)
dm_config_destroy(meta_lookup);
if (name_lookup)
dm_free(name_lookup);
if (outdated_pvs_lookup)
dm_config_destroy(outdated_pvs_lookup);
dm_free(info_lookup);
dm_free(name_lookup);
dm_free(vgid_lookup);
if (vgid_lookup)
dm_free(vgid_lookup);
return 1;
}
@@ -1218,8 +1204,10 @@ static int _update_metadata_add_new(lvmetad_state *s, const char *new_name, cons
out:
out_free:
if (!new_name_dup || !new_vgid_dup || abort_daemon) {
dm_free(new_name_dup);
dm_free(new_vgid_dup);
if (new_name_dup)
dm_free(new_name_dup);
if (new_vgid_dup)
dm_free(new_vgid_dup);
ERROR(s, "lvmetad could not be updated and is aborting.");
exit(EXIT_FAILURE);
}
@@ -1261,8 +1249,8 @@ static int _update_metadata(lvmetad_state *s, const char *arg_name, const char *
const char *old_vgid = NULL;
const char *new_vgid = NULL;
const char *new_metadata_vgid;
int new_seq;
int old_seq = -1;
int new_seq = -1;
int needs_repair = 0;
int abort_daemon = 0;
int retval = 0;
@@ -1809,7 +1797,8 @@ static response pv_gone(lvmetad_state *s, request r)
}
dm_config_destroy(pvmeta);
dm_free(old_pvid);
if (old_pvid)
dm_free(old_pvid);
return daemon_reply_simple("OK", NULL );
}
@@ -1922,7 +1911,7 @@ static response pv_found(lvmetad_state *s, request r)
const char *arg_pvid = NULL;
const char *arg_pvid_lookup = NULL;
const char *new_pvid = NULL;
char *new_pvid_dup = NULL;
const char *new_pvid_dup = NULL;
const char *arg_name = NULL;
const char *arg_vgid = NULL;
const char *arg_vgid_lookup = NULL;
@@ -2085,7 +2074,7 @@ static response pv_found(lvmetad_state *s, request r)
if (!(new_pvid_dup = dm_strdup(new_pvid)))
goto nomem_free1;
if (!dm_hash_insert_binary(s->device_to_pvid, &new_device, sizeof(new_device), new_pvid_dup))
if (!dm_hash_insert_binary(s->device_to_pvid, &new_device, sizeof(new_device), (char *)new_pvid_dup))
goto nomem_free2;
if (!dm_hash_insert(s->pvid_to_pvmeta, new_pvid, new_pvmeta))
@@ -2121,8 +2110,6 @@ static response pv_found(lvmetad_state *s, request r)
DEBUGLOG(s, "pv_found ignore duplicate device %" PRIu64 " of existing PV for pvid %s",
arg_device, arg_pvid);
dm_config_destroy(new_pvmeta);
/* device_to_pvid no longer references prev_pvid_lookup */
dm_free((void*)prev_pvid_on_dev);
s->flags |= GLFL_DISABLE;
s->flags |= GLFL_DISABLE_REASON_DUPLICATES;
return reply_fail("Ignore duplicate PV");
@@ -2133,7 +2120,7 @@ static response pv_found(lvmetad_state *s, request r)
if (!(new_pvid_dup = dm_strdup(new_pvid)))
goto nomem_free1;
if (!dm_hash_insert_binary(s->device_to_pvid, &arg_device, sizeof(arg_device), new_pvid_dup))
if (!dm_hash_insert_binary(s->device_to_pvid, &arg_device, sizeof(arg_device), (char *)new_pvid_dup))
goto nomem_free2;
if (!dm_hash_insert(s->pvid_to_pvmeta, new_pvid, new_pvmeta))
@@ -2233,7 +2220,8 @@ static response pv_found(lvmetad_state *s, request r)
}
/* This was unhashed from device_to_pvid above. */
dm_free((void *)prev_pvid_on_dev);
if (prev_pvid_on_dev)
dm_free((void *)prev_pvid_on_dev);
return daemon_reply_simple("OK",
"status = %s", vg_status,
@@ -2245,7 +2233,7 @@ static response pv_found(lvmetad_state *s, request r)
NULL);
nomem_free2:
dm_free(new_pvid_dup);
dm_free((char *)new_pvid_dup);
nomem_free1:
dm_config_destroy(new_pvmeta);
nomem:
@@ -2367,8 +2355,8 @@ static response set_global_info(lvmetad_state *s, request r)
if ((reason = daemon_request_str(r, "disable_reason", NULL))) {
if (strstr(reason, LVMETAD_DISABLE_REASON_DIRECT))
reason_flags |= GLFL_DISABLE_REASON_DIRECT;
if (strstr(reason, LVMETAD_DISABLE_REASON_REPAIR))
reason_flags |= GLFL_DISABLE_REASON_REPAIR;
if (strstr(reason, LVMETAD_DISABLE_REASON_LVM1))
reason_flags |= GLFL_DISABLE_REASON_LVM1;
if (strstr(reason, LVMETAD_DISABLE_REASON_DUPLICATES))
reason_flags |= GLFL_DISABLE_REASON_DUPLICATES;
if (strstr(reason, LVMETAD_DISABLE_REASON_VGRESTORE))
@@ -2419,17 +2407,20 @@ static response set_global_info(lvmetad_state *s, request r)
static response get_global_info(lvmetad_state *s, request r)
{
/* This buffer should be large enough to hold all the possible reasons. */
char reason[REASON_BUF_SIZE] = { 0 };
char reason[REASON_BUF_SIZE];
char flag_str[64];
int pid;
/* This buffer should be large enough to hold all the possible reasons. */
memset(reason, 0, sizeof(reason));
pid = (int)daemon_request_int(r, "pid", 0);
if (s->flags & GLFL_DISABLE) {
snprintf(reason, REASON_BUF_SIZE, "%s%s%s%s",
snprintf(reason, REASON_BUF_SIZE - 1, "%s%s%s%s",
(s->flags & GLFL_DISABLE_REASON_DIRECT) ? LVMETAD_DISABLE_REASON_DIRECT "," : "",
(s->flags & GLFL_DISABLE_REASON_REPAIR) ? LVMETAD_DISABLE_REASON_REPAIR "," : "",
(s->flags & GLFL_DISABLE_REASON_LVM1) ? LVMETAD_DISABLE_REASON_LVM1 "," : "",
(s->flags & GLFL_DISABLE_REASON_DUPLICATES) ? LVMETAD_DISABLE_REASON_DUPLICATES "," : "",
(s->flags & GLFL_DISABLE_REASON_VGRESTORE) ? LVMETAD_DISABLE_REASON_VGRESTORE "," : "");
}
@@ -2525,8 +2516,10 @@ inval:
info = dm_hash_lookup(s->vgid_to_info, uuid);
if (!info) {
if (!(info = dm_zalloc(sizeof(struct vg_info))))
info = malloc(sizeof(struct vg_info));
if (!info)
goto bad;
memset(info, 0, sizeof(struct vg_info));
if (!dm_hash_insert(s->vgid_to_info, uuid, (void*)info))
goto bad;
}
@@ -2564,12 +2557,14 @@ static void _dump_pairs(struct buffer *buf, struct dm_hash_table *ht, const char
dm_hash_iterate(n, ht) {
const char *key = dm_hash_get_key(ht, n),
*val = dm_hash_get_data(ht, n);
buffer_append(buf, " ");
if (int_key)
(void) dm_asprintf(&append, " %d = \"%s\"\n", *(const int*)key, val);
(void) dm_asprintf(&append, "%d = \"%s\"", *(const int*)key, val);
else
(void) dm_asprintf(&append, " %s = \"%s\"\n", key, val);
(void) dm_asprintf(&append, "%s = \"%s\"", key, val);
if (append)
buffer_append(buf, append);
buffer_append(buf, "\n");
dm_free(append);
}
buffer_append(buf, "}\n");
@@ -2587,9 +2582,11 @@ static void _dump_info_version(struct buffer *buf, struct dm_hash_table *ht, con
while (n) {
const char *key = dm_hash_get_key(ht, n);
info = dm_hash_get_data(ht, n);
(void) dm_asprintf(&append, " %s = %lld\n", key, (long long)info->external_version);
buffer_append(buf, " ");
(void) dm_asprintf(&append, "%s = %lld", key, (long long)info->external_version);
if (append)
buffer_append(buf, append);
buffer_append(buf, "\n");
dm_free(append);
n = dm_hash_get_next(ht, n);
}
@@ -2608,9 +2605,11 @@ static void _dump_info_flags(struct buffer *buf, struct dm_hash_table *ht, const
while (n) {
const char *key = dm_hash_get_key(ht, n);
info = dm_hash_get_data(ht, n);
(void) dm_asprintf(&append, " %s = %llx\n", key, (long long)info->flags);
buffer_append(buf, " ");
(void) dm_asprintf(&append, "%s = %llx", key, (long long)info->flags);
if (append)
buffer_append(buf, append);
buffer_append(buf, "\n");
dm_free(append);
n = dm_hash_get_next(ht, n);
}
@@ -2669,7 +2668,6 @@ static response handler(daemon_state s, client_handle h, request r)
int pid;
int cache_lock = 0;
int info_lock = 0;
uint64_t timegap = 0;
rq = daemon_request_str(r, "request", "NONE");
token = daemon_request_str(r, "token", "NONE");
@@ -2698,8 +2696,9 @@ static response handler(daemon_state s, client_handle h, request r)
if (!prev_in_progress && this_in_progress) {
/* New update is starting (filter token is replaced by update token) */
(void) dm_strncpy(prev_token, state->token, sizeof(prev_token));
(void) dm_strncpy(state->token, token, sizeof(state->token));
memcpy(prev_token, state->token, 128);
strncpy(state->token, token, 128);
state->token[127] = 0;
state->update_begin = _monotonic_seconds();
state->update_timeout = update_timeout;
state->update_pid = pid;
@@ -2712,32 +2711,23 @@ static response handler(daemon_state s, client_handle h, request r)
state->update_cmd);
} else if (prev_in_progress && this_in_progress) {
timegap = _monotonic_seconds() - state->update_begin;
if (timegap < state->update_timeout) {
pthread_mutex_unlock(&state->token_lock);
return daemon_reply_simple("token_updating",
"expected = %s", state->token,
"update_pid = " FMTd64, (int64_t)state->update_pid,
"reason = %s", "another command has populated the cache",
NULL);
}
/* Current update is cancelled and replaced by a new update */
WARN(state, "token_update replacing pid %d begin %llu len %d cmd %s",
DEBUGLOG(state, "token_update replacing pid %d begin %llu len %d cmd %s",
state->update_pid,
(unsigned long long)state->update_begin,
(int)(timegap),
(int)(_monotonic_seconds() - state->update_begin),
state->update_cmd);
(void) dm_strncpy(prev_token, state->token, sizeof(prev_token));
(void) dm_strncpy(state->token, token, sizeof(state->token));
memcpy(prev_token, state->token, 128);
strncpy(state->token, token, 128);
state->token[127] = 0;
state->update_begin = _monotonic_seconds();
state->update_timeout = update_timeout;
state->update_pid = pid;
strncpy(state->update_cmd, cmd, CMD_NAME_SIZE - 1);
WARN(state, "token_update begin %llu timeout %d pid %d cmd %s",
DEBUGLOG(state, "token_update begin %llu timeout %d pid %d cmd %s",
(unsigned long long)state->update_begin,
state->update_timeout,
state->update_pid,
@@ -2748,23 +2738,23 @@ static response handler(daemon_state s, client_handle h, request r)
if (state->update_pid != pid) {
/* If a pid doing update was cancelled, ignore its token update at the end. */
WARN(state, "token_update ignored from cancelled update pid %d", pid);
DEBUGLOG(state, "token_update ignored from cancelled update pid %d", pid);
pthread_mutex_unlock(&state->token_lock);
return daemon_reply_simple("token_mismatch",
"expected = %s", state->token,
"received = %s", token,
"update_pid = " FMTd64, (int64_t)state->update_pid,
"reason = %s", "another command has populated the cache",
NULL);
"reason = %s", "another command has populated the cache");
}
WARN(state, "token_update end len %d pid %d new token %s",
DEBUGLOG(state, "token_update end len %d pid %d new token %s",
(int)(_monotonic_seconds() - state->update_begin),
state->update_pid, token);
(void) dm_strncpy(prev_token, state->token, sizeof(prev_token));
(void) dm_strncpy(state->token, token, sizeof(state->token));
memcpy(prev_token, state->token, 128);
strncpy(state->token, token, 128);
state->token[127] = 0;
state->update_begin = 0;
state->update_timeout = 0;
state->update_pid = 0;
@@ -2788,8 +2778,7 @@ static response handler(daemon_state s, client_handle h, request r)
"expected = %s", state->token,
"received = %s", token,
"update_pid = " FMTd64, (int64_t)state->update_pid,
"reason = %s", "another command has populated the cache",
NULL);
"reason = %s", "another command has populated the cache");
}
/* If a pid doing update was cancelled, ignore its update messages. */
@@ -2804,8 +2793,7 @@ static response handler(daemon_state s, client_handle h, request r)
"expected = %s", state->token,
"received = %s", token,
"update_pid = " FMTd64, (int64_t)state->update_pid,
"reason = %s", "another command has populated the lvmetad cache",
NULL);
"reason = %s", "another command has populated the lvmetad cache");
}
pthread_mutex_unlock(&state->token_lock);
@@ -2964,7 +2952,7 @@ static void usage(const char *prog, FILE *file)
int main(int argc, char *argv[])
{
signed char opt;
struct timespec timeout;
struct timeval timeout;
daemon_idle di = { .ptimeout = &timeout };
lvmetad_state ls = { .log_config = "" };
daemon_state s = {
@@ -19,33 +19,41 @@ SOURCES = lvmlockd-core.c
ifeq ("@BUILD_LOCKDSANLOCK@", "yes")
SOURCES += lvmlockd-sanlock.c
LOCK_LIBS += -lsanlock_client
endif
ifeq ("@BUILD_LOCKDDLM@", "yes")
SOURCES += lvmlockd-dlm.c
LOCK_LIBS += -ldlm_lt
endif
SOURCES2 = lvmlockctl.c
TARGETS = lvmlockd lvmlockctl
.PHONY: install_lvmlockd
include $(top_builddir)/make.tmpl
CFLAGS += $(EXTRA_EXEC_CFLAGS)
INCLUDES += -I$(top_srcdir)/libdaemon/server
LDFLAGS += -L$(top_builddir)/libdaemon/server $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS)
LIBS += $(RT_LIBS) $(DAEMON_LIBS) -ldevmapper $(PTHREAD_LIBS)
LVMLIBS = -ldaemonserver $(LVMINTERNAL_LIBS) -ldevmapper
LIBS += $(PTHREAD_LIBS)
ifeq ("@BUILD_LOCKDSANLOCK@", "yes")
LIBS += -lsanlock_client
endif
ifeq ("@BUILD_LOCKDDLM@", "yes")
LIBS += -ldlm_lt
endif
LDFLAGS += -L$(top_builddir)/libdaemon/server
CLDFLAGS += -L$(top_builddir)/libdaemon/server
lvmlockd: $(OBJECTS) $(top_builddir)/libdaemon/client/libdaemonclient.a \
$(top_builddir)/libdaemon/server/libdaemonserver.a
$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJECTS) $(LOCK_LIBS) -ldaemonserver $(LIBS)
$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJECTS) $(LVMLIBS) $(LIBS)
lvmlockctl: lvmlockctl.o $(top_builddir)/libdaemon/client/libdaemonclient.a
$(CC) $(CFLAGS) $(LDFLAGS) -o $@ lvmlockctl.o $(LIBS)
lvmlockctl: lvmlockctl.o $(top_builddir)/libdaemon/client/libdaemonclient.a \
$(top_builddir)/libdaemon/server/libdaemonserver.a
$(CC) $(CFLAGS) $(LDFLAGS) -o $@ lvmlockctl.o $(LVMLIBS)
install_lvmlockd: lvmlockd
$(INSTALL_PROGRAM) -D $< $(sbindir)/$(<F)
@@ -379,7 +379,7 @@ static int setup_dump_socket(void)
rv = bind(s, (struct sockaddr *) &dump_addr, dump_addrlen);
if (rv < 0) {
rv = -errno;
if (close(s))
if (!close(s))
log_error("failed to close dump socket");
return rv;
}
@@ -22,9 +22,9 @@ static inline daemon_handle lvmlockd_open(const char *sock)
daemon_info lvmlockd_info = {
.path = "lvmlockd",
.socket = sock ?: LVMLOCKD_SOCKET,
.autostart = 0,
.protocol = "lvmlockd",
.protocol_version = 1,
.autostart = 0
};
return daemon_open(lvmlockd_info);
@@ -32,7 +32,7 @@ static inline daemon_handle lvmlockd_open(const char *sock)
static inline void lvmlockd_close(daemon_handle h)
{
daemon_close(h);
return daemon_close(h);
}
/*
@@ -48,7 +48,5 @@ static inline void lvmlockd_close(daemon_handle h)
#define EVGKILLED 217 /* sanlock lost access to leases and VG is killed. */
#define ELOCKIO 218 /* sanlock io errors during lock op, may be transient. */
#define EREMOVED 219
#define EDEVOPEN 220 /* sanlock failed to open lvmlock LV */
#define ELMERR 221
#endif /* _LVM_LVMLOCKD_CLIENT_H */
@@ -19,7 +19,6 @@
#include "lvm-version.h"
#include "lvmetad-client.h"
#include "lvmlockd-client.h"
#include "dm-ioctl.h" /* for DM_UUID_LEN */
/* #include <assert.h> */
#include <errno.h>
@@ -1009,8 +1008,6 @@ static void add_work_action(struct action *act)
pthread_mutex_unlock(&worker_mutex);
}
#define ERR_LVMETAD_NOT_RUNNING -200
static daemon_reply send_lvmetad(const char *id, ...)
{
daemon_reply reply;
@@ -1031,9 +1028,9 @@ retry:
if (lvmetad_handle.error || lvmetad_handle.socket_fd < 0) {
err = lvmetad_handle.error ?: lvmetad_handle.socket_fd;
pthread_mutex_unlock(&lvmetad_mutex);
log_debug("lvmetad_open reconnect error %d", err);
log_error("lvmetad_open reconnect error %d", err);
memset(&reply, 0, sizeof(reply));
reply.error = ERR_LVMETAD_NOT_RUNNING;
reply.error = err;
va_end(ap);
return reply;
} else {
@@ -1267,15 +1264,6 @@ static int res_lock(struct lockspace *ls, struct resource *r, struct action *act
* caches, and tell lvmetad to set global invalid to 0.
*/
/*
* lvmetad not running:
* Even if we have not previously found lvmetad running,
* we attempt to connect and invalidate in case it has
* been started while lvmlockd is running. We don't
* want to allow lvmetad to be used with invalid data if
* it happens to be enabled and started after lvmlockd.
*/
if (inval_meta && (r->type == LD_RT_VG)) {
daemon_reply reply;
char *uuid;
@@ -1295,10 +1283,8 @@ static int res_lock(struct lockspace *ls, struct resource *r, struct action *act
"version = " FMTd64, (int64_t)new_version,
NULL);
if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK")) {
if (reply.error != ERR_LVMETAD_NOT_RUNNING)
log_error("set_vg_info in lvmetad failed %d", reply.error);
}
if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK"))
log_error("set_vg_info in lvmetad failed %d", reply.error);
daemon_reply_destroy(reply);
}
@@ -1313,10 +1299,8 @@ static int res_lock(struct lockspace *ls, struct resource *r, struct action *act
"global_invalid = " FMTd64, INT64_C(1),
NULL);
if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK")) {
if (reply.error != ERR_LVMETAD_NOT_RUNNING)
log_error("set_global_info in lvmetad failed %d", reply.error);
}
if (reply.error || strcmp(daemon_reply_str(reply, "response", ""), "OK"))
log_error("set_global_info in lvmetad failed %d", reply.error);
daemon_reply_destroy(reply);
}
@@ -1404,11 +1388,12 @@ static int res_convert(struct lockspace *ls, struct resource *r,
}
rv = lm_convert(ls, r, act->mode, act, r_version);
log_debug("S %s R %s res_convert rv %d", ls->name, r->name, rv);
if (rv < 0)
if (rv < 0) {
log_error("S %s R %s res_convert lm error %d", ls->name, r->name, rv);
return rv;
}
log_debug("S %s R %s res_convert lm done", ls->name, r->name);
if (lk->mode == LD_LK_EX && act->mode == LD_LK_SH) {
r->sh_count = 1;
@@ -2666,16 +2651,10 @@ out_act:
ls->drop_vg = drop_vg;
if (ls->lm_type == LD_LM_DLM && !strcmp(ls->name, gl_lsname_dlm))
global_dlm_lockspace_exists = 0;
/*
* Avoid a name collision if the same lockspace is added again before
* this thread is cleaned up. We just set ls->name to a "junk" value
* for the short period until the struct is freed. We could make it
* blank or fill it with garbage, but instead set it to REM:<name>
* to make it easier to follow the progress of freeing via log_debug.
*/
dm_strncpy(tmp_name, ls->name, sizeof(tmp_name));
snprintf(ls->name, sizeof(ls->name), "REM:%s", tmp_name);
/* Avoid a name collision if the same lockspace is added again before this thread is cleaned up. */
memset(tmp_name, 0, sizeof(tmp_name));
snprintf(tmp_name, MAX_NAME, "REM:%s", ls->name);
memcpy(ls->name, tmp_name, MAX_NAME);
pthread_mutex_unlock(&lockspaces_mutex);
/* worker_thread will join this thread, and free the ls */
@@ -2815,9 +2794,6 @@ static int add_lockspace_thread(const char *ls_name,
if (ls2->thread_stop) {
log_debug("add_lockspace_thread %s exists and stopping", ls->name);
rv = -EAGAIN;
} else if (!ls2->create_fail && !ls2->create_done) {
log_debug("add_lockspace_thread %s exists and starting", ls->name);
rv = -ESTARTING;
} else {
log_debug("add_lockspace_thread %s exists", ls->name);
rv = -EEXIST;
@@ -3059,7 +3035,7 @@ static int count_lockspace_starting(uint32_t client_id)
pthread_mutex_lock(&lockspaces_mutex);
list_for_each_entry(ls, &lockspaces, list) {
if (client_id && (ls->start_client_id != client_id))
if (ls->start_client_id != client_id)
continue;
if (!ls->create_done && !ls->create_fail) {
@@ -3327,6 +3303,7 @@ static int work_init_lv(struct action *act)
lm_type = ls->lm_type;
memcpy(vg_args, ls->vg_args, MAX_ARGS);
free_offset = ls->free_lock_offset;
ls->free_lock_offset = 0;
}
pthread_mutex_unlock(&lockspaces_mutex);
@@ -3460,7 +3437,7 @@ static void *worker_thread_main(void *arg_in)
add_client_result(act);
} else if (act->op == LD_OP_START_WAIT) {
act->result = count_lockspace_starting(0);
act->result = count_lockspace_starting(act->client_id);
if (!act->result)
add_client_result(act);
else
@@ -3494,7 +3471,7 @@ static void *worker_thread_main(void *arg_in)
list_for_each_entry_safe(act, safe, &delayed_list, list) {
if (act->op == LD_OP_START_WAIT) {
log_debug("work delayed start_wait for client %u", act->client_id);
act->result = count_lockspace_starting(0);
act->result = count_lockspace_starting(act->client_id);
if (!act->result) {
list_del(&act->list);
add_client_result(act);
@@ -3556,15 +3533,11 @@ static int setup_worker_thread(void)
static void close_worker_thread(void)
{
int perrno;
pthread_mutex_lock(&worker_mutex);
worker_stop = 1;
pthread_cond_signal(&worker_cond);
pthread_mutex_unlock(&worker_mutex);
if ((perrno = pthread_join(worker_thread, NULL)))
log_error("pthread_join worker_thread error %d", perrno);
pthread_join(worker_thread, NULL);
}
/* client_mutex is locked */
@@ -3693,17 +3666,7 @@ static int client_send_result(struct client *cl, struct action *act)
if (!gl_lsname_dlm[0])
strcat(result_flags, "NO_GL_LS,");
} else {
int found_lm = 0;
if (lm_support_dlm() && lm_is_running_dlm())
found_lm++;
if (lm_support_sanlock() && lm_is_running_sanlock())
found_lm++;
if (!found_lm)
strcat(result_flags, "NO_GL_LS,NO_LM");
else
strcat(result_flags, "NO_GL_LS");
strcat(result_flags, "NO_GL_LS,");
}
}
@@ -3800,8 +3763,7 @@ static int client_send_result(struct client *cl, struct action *act)
if (dump_fd >= 0) {
/* To avoid deadlock, send data here after the reply. */
send_dump_buf(dump_fd, dump_len);
if (close(dump_fd))
log_error("failed to close dump socket %d", dump_fd);
close(dump_fd);
}
return rv;
@@ -3874,9 +3836,8 @@ static int add_lock_action(struct action *act)
pthread_mutex_lock(&lockspaces_mutex);
if (ls_name[0])
ls = find_lockspace_name(ls_name);
pthread_mutex_unlock(&lockspaces_mutex);
if (!ls) {
pthread_mutex_unlock(&lockspaces_mutex);
if (act->op == LD_OP_UPDATE && act->rt == LD_RT_VG) {
log_debug("lockspace \"%s\" not found ignored for vg update", ls_name);
return -ENOLS;
@@ -4470,7 +4431,7 @@ static void client_recv_action(struct client *cl)
return;
}
req.cft = config_tree_from_string_without_dup_node_check(req.buffer.mem);
req.cft = dm_config_from_string(req.buffer.mem);
if (!req.cft) {
log_error("client recv %u config_from_string error", cl->id);
buffer_destroy(&req.buffer);
@@ -4793,8 +4754,8 @@ static void *client_thread_main(void *arg_in)
} else {
pthread_mutex_unlock(&cl->mutex);
}
} else
pthread_mutex_unlock(&client_mutex);
}
pthread_mutex_unlock(&client_mutex);
}
out:
return NULL;
@@ -4818,15 +4779,11 @@ static int setup_client_thread(void)
static void close_client_thread(void)
{
int perrno;
pthread_mutex_lock(&client_mutex);
client_stop = 1;
pthread_cond_signal(&client_cond);
pthread_mutex_unlock(&client_mutex);
if ((perrno = pthread_join(client_thread, NULL)))
log_error("pthread_join client_thread error %d", perrno);
pthread_join(client_thread, NULL);
}
/*
@@ -4950,10 +4907,14 @@ static int get_lockd_vgs(struct list_head *vg_lockd)
continue;
for (lv_cn = md_cn->child; lv_cn; lv_cn = lv_cn->sib) {
snprintf(find_str_path, PATH_MAX, "%s/lock_type", lv_cn->key);
lock_type = dm_config_find_str(lv_cn, find_str_path, NULL);
if (!lock_type)
continue;
snprintf(find_str_path, PATH_MAX, "%s/lock_args", lv_cn->key);
lock_args = dm_config_find_str(lv_cn, find_str_path, NULL);
if (!lock_args)
continue;
snprintf(find_str_path, PATH_MAX, "%s/id", lv_cn->key);
lv_uuid = dm_config_find_str(lv_cn, find_str_path, NULL);
@@ -4999,7 +4960,7 @@ out:
return rv;
}
static char _dm_uuid[DM_UUID_LEN];
static char _dm_uuid[64];
static char *get_dm_uuid(char *dm_name)
{
@@ -5218,17 +5179,20 @@ static void adopt_locks(void)
* Get list of lockspaces from lock managers.
* Get list of VGs from lvmetad with a lockd type.
* Get list of active lockd type LVs from /dev.
*
* ECONNREFUSED means the lock manager is not running.
* This is expected for at least one of them.
*/
if (lm_support_dlm() && lm_is_running_dlm()) {
if (lm_support_dlm()) {
rv = lm_get_lockspaces_dlm(&ls_found);
if (rv < 0)
if ((rv < 0) && (rv != -ECONNREFUSED))
goto fail;
}
if (lm_support_sanlock() && lm_is_running_sanlock()) {
if (lm_support_sanlock()) {
rv = lm_get_lockspaces_sanlock(&ls_found);
if (rv < 0)
if ((rv < 0) && (rv != -ECONNREFUSED))
goto fail;
}
@@ -5305,7 +5269,7 @@ static void adopt_locks(void)
list_for_each_entry_safe(ls1, l1safe, &ls_found, list) {
/* The dlm global lockspace is special and doesn't match a VG. */
if ((ls1->lm_type == LD_LM_DLM) && !strcmp(ls1->name, gl_lsname_dlm)) {
if (!strcmp(ls1->name, gl_lsname_dlm)) {
list_del(&ls1->list);
free(ls1);
continue;
@@ -5866,7 +5830,7 @@ static int main_loop(daemon_state *ds_arg)
pthread_mutex_init(&lvmetad_mutex, NULL);
lvmetad_handle = lvmetad_open(NULL);
if (lvmetad_handle.error || lvmetad_handle.socket_fd < 0)
log_debug("lvmetad_open error %d", lvmetad_handle.error);
log_error("lvmetad_open error %d", lvmetad_handle.error);
else
lvmetad_connected = 1;
@@ -5874,13 +5838,8 @@ static int main_loop(daemon_state *ds_arg)
* Attempt to rejoin lockspaces and adopt locks from a previous
* instance of lvmlockd that left behind lockspaces/locks.
*/
if (adopt_opt) {
/* FIXME: implement this without lvmetad */
if (!lvmetad_connected)
log_error("Cannot adopt locks without lvmetad running.");
else
adopt_locks();
}
if (adopt_opt)
adopt_locks();
while (1) {
rv = poll(pollfd, pollfd_maxi + 1, -1);
@@ -6035,14 +5994,14 @@ static void usage(char *prog, FILE *file)
int main(int argc, char *argv[])
{
daemon_state ds = {
.name = "lvmlockd",
.daemon_main = main_loop,
.daemon_init = NULL,
.daemon_fini = NULL,
.pidfile = getenv("LVM_LVMLOCKD_PIDFILE"),
.socket_path = getenv("LVM_LVMLOCKD_SOCKET"),
.protocol = lvmlockd_protocol,
.protocol_version = lvmlockd_protocol_version,
.daemon_init = NULL,
.daemon_fini = NULL,
.daemon_main = main_loop,
.name = "lvmlockd",
};
static struct option long_options[] = {
@@ -508,7 +508,7 @@ lockrv:
}
if (rv < 0) {
log_error("S %s R %s lock_dlm acquire error %d errno %d", ls->name, r->name, rv, errno);
return -ELMERR;
return rv;
}
if (rdd->vb) {
@@ -581,7 +581,6 @@ int lm_convert_dlm(struct lockspace *ls, struct resource *r,
}
if (rv < 0) {
log_error("S %s R %s convert_dlm error %d", ls->name, r->name, rv);
rv = -ELMERR;
}
return rv;
}
@@ -655,7 +654,6 @@ int lm_unlock_dlm(struct lockspace *ls, struct resource *r,
0, NULL, NULL, NULL);
if (rv < 0) {
log_error("S %s R %s unlock_dlm error %d", ls->name, r->name, rv);
rv = -ELMERR;
}
return rv;
@@ -699,7 +697,7 @@ int lm_hosts_dlm(struct lockspace *ls, int notify)
return 0;
memset(ls_nodes_path, 0, sizeof(ls_nodes_path));
snprintf(ls_nodes_path, PATH_MAX, "%s/%s/nodes",
snprintf(ls_nodes_path, PATH_MAX-1, "%s/%s/nodes",
DLM_LOCKSPACES_PATH, ls->name);
if (!(ls_dir = opendir(ls_nodes_path)))
@@ -151,7 +151,7 @@ struct resource {
struct list_head locks;
struct list_head actions;
char lv_args[MAX_ARGS+1];
char lm_data[]; /* lock manager specific data */
char lm_data[0]; /* lock manager specific data */
};
#define LD_LF_PERSISTENT 0x00000001
@@ -224,10 +224,7 @@ static int lock_lv_offset_from_args(char *lv_args, uint64_t *lock_lv_offset)
if (rv < 0)
return rv;
errno = 0;
*lock_lv_offset = strtoull(offset_str, NULL, 10);
if (errno)
return -1;
return 0;
}
@@ -294,36 +291,6 @@ out:
return host_id;
}
/* Prepare valid /dev/mapper/vgname-lvname with all the mangling */
static int build_dm_path(char *path, size_t path_len,
const char *vg_name, const char *lv_name)
{
struct dm_pool *mem;
char *dm_name;
int rv = 0;
if (!(mem = dm_pool_create("namepool", 1024))) {
log_error("Failed to create mempool.");
return -ENOMEM;
}
if (!(dm_name = dm_build_dm_name(mem, vg_name, lv_name, NULL))) {
log_error("Failed to build dm name for %s/%s.", vg_name, lv_name);
rv = -EINVAL;
goto fail;
}
if ((dm_snprintf(path, path_len, "%s/%s", dm_dir(), dm_name) < 0)) {
log_error("Failed to create path %s/%s.", dm_dir(), dm_name);
rv = -EINVAL;
}
fail:
dm_pool_destroy(mem);
return rv;
}
/*
* vgcreate
*
@@ -366,8 +333,7 @@ int lm_init_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_ar
if (strlen(lock_lv_name) + strlen(lock_args_version) + 2 > MAX_ARGS)
return -EARGS;
if ((rv = build_dm_path(disk.path, SANLK_PATH_LEN, vg_name, lock_lv_name)))
return rv;
snprintf(disk.path, SANLK_PATH_LEN-1, "/dev/mapper/%s-%s", vg_name, lock_lv_name);
log_debug("S %s init_vg_san path %s", ls_name, disk.path);
@@ -387,19 +353,12 @@ int lm_init_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_ar
log_debug("sanlock daemon version %08x proto %08x",
daemon_version, daemon_proto);
rv = sanlock_align(&disk);
if (rv <= 0) {
if (rv == -EACCES) {
log_error("S %s init_vg_san sanlock error -EACCES: no permission to access %s",
ls_name, disk.path);
return -EDEVOPEN;
} else {
log_error("S %s init_vg_san sanlock error %d trying to get align size of %s",
ls_name, rv, disk.path);
return -EARGS;
}
} else
align_size = rv;
align_size = sanlock_align(&disk);
if (align_size <= 0) {
log_error("S %s init_vg_san bad disk align size %d %s",
ls_name, align_size, disk.path);
return -EARGS;
}
strncpy(ss.name, ls_name, SANLK_NAME_LEN);
memcpy(ss.host_id_disk.path, disk.path, SANLK_PATH_LEN);
@@ -544,8 +503,7 @@ int lm_init_lv_sanlock(char *ls_name, char *vg_name, char *lv_name,
strncpy(rd.rs.lockspace_name, ls_name, SANLK_NAME_LEN);
rd.rs.num_disks = 1;
if ((rv = build_dm_path(rd.rs.disks[0].path, SANLK_PATH_LEN, vg_name, lock_lv_name)))
return rv;
snprintf(rd.rs.disks[0].path, SANLK_PATH_LEN-1, "/dev/mapper/%s-%s", vg_name, lock_lv_name);
align_size = sanlock_align(&rd.rs.disks[0]);
if (align_size <= 0) {
@@ -644,8 +602,7 @@ int lm_rename_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_
return rv;
}
if ((rv = build_dm_path(disk.path, SANLK_PATH_LEN, vg_name, lock_lv_name)))
return rv;
snprintf(disk.path, SANLK_PATH_LEN-1, "/dev/mapper/%s-%s", vg_name, lock_lv_name);
log_debug("S %s rename_vg_san path %s", ls_name, disk.path);
@@ -978,9 +935,7 @@ int lm_find_free_lock_sanlock(struct lockspace *ls, uint64_t *free_offset)
struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data;
struct sanlk_resourced rd;
uint64_t offset;
uint64_t start_offset;
int rv;
int round = 0;
if (daemon_test) {
*free_offset = (1048576 * LV_LOCK_BEGIN) + (1048576 * (daemon_test_lv_count + 1));
@@ -993,22 +948,9 @@ int lm_find_free_lock_sanlock(struct lockspace *ls, uint64_t *free_offset)
rd.rs.num_disks = 1;
strncpy(rd.rs.disks[0].path, lms->ss.host_id_disk.path, SANLK_PATH_LEN-1);
if (ls->free_lock_offset)
offset = ls->free_lock_offset;
else
offset = lms->align_size * LV_LOCK_BEGIN;
start_offset = offset;
offset = lms->align_size * LV_LOCK_BEGIN;
while (1) {
if (offset >= start_offset && round) {
/* This indicates the all space are allocated. */
log_debug("S %s init_lv_san read back to start offset %llu",
ls->name, (unsigned long long)offset);
rv = -EMSGSIZE;
return rv;
}
rd.rs.disks[0].offset = offset;
memset(rd.rs.name, 0, SANLK_NAME_LEN);
@@ -1018,14 +960,7 @@ int lm_find_free_lock_sanlock(struct lockspace *ls, uint64_t *free_offset)
/* This indicates the end of the device is reached. */
log_debug("S %s find_free_lock_san read limit offset %llu",
ls->name, (unsigned long long)offset);
/* Remember the NO SPACE offset; if no free area is left,
* search from this offset after the lock LV has been extended. */
*free_offset = offset;
offset = lms->align_size * LV_LOCK_BEGIN;
round = 1;
continue;
return -EMSGSIZE;
}
/*
@@ -1102,10 +1037,10 @@ int lm_prepare_lockspace_sanlock(struct lockspace *ls)
* and appending "lockctl" to get /path/to/lvmlockctl.
*/
memset(killpath, 0, sizeof(killpath));
snprintf(killpath, SANLK_PATH_LEN, "%slockctl", LVM_PATH);
snprintf(killpath, SANLK_PATH_LEN - 1, "%slockctl", LVM_PATH);
memset(killargs, 0, sizeof(killargs));
snprintf(killargs, SANLK_PATH_LEN, "--kill %s", ls->vg_name);
snprintf(killargs, SANLK_PATH_LEN - 1, "--kill %s", ls->vg_name);
rv = check_args_version(ls->vg_args, VG_LOCK_ARGS_MAJOR);
if (rv < 0) {
@@ -1121,8 +1056,8 @@ int lm_prepare_lockspace_sanlock(struct lockspace *ls)
goto fail;
}
if ((ret = build_dm_path(disk_path, SANLK_PATH_LEN, ls->vg_name, lock_lv_name)))
goto fail;
snprintf(disk_path, SANLK_PATH_LEN-1, "/dev/mapper/%s-%s",
ls->vg_name, lock_lv_name);
/*
* When a vg is started, the internal sanlock lv should be
@@ -1493,12 +1428,6 @@ int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode,
rv = sanlock_acquire(lms->sock, -1, flags, 1, &rs, &opt);
/*
* errors: translate the sanlock error number to an lvmlockd error.
* We don't want to return a sanlock-specific error number from
* this function to code that doesn't recognize sanlock error numbers.
*/
if (rv == -EAGAIN) {
/*
* It appears that sanlock_acquire returns EAGAIN when we request
@@ -1567,26 +1496,6 @@ int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode,
return -EAGAIN;
}
if (rv == SANLK_AIO_TIMEOUT) {
/*
* sanlock got an i/o timeout when trying to acquire the
* lease on disk.
*/
log_debug("S %s R %s lock_san acquire mode %d rv %d", ls->name, r->name, ld_mode, rv);
*retry = 0;
return -EAGAIN;
}
if (rv == SANLK_DBLOCK_LVER || rv == SANLK_DBLOCK_MBAL) {
/*
* There was contention with another host for the lease,
* and we lost.
*/
log_debug("S %s R %s lock_san acquire mode %d rv %d", ls->name, r->name, ld_mode, rv);
*retry = 0;
return -EAGAIN;
}
if (rv == SANLK_ACQUIRE_OWNED_RETRY) {
/*
* The lock is held by a failed host, and will eventually
@@ -1637,25 +1546,15 @@ int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode,
if (rv == -ENOSPC)
rv = -ELOCKIO;
/*
* generic error number for sanlock errors that we are not
* catching above.
*/
return -ELMERR;
return rv;
}
/*
* sanlock acquire success (rv 0)
*/
if (rds->vb) {
rv = sanlock_get_lvb(0, rs, (char *)&vb, sizeof(vb));
if (rv < 0) {
log_error("S %s R %s lock_san get_lvb error %d", ls->name, r->name, rv);
memset(rds->vb, 0, sizeof(struct val_blk));
memset(vb_out, 0, sizeof(struct val_blk));
/* the lock is still acquired, the vb values considered invalid */
rv = 0;
goto out;
}
@@ -1708,7 +1607,6 @@ int lm_convert_sanlock(struct lockspace *ls, struct resource *r,
if (rv < 0) {
log_error("S %s R %s convert_san set_lvb error %d",
ls->name, r->name, rv);
return -ELMERR;
}
}
@@ -1721,35 +1619,14 @@ int lm_convert_sanlock(struct lockspace *ls, struct resource *r,
if (daemon_test)
return 0;
/*
* Don't block waiting for a failed lease to expire since it causes
* sanlock_convert to block for a long time, which would prevent this
* thread from processing other lock requests.
*
* FIXME: SANLK_CONVERT_OWNER_NOWAIT is the same as SANLK_ACQUIRE_OWNER_NOWAIT.
* Change to use the CONVERT define when the latest sanlock version has it.
*/
flags |= SANLK_ACQUIRE_OWNER_NOWAIT;
rv = sanlock_convert(lms->sock, -1, flags, rs);
if (!rv)
return 0;
switch (rv) {
case -EAGAIN:
case SANLK_ACQUIRE_IDLIVE:
case SANLK_ACQUIRE_OWNED:
case SANLK_ACQUIRE_OWNED_RETRY:
case SANLK_ACQUIRE_OTHER:
case SANLK_AIO_TIMEOUT:
case SANLK_DBLOCK_LVER:
case SANLK_DBLOCK_MBAL:
/* expected errors from known/normal cases like lock contention or io timeouts */
log_debug("S %s R %s convert_san error %d", ls->name, r->name, rv);
if (rv == -EAGAIN) {
/* FIXME: When could this happen? Should something different be done? */
log_error("S %s R %s convert_san EAGAIN", ls->name, r->name);
return -EAGAIN;
default:
}
if (rv < 0) {
log_error("S %s R %s convert_san convert error %d", ls->name, r->name, rv);
rv = -ELMERR;
}
return rv;
@@ -1786,7 +1663,6 @@ static int release_rename(struct lockspace *ls, struct resource *r)
rv = sanlock_release(lms->sock, -1, SANLK_REL_RENAME, 2, res_args);
if (rv < 0) {
log_error("S %s R %s unlock_san release rename error %d", ls->name, r->name, rv);
rv = -ELMERR;
}
free(res_args);
@@ -1843,7 +1719,6 @@ int lm_unlock_sanlock(struct lockspace *ls, struct resource *r,
if (rv < 0) {
log_error("S %s R %s unlock_san set_lvb error %d",
ls->name, r->name, rv);
return -ELMERR;
}
}
@@ -1862,8 +1737,6 @@ int lm_unlock_sanlock(struct lockspace *ls, struct resource *r,
if (rv == -EIO)
rv = -ELOCKIO;
else if (rv < 0)
rv = -ELMERR;
return rv;
}
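The error handling above repeats one pattern: sanlock-specific return codes are mapped onto a small set of lvmlockd errors (-EAGAIN for ordinary contention or io timeouts, -ELOCKIO for lease-storage io problems, -ELMERR as the generic fallback). A minimal sketch of that mapping, with a hypothetical helper name not taken from lvmlockd itself:

	/* Hypothetical summary of the translation done inline above. */
	static int _sanlock_to_lm_error(int rv)
	{
		if (rv == -EAGAIN || rv == SANLK_AIO_TIMEOUT ||
		    rv == SANLK_DBLOCK_LVER || rv == SANLK_DBLOCK_MBAL)
			return -EAGAIN;   /* contention or io timeout, caller may retry */
		if (rv == -EIO || rv == -ENOSPC)
			return -ELOCKIO;  /* io errors reaching the lease storage */
		if (rv < 0)
			return -ELMERR;   /* generic lock manager error */
		return 0;
	}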

View File

@@ -27,14 +27,18 @@ CFLOW_TARGET = lvmpolld
include $(top_builddir)/make.tmpl
CFLAGS += $(EXTRA_EXEC_CFLAGS)
INCLUDES += -I$(top_srcdir)/libdaemon/server
LDFLAGS += -L$(top_builddir)/libdaemon/server $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS)
LIBS += $(DAEMON_LIBS) -ldaemonserver -ldevmapper $(PTHREAD_LIBS)
LVMLIBS = -ldaemonserver $(LVMINTERNAL_LIBS) -ldevmapper
LIBS += $(PTHREAD_LIBS)
LDFLAGS += -L$(top_builddir)/libdaemon/server $(DAEMON_LDFLAGS)
CLDFLAGS += -L$(top_builddir)/libdaemon/server
CFLAGS += $(DAEMON_CFLAGS)
lvmpolld: $(OBJECTS) $(top_builddir)/libdaemon/client/libdaemonclient.a \
$(top_builddir)/libdaemon/server/libdaemonserver.a
$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJECTS) $(LIBS)
$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJECTS) $(LVMLIBS) $(LIBS)
install_lvmpolld: lvmpolld
$(INSTALL_PROGRAM) -D $< $(sbindir)/$(<F)

View File

@@ -19,12 +19,10 @@
#define MIN_ARGV_SIZE 8
static const char *const polling_ops[] = {
[PVMOVE] = LVMPD_REQ_PVMOVE,
[CONVERT] = LVMPD_REQ_CONVERT,
[MERGE] = LVMPD_REQ_MERGE,
[MERGE_THIN] = LVMPD_REQ_MERGE_THIN
};
static const char *const const polling_ops[] = { [PVMOVE] = LVMPD_REQ_PVMOVE,
[CONVERT] = LVMPD_REQ_CONVERT,
[MERGE] = LVMPD_REQ_MERGE,
[MERGE_THIN] = LVMPD_REQ_MERGE_THIN };
const char *polling_op(enum poll_type type)
{
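	/*
	 * (Body not shown in this diff.)  A lookup over the polling_ops table
	 * above typically amounts to a bounds-checked index, e.g. (sketch only,
	 * POLL_TYPE_MAX is an assumed sentinel of enum poll_type):
	 *
	 *	return (type < POLL_TYPE_MAX && polling_ops[type]) ?
	 *		polling_ops[type] : "<undefined>";
	 */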

View File

@@ -915,7 +915,7 @@ int main(int argc, char *argv[])
int option_index = 0;
int client = 0, server = 0;
unsigned action = ACTION_MAX;
struct timespec timeout;
struct timeval timeout;
daemon_idle di = { .ptimeout = &timeout };
struct lvmpolld_state ls = { .log_config = "" };
daemon_state s = {

View File

@@ -19,8 +19,6 @@
#include <fcntl.h>
#include <signal.h>
static const char LVM_SYSTEM_DIR[] = "LVM_SYSTEM_DIR=";
static char *_construct_full_lvname(const char *vgname, const char *lvname)
{
char *name;
@@ -54,7 +52,7 @@ static char *_construct_lvm_system_dir_env(const char *sysdir)
*env = '\0';
if (sysdir && dm_snprintf(env, l, "%s%s", LVM_SYSTEM_DIR, sysdir) < 0) {
if (sysdir && dm_snprintf(env, l, "LVM_SYSTEM_DIR=%s", sysdir) < 0) {
dm_free(env);
env = NULL;
}
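The construction shown above only prefixes the configured system directory with the variable name. A self-contained sketch of the same idea using plain snprintf (helper name hypothetical; the original uses dm_snprintf, which signals truncation with a negative return value):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static char *make_system_dir_env(const char *sysdir)
	{
		static const char prefix[] = "LVM_SYSTEM_DIR=";
		size_t len = sizeof(prefix) + (sysdir ? strlen(sysdir) : 0);
		char *env = malloc(len);
		int n;

		if (!env)
			return NULL;
		*env = '\0';
		if (sysdir) {
			n = snprintf(env, len, "%s%s", prefix, sysdir);
			if (n < 0 || (size_t) n >= len) {	/* error or truncation */
				free(env);
				return NULL;
			}
		}
		return env;
	}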
@@ -261,8 +259,8 @@ static void _pdlv_locked_dump(struct buffer *buff, const struct lvmpolld_lv *pdl
buffer_append(buff, tmp);
if (dm_snprintf(tmp, sizeof(tmp), "\t\tlvm_command_interval=\"%s\"\n", pdlv->sinterval ?: "<undefined>") > 0)
buffer_append(buff, tmp);
if (dm_snprintf(tmp, sizeof(tmp), "\t\t%s\"%s\"\n", LVM_SYSTEM_DIR,
(*pdlv->lvm_system_dir_env ? (pdlv->lvm_system_dir_env + (sizeof(LVM_SYSTEM_DIR) - 1)) : "<undefined>")) > 0)
if (dm_snprintf(tmp, sizeof(tmp), "\t\tLVM_SYSTEM_DIR=\"%s\"\n",
(*pdlv->lvm_system_dir_env ? (pdlv->lvm_system_dir_env + strlen("LVM_SYSTEM_DIR=")) : "<undefined>")) > 0)
buffer_append(buff, tmp);
if (dm_snprintf(tmp, sizeof(tmp), "\t\tlvm_command_pid=%d\n", pdlv->cmd_pid) > 0)
buffer_append(buff, tmp);

View File

@@ -1,248 +0,0 @@
#include "target.h"
// For DM_ARRAY_SIZE!
#include "libdevmapper.h"
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
//----------------------------------------------------------------
static char *_tok_cpy(const char *b, const char *e)
{
char *new = malloc((e - b) + 1);
char *ptr = new;
if (new) {
while (b != e)
*ptr++ = *b++;
*ptr = '\0';
}
return new;
}
static bool _tok_eq(const char *b, const char *e, const char *str)
{
while (b != e) {
if (!*str || *b != *str)
return false;
b++;
str++;
}
return !*str;
}
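/* Example (annotation, not in the original source): _tok_eq(b, e, "normal")
 * is true only when the half-open range [b, e) spells exactly "normal". */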
static bool _parse_operating_mode(const char *b, const char *e, void *context)
{
static struct {
const char *str;
enum vdo_operating_mode mode;
} _table[] = {
{"recovering", VDO_MODE_RECOVERING},
{"read-only", VDO_MODE_READ_ONLY},
{"normal", VDO_MODE_NORMAL}
};
enum vdo_operating_mode *r = context;
unsigned i;
for (i = 0; i < DM_ARRAY_SIZE(_table); i++) {
if (_tok_eq(b, e, _table[i].str)) {
*r = _table[i].mode;
return true;
}
}
return false;
}
static bool _parse_compression_state(const char *b, const char *e, void *context)
{
static struct {
const char *str;
enum vdo_compression_state state;
} _table[] = {
{"online", VDO_COMPRESSION_ONLINE},
{"offline", VDO_COMPRESSION_OFFLINE}
};
enum vdo_compression_state *r = context;
unsigned i;
for (i = 0; i < DM_ARRAY_SIZE(_table); i++) {
if (_tok_eq(b, e, _table[i].str)) {
*r = _table[i].state;
return true;
}
}
return false;
}
static bool _parse_recovering(const char *b, const char *e, void *context)
{
bool *r = context;
if (_tok_eq(b, e, "recovering"))
*r = true;
else if (_tok_eq(b, e, "-"))
*r = false;
else
return false;
return true;
}
static bool _parse_index_state(const char *b, const char *e, void *context)
{
static struct {
const char *str;
enum vdo_index_state state;
} _table[] = {
{"error", VDO_INDEX_ERROR},
{"closed", VDO_INDEX_CLOSED},
{"opening", VDO_INDEX_OPENING},
{"closing", VDO_INDEX_CLOSING},
{"offline", VDO_INDEX_OFFLINE},
{"online", VDO_INDEX_ONLINE},
{"unknown", VDO_INDEX_UNKNOWN}
};
enum vdo_index_state *r = context;
unsigned i;
for (i = 0; i < DM_ARRAY_SIZE(_table); i++) {
if (_tok_eq(b, e, _table[i].str)) {
*r = _table[i].state;
return true;
}
}
return false;
}
static bool _parse_uint64(const char *b, const char *e, void *context)
{
uint64_t *r = context, n;
n = 0;
while (b != e) {
if (!isdigit(*b))
return false;
n = (n * 10) + (*b - '0');
b++;
}
*r = n;
return true;
}
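/* Example (annotation, not in the original source): the token "1024" yields
 * 1024; any token containing a non-digit, such as "10k", is rejected.
 * Note the loop does not guard against uint64_t overflow. */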
static const char *_eat_space(const char *b, const char *e)
{
while (b != e && isspace(*b))
b++;
return b;
}
static const char *_next_tok(const char *b, const char *e)
{
const char *te = b;
while (te != e && !isspace(*te))
te++;
return te == b ? NULL : te;
}
static void _set_error(struct vdo_status_parse_result *result, const char *fmt, ...)
__attribute__ ((format(printf, 2, 3)));
static void _set_error(struct vdo_status_parse_result *result, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vsnprintf(result->error, sizeof(result->error), fmt, ap);
va_end(ap);
}
static bool _parse_field(const char **b, const char *e,
bool (*p_fn)(const char *, const char *, void *),
void *field, const char *field_name,
struct vdo_status_parse_result *result)
{
const char *te;
te = _next_tok(*b, e);
if (!te) {
_set_error(result, "couldn't get token for '%s'", field_name);
return false;
}
if (!p_fn(*b, te, field)) {
_set_error(result, "couldn't parse '%s'", field_name);
return false;
}
*b = _eat_space(te, e);
return true;
}
bool vdo_status_parse(const char *input, struct vdo_status_parse_result *result)
{
const char *b = input;
const char *e = input + strlen(input);
const char *te;
struct vdo_status *s = malloc(sizeof(*s));
if (!s) {
_set_error(result, "out of memory");
return false;
}
b = _eat_space(b, e);
te = _next_tok(b, e);
if (!te) {
_set_error(result, "couldn't get token for device");
free(s);
return false;
}
s->device = _tok_cpy(b, te);
if (!s->device) {
_set_error(result, "out of memory");
free(s);
return false;
}
b = _eat_space(te, e);
#define XX(p, f, fn) if (!_parse_field(&b, e, p, f, fn, result)) goto bad;
XX(_parse_operating_mode, &s->operating_mode, "operating mode");
XX(_parse_recovering, &s->recovering, "recovering");
XX(_parse_index_state, &s->index_state, "index state");
XX(_parse_compression_state, &s->compression_state, "compression state");
XX(_parse_uint64, &s->used_blocks, "used blocks");
XX(_parse_uint64, &s->total_blocks, "total blocks");
#undef XX
if (b != e) {
_set_error(result, "too many tokens");
goto bad;
}
result->status = s;
return true;
bad:
free(s->device);
free(s);
return false;
}
//----------------------------------------------------------------
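A minimal usage sketch for the parser above (hypothetical status line and example function, not part of the original file):

	#include <stdio.h>

	static void _example(void)
	{
		struct vdo_status_parse_result result;

		/* fields in the order parsed above: device, operating mode,
		 * recovering, index state, compression state, used blocks,
		 * total blocks */
		if (!vdo_status_parse("/dev/dm-3 normal - online online 1024 1048576",
				      &result)) {
			fprintf(stderr, "parse failed: %s\n", result.error);
			return;
		}

		printf("%s: %llu/%llu blocks used\n", result.status->device,
		       (unsigned long long) result.status->used_blocks,
		       (unsigned long long) result.status->total_blocks);

		free(result.status->device);
		free(result.status);
	}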

View File

@@ -1,68 +0,0 @@
/*
* Copyright (C) 2018 Red Hat, Inc. All rights reserved.
*
* This file is part of the device-mapper userspace tools.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef DEVICE_MAPPER_VDO_TARGET_H
#define DEVICE_MAPPER_VDO_TARGET_H
#include <stdbool.h>
#include <stdint.h>
//----------------------------------------------------------------
enum vdo_operating_mode {
VDO_MODE_RECOVERING,
VDO_MODE_READ_ONLY,
VDO_MODE_NORMAL
};
enum vdo_compression_state {
VDO_COMPRESSION_ONLINE,
VDO_COMPRESSION_OFFLINE
};
enum vdo_index_state {
VDO_INDEX_ERROR,
VDO_INDEX_CLOSED,
VDO_INDEX_OPENING,
VDO_INDEX_CLOSING,
VDO_INDEX_OFFLINE,
VDO_INDEX_ONLINE,
VDO_INDEX_UNKNOWN
};
struct vdo_status {
char *device;
enum vdo_operating_mode operating_mode;
bool recovering;
enum vdo_index_state index_state;
enum vdo_compression_state compression_state;
uint64_t used_blocks;
uint64_t total_blocks;
};
void vdo_status_destroy(struct vdo_status *s);
#define VDO_MAX_ERROR 256
struct vdo_status_parse_result {
char error[VDO_MAX_ERROR];
struct vdo_status *status;
};
// Parses the status line from the kernel target.
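// Fields appear in the order expected by vdo_status_parse():
//   <device> <operating mode> <recovering> <index state>
//   <compression state> <used blocks> <total blocks>
// e.g. (hypothetical values): "/dev/dm-3 normal - online online 1024 1048576"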
bool vdo_status_parse(const char *input, struct vdo_status_parse_result *result);
//----------------------------------------------------------------
#endif

View File

@@ -207,10 +207,6 @@ Optional feature arguments are:
block, then the cache block is invalidated.
To enable passthrough mode the cache must be clean.
metadata2 : use version 2 of the metadata. This stores the dirty bits
in a separate btree, which improves speed of shutting
down the cache.
A policy called 'default' is always registered. This is an alias for
the policy we currently think gives the best all-round performance.
@@ -290,7 +286,7 @@ message, which takes an arbitrary number of cblock ranges. Each cblock
range's end value is "one past the end", meaning 5-10 expresses a range
of values from 5 to 9. Each cblock must be expressed as a decimal
value, in the future a variant message that takes cblock ranges
expressed in hexadecimal may be needed to better support efficient
expressed in hexidecimal may be needed to better support efficient
invalidation of larger caches. The cache must be in passthrough mode
when invalidate_cblocks is used.
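For example, invalidating a single block and two ranges on a (hypothetical)
cache device named "cache1" that is already in passthrough mode could look
like:
  dmsetup message cache1 0 invalidate_cblocks 100 200-300 1000-2000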

View File

@@ -11,57 +11,23 @@ Parameters: <cipher> <key> <iv_offset> <device path> \
<offset> [<#opt_params> <opt_params>]
<cipher>
Encryption cipher, encryption mode and Initial Vector (IV) generator.
The cipher specifications format is:
cipher[:keycount]-chainmode-ivmode[:ivopts]
Encryption cipher and an optional IV generation mode.
(In format cipher[:keycount]-chainmode-ivmode[:ivopts]).
Examples:
des
aes-cbc-essiv:sha256
aes-xts-plain64
serpent-xts-plain64
twofish-ecb
Cipher format also supports direct specification with kernel crypt API
format (selected by capi: prefix). The IV specification is the same
as for the first format type.
This format is mainly used for specification of authenticated modes.
The crypto API cipher specifications format is:
capi:cipher_api_spec-ivmode[:ivopts]
Examples:
capi:cbc(aes)-essiv:sha256
capi:xts(aes)-plain64
Examples of authenticated modes:
capi:gcm(aes)-random
capi:authenc(hmac(sha256),xts(aes))-random
capi:rfc7539(chacha20,poly1305)-random
The /proc/crypto contains a list of currently loaded crypto modes.
/proc/crypto contains supported crypto modes
<key>
Key used for encryption. It is encoded either as a hexadecimal number
or it can be passed as <key_string> prefixed with single colon
character (':') for keys residing in kernel keyring service.
Key used for encryption. It is encoded as a hexadecimal number.
You can only use key sizes that are valid for the selected cipher
in combination with the selected iv mode.
Note that for some iv modes the key string can contain additional
keys (for example IV seed) so the key contains more parts concatenated
into a single string.
<key_string>
The kernel keyring key is identified by string in following format:
<key_size>:<key_type>:<key_description>.
<key_size>
The encryption key size in bytes. The kernel key payload size must match
the value passed in <key_size>.
<key_type>
Either 'logon' or 'user' kernel key type.
<key_description>
The kernel keyring key description the crypt target should look for
when loading a key of <key_type>.
<keycount>
Multi-key compatibility mode. You can define <keycount> keys and
then sectors are encrypted according to their offsets (sector 0 uses key0;
@@ -110,32 +76,6 @@ submit_from_crypt_cpus
thread because it benefits CFQ to have writes submitted using the
same context.
integrity:<bytes>:<type>
The device requires additional <bytes> of metadata per sector, stored
in the per-bio integrity structure. This metadata must be provided
by an underlying dm-integrity target.
The <type> can be "none" if metadata is used only for persistent IV.
For Authenticated Encryption with Additional Data (AEAD)
the <type> is "aead". An AEAD mode additionally calculates and verifies
integrity for the encrypted device. The additional space is then
used for storing authentication tag (and persistent IV if needed).
sector_size:<bytes>
Use <bytes> as the encryption unit instead of 512-byte sectors.
This option can be in the range 512 - 4096 bytes and must be a power of two.
The virtual device will announce this size as a minimal IO and logical sector.
iv_large_sectors
IV generators will use sector number counted in <sector_size> units
instead of the default 512-byte sectors.
For example, if <sector_size> is 4096 bytes, plain64 IV for the second
sector will be 8 (without flag) and 1 if iv_large_sectors is present.
The <iv_offset> must be a multiple of <sector_size> (in 512-byte units)
if this flag is specified.
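A hypothetical table line combining the optional parameters above (device,
key and sizes are placeholders):
  0 <size> crypt aes-xts-plain64 <hex key> 0 /dev/sdb 0 2 sector_size:4096 iv_large_sectors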
Example scripts
===============
LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
@@ -145,13 +85,7 @@ https://gitlab.com/cryptsetup/cryptsetup
[[
#!/bin/sh
# Create a crypt device using dmsetup
dmsetup create crypt1 --table "0 `blockdev --getsz $1` crypt aes-cbc-essiv:sha256 babebabebabebabebabebabebabebabe 0 $1 0"
]]
[[
#!/bin/sh
# Create a crypt device using dmsetup when encryption key is stored in keyring service
dmsetup create crypt2 --table "0 `blockdev --getsize $1` crypt aes-cbc-essiv:sha256 :32:logon:my_prefix:my_key 0 $1 0"
dmsetup create crypt1 --table "0 `blockdev --getsize $1` crypt aes-cbc-essiv:sha256 babebabebabebabebabebabebabebabe 0 $1 0"
]]
[[

View File

@@ -16,12 +16,12 @@ Example scripts
[[
#!/bin/sh
# Create device delaying rw operation for 500ms
echo "0 `blockdev --getsz $1` delay $1 0 500" | dmsetup create delayed
echo "0 `blockdev --getsize $1` delay $1 0 500" | dmsetup create delayed
]]
[[
#!/bin/sh
# Create device delaying only write operation for 500ms and
# splitting reads and writes to different devices $1 $2
echo "0 `blockdev --getsz $1` delay $1 0 0 $2 0 500" | dmsetup create delayed
echo "0 `blockdev --getsize $1` delay $1 0 0 $2 0 500" | dmsetup create delayed
]]

View File

@@ -42,7 +42,7 @@ Optional feature parameters:
<direction>: Either 'r' to corrupt reads or 'w' to corrupt writes.
'w' is incompatible with drop_writes.
<value>: The value (from 0-255) to write.
<flags>: Perform the replacement only if bio->bi_opf has all the
<flags>: Perform the replacement only if bio->bi_rw has all the
selected flags set.
Examples:

View File

@@ -1,199 +0,0 @@
The dm-integrity target emulates a block device that has additional
per-sector tags that can be used for storing integrity information.
A general problem with storing integrity tags with every sector is that
writing the sector and the integrity tag must be atomic - i.e. in case of
crash, either both sector and integrity tag or none of them is written.
To guarantee write atomicity, the dm-integrity target uses a journal: it
writes sector data and integrity tags into the journal, commits the journal
and then copies the data and integrity tags to their respective locations.
The dm-integrity target can be used with the dm-crypt target - in this
situation the dm-crypt target creates the integrity data and passes them
to the dm-integrity target via bio_integrity_payload attached to the bio.
In this mode, the dm-crypt and dm-integrity targets provide authenticated
disk encryption - if the attacker modifies the encrypted device, an I/O
error is returned instead of random data.
The dm-integrity target can also be used as a standalone target, in this
mode it calculates and verifies the integrity tag internally. In this
mode, the dm-integrity target can be used to detect silent data
corruption on the disk or in the I/O path.
When loading the target for the first time, the kernel driver will format
the device. But it will only format the device if the superblock contains
zeroes. If the superblock is neither valid nor zeroed, the dm-integrity
target can't be loaded.
To use the target for the first time:
1. overwrite the superblock with zeroes
2. load the dm-integrity target with one-sector size, the kernel driver
will format the device
3. unload the dm-integrity target
4. read the "provided_data_sectors" value from the superblock
5. load the dm-integrity target with the target size
"provided_data_sectors"
6. if you want to use dm-integrity with dm-crypt, load the dm-crypt target
with the size "provided_data_sectors"
Target arguments:
1. the underlying block device
2. the number of reserved sectors at the beginning of the device - the
dm-integrity target won't read or write these sectors
3. the size of the integrity tag (if "-" is used, the size is taken from
the internal-hash algorithm)
4. mode:
D - direct writes (without journal) - in this mode, journaling is
not used and data sectors and integrity tags are written
separately. In case of crash, it is possible that the data
and integrity tag don't match.
J - journaled writes - data and integrity tags are written to the
journal and atomicity is guaranteed. In case of crash,
either both data and tag or none of them are written. The
journaled mode roughly halves write throughput because the
data have to be written twice.
R - recovery mode - in this mode, journal is not replayed,
checksums are not checked and writes to the device are not
allowed. This mode is useful for data recovery if the
device cannot be activated in any of the other standard
modes.
5. the number of additional arguments
Additional arguments:
journal_sectors:number
The size of the journal; this argument is used only when formatting the
device. If the device is already formatted, the value from the
superblock is used.
interleave_sectors:number
The number of interleaved sectors. This value is rounded down to
a power of two. If the device is already formatted, the value from
the superblock is used.
buffer_sectors:number
The number of sectors in one buffer. The value is rounded down to
a power of two.
The tag area is accessed using buffers; the buffer size is
configurable. A larger buffer size means larger I/Os, but
fewer of them are issued.
journal_watermark:number
The journal watermark in percent. When the size of the journal
exceeds this watermark, the thread that flushes the journal will
be started.
commit_time:number
Commit time in milliseconds. When this time passes, the journal is
written. The journal is also written immediately if the FLUSH
request is received.
internal_hash:algorithm(:key) (the key is optional)
Use internal hash or crc.
When this argument is used, the dm-integrity target won't accept
integrity tags from the upper target, but it will automatically
generate and verify the integrity tags.
You can use a crc algorithm (such as crc32); then the integrity target
will protect the data against accidental corruption.
You can also use a hmac algorithm (for example
"hmac(sha256):0123456789abcdef"), in this mode it will provide
cryptographic authentication of the data without encryption.
When this argument is not used, the integrity tags are accepted
from an upper layer target, such as dm-crypt. The upper layer
target should check the validity of the integrity tags.
journal_crypt:algorithm(:key) (the key is optional)
Encrypt the journal using the given algorithm to make sure that the
attacker can't read the journal. You can use a block cipher here
(such as "cbc(aes)") or a stream cipher (for example "chacha20",
"salsa20", "ctr(aes)" or "ecb(arc4)").
The journal contains history of last writes to the block device,
an attacker reading the journal could see the last sector numbers
that were written. From the sector numbers, the attacker can infer
the size of files that were written. To protect against this
situation, you can encrypt the journal.
journal_mac:algorithm(:key) (the key is optional)
Protect sector numbers in the journal from accidental or malicious
modification. To protect against accidental modification, use a
crc algorithm; to protect against malicious modification, use a
hmac algorithm with a key.
This option is not needed when using internal-hash because in this
mode, the integrity of journal entries is checked when replaying
the journal. Thus, a modified sector number would be detected at
this stage.
block_size:number
The size of a data block in bytes. The larger the block size the
less overhead there is for per-block integrity metadata.
Supported values are 512, 1024, 2048 and 4096 bytes. If not
specified the default block size is 512 bytes.
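A hypothetical table line following the argument order described above
(sizes and device are placeholders):
  0 <provided_data_sectors> integrity /dev/sdb 0 - J 3 journal_sectors:1024 internal_hash:crc32 block_size:4096
i.e. no reserved sectors, tag size taken from the internal hash, journaled
mode, and three additional arguments.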
The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can
be changed when reloading the target (load an inactive table and swap the
tables with suspend and resume). The other arguments should not be changed
when reloading the target because the layout of disk data depends on them
and the reloaded target would be non-functional.
The layout of the formatted block device:
* reserved sectors (they are not used by this target, they can be used for
storing LUKS metadata or for other purpose), the size of the reserved
area is specified in the target arguments
* superblock (4kiB)
* magic string - identifies that the device was formatted
* version
* log2(interleave sectors)
* integrity tag size
* the number of journal sections
* provided data sectors - the number of sectors that this target
provides (i.e. the size of the device minus the size of all
metadata and padding). The user of this target should not send
bios that access data beyond the "provided data sectors" limit.
* flags - a flag is set if journal_mac is used
* journal
The journal is divided into sections, each section contains:
* metadata area (4kiB), it contains journal entries
every journal entry contains:
* logical sector (specifies where the data and tag should
be written)
* last 8 bytes of data
* integrity tag (the size is specified in the superblock)
every metadata sector ends with
* mac (8-bytes), all the macs in 8 metadata sectors form a
64-byte value. It is used to store hmac of sector
numbers in the journal section, to protect against a
possibility that the attacker tampers with sector
numbers in the journal.
* commit id
* data area (the size is variable; it depends on how many journal
entries fit into the metadata area)
every sector in the data area contains:
* data (504 bytes of data, the last 8 bytes are stored in
the journal entry)
* commit id
To test if the whole journal section was written correctly, every
512-byte sector of the journal ends with 8-byte commit id. If the
commit id matches on all sectors in a journal section, then it is
assumed that the section was written correctly. If the commit id
doesn't match, the section was written partially and it should not
be replayed.
* one or more runs of interleaved tags and data. Each run contains:
* tag area - it contains integrity tags. There is one tag for each
sector in the data area
* data area - it contains data sectors. The number of data sectors
in one run must be a power of two. log2 of this value is stored
in the superblock.

View File

@@ -16,15 +16,15 @@ Example scripts
[[
#!/bin/sh
# Create an identity mapping for a device
echo "0 `blockdev --getsz $1` linear $1 0" | dmsetup create identity
echo "0 `blockdev --getsize $1` linear $1 0" | dmsetup create identity
]]
[[
#!/bin/sh
# Join 2 devices together
size1=`blockdev --getsz $1`
size2=`blockdev --getsz $2`
size1=`blockdev --getsize $1`
size2=`blockdev --getsize $2`
echo "0 $size1 linear $1 0
$size1 $size2 linear $2 0" | dmsetup create joined
]]
@@ -44,7 +44,7 @@ if (!defined($dev)) {
die("Please specify a device.\n");
}
my $dev_size = `blockdev --getsz $dev`;
my $dev_size = `blockdev --getsize $dev`;
my $extents = int($dev_size / $extent_size) -
(($dev_size % $extent_size) ? 1 : 0);

View File

@@ -14,14 +14,14 @@ Log Ordering
We log things in order of completion once we are sure the write is no longer in
cache. This means that normal WRITE requests are not actually logged until the
next REQ_PREFLUSH request. This is to make it easier for userspace to replay
the log in a way that correlates to what is on disk and not what is in cache,
to make it easier to detect improper waiting/flushing.
next REQ_FLUSH request. This is to make it easier for userspace to replay the
log in a way that correlates to what is on disk and not what is in cache, to
make it easier to detect improper waiting/flushing.
This works by attaching all WRITE requests to a list once the write completes.
Once we see a REQ_PREFLUSH request we splice this list onto the request and once
Once we see a REQ_FLUSH request we splice this list onto the request and once
the FLUSH request completes we log all of the WRITEs and then the FLUSH. Only
completed WRITEs, at the time the REQ_PREFLUSH is issued, are added in order to
completed WRITEs, at the time the REQ_FLUSH is issued, are added in order to
simulate the worst case scenario with regard to power failures. Consider the
following example (W means write, C means complete):

View File

@@ -14,12 +14,8 @@ The target is named "raid" and it accepts the following parameters:
<#raid_devs> <metadata_dev0> <dev0> [.. <metadata_devN> <devN>]
<raid_type>:
raid0 RAID0 striping (no resilience)
raid1 RAID1 mirroring
raid4 RAID4 with dedicated last parity disk
raid5_n RAID5 with dedicated last parity disk supporting takeover
Same as raid4
- Transitory layout
raid4 RAID4 dedicated parity disk
raid5_la RAID5 left asymmetric
- rotating parity 0 with data continuation
raid5_ra RAID5 right asymmetric
@@ -34,19 +30,7 @@ The target is named "raid" and it accepts the following parameters:
- rotating parity N (right-to-left) with data restart
raid6_nc RAID6 N continue
- rotating parity N (right-to-left) with data continuation
raid6_n_6 RAID6 with dedicated parity disks
- parity and Q-syndrome on the last 2 disks;
layout for takeover from/to raid4/raid5_n
raid6_la_6 Same as "raid5_la" plus dedicated last Q-syndrome disk
- layout for takeover from raid5_la from/to raid6
raid6_ra_6 Same as "raid5_ra" dedicated last Q-syndrome disk
- layout for takeover from raid5_ra from/to raid6
raid6_ls_6 Same as "raid5_ls" dedicated last Q-syndrome disk
- layout for takeover from raid5_ls from/to raid6
raid6_rs_6 Same as "raid5_rs" dedicated last Q-syndrome disk
- layout for takeover from raid5_rs from/to raid6
raid10 Various RAID10 inspired algorithms chosen by additional params
(see raid10_format and raid10_copies below)
- RAID10: Striped Mirrors (aka 'Striping on top of mirrors')
- RAID1E: Integrated Adjacent Stripe Mirroring
- RAID1E: Integrated Offset Stripe Mirroring
@@ -132,57 +116,10 @@ The target is named "raid" and it accepts the following parameters:
Here we see layouts closely akin to 'RAID1E - Integrated
Offset Stripe Mirroring'.
[delta_disks <N>]
The delta_disks option value (-251 < N < +251) triggers
device removal (negative value) or device addition (positive
value) to any reshape supporting raid levels 4/5/6 and 10.
RAID levels 4/5/6 allow for addition of devices (metadata
and data device tuple), raid10_near and raid10_offset only
allow for device addition. raid10_far does not support any
reshaping at all.
A minimum number of devices has to be kept to enforce resilience,
which is 3 devices for raid4/5 and 4 devices for raid6.
[data_offset <sectors>]
This option value defines the offset into each data device
where the data starts. This is used to provide out-of-place
reshaping space to avoid writing over data whilst
changing the layout of stripes, hence an interruption/crash
may happen at any time without the risk of losing data.
E.g. when adding devices to an existing raid set during
forward reshaping, the out-of-place space will be allocated
at the beginning of each raid device. The kernel raid4/5/6/10
MD personalities supporting such device addition will read the data from
the existing first stripes (those with smaller number of stripes)
starting at data_offset to fill up a new stripe with the larger
number of stripes, calculate the redundancy blocks (CRC/Q-syndrome)
and write that new stripe to offset 0. Same will be applied to all
N-1 other new stripes. This out-of-place scheme is used to change
the RAID type (i.e. the allocation algorithm) as well, e.g.
changing from raid5_ls to raid5_n.
[journal_dev <dev>]
This option adds a journal device to raid4/5/6 raid sets and
uses it to close the 'write hole' caused by the non-atomic updates
to the component devices which can cause data loss during recovery.
The journal device is used as writethrough thus causing writes to
be throttled versus non-journaled raid4/5/6 sets.
Takeover/reshape is not possible with a raid4/5/6 journal device;
it has to be deconfigured before requesting these.
[journal_mode <mode>]
This option sets the caching mode on journaled raid4/5/6 raid sets
(see 'journal_dev <dev>' above) to 'writethrough' or 'writeback'.
If 'writeback' is selected the journal device has to be resilient
and must not suffer from the 'write hole' problem itself (e.g. use
raid1 or raid10) to avoid a single point of failure.
<#raid_devs>: The number of devices composing the array.
Each device consists of two entries. The first is the device
containing the metadata (if any); the second is the one containing the
data. A maximum of 64 metadata/data device entries is supported
up to target version 1.8.0.
1.9.0 supports up to 253, which is enforced by the MD kernel runtime in use.
data.
If a drive has failed or is missing at creation time, a '-' can be
given for both the metadata and data drives for a given position.
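A hypothetical example line for a two-device raid1 set following the layout
above (sizes and device names are placeholders; '-' means no metadata device):
  0 2097152 raid raid1 3 0 region_size 1024 2 - /dev/sda1 - /dev/sdb1
i.e. raid1 with three raid parameters (chunk size 0, region_size 1024) and
two metadata/data device pairs.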
@@ -258,14 +195,6 @@ recovery. Here is a fuller description of the individual fields:
in RAID1/10 or wrong parity values found in RAID4/5/6.
This value is valid only after a "check" of the array
is performed. A healthy array has a 'mismatch_cnt' of 0.
<data_offset> The current data offset to the start of the user data on
each component device of a raid set (see the respective
raid parameter to support out-of-place reshaping).
<journal_char> 'A' - active write-through journal device.
'a' - active write-back journal device.
'D' - dead journal device.
'-' - no journal device.
Message Interface
-----------------
@@ -278,6 +207,7 @@ include:
"recover"- Initiate/continue a recover process.
"check" - Initiate a check (i.e. a "scrub") of the array.
"repair" - Initiate a repair of the array.
"reshape"- Currently unsupported (-EINVAL).
Discard Support
@@ -327,19 +257,3 @@ Version History
1.5.2 'mismatch_cnt' is zero unless [last_]sync_action is "check".
1.6.0 Add discard support (and devices_handle_discard_safely module param).
1.7.0 Add support for MD RAID0 mappings.
1.8.0 Explicitly check for compatible flags in the superblock metadata
and reject to start the raid set if any are set by a newer
target version, thus avoiding data corruption on a raid set
with a reshape in progress.
1.9.0 Add support for RAID level takeover/reshape/region size
and set size reduction.
1.9.1 Fix activation of existing RAID 4/10 mapped devices
1.9.2 Don't emit '- -' on the status table line in case the constructor
fails reading a superblock. Correctly emit 'maj:min1 maj:min2' and
'D' on the status line. If '- -' is passed into the constructor, emit
'- -' on the table line and '-' as the status line health character.
1.10.0 Add support for raid4/5/6 journal device
1.10.1 Fix data corruption on reshape request
1.11.0 Fix table line argument order
(wrong raid10_copies/raid10_format sequence)
1.11.1 Add raid4/5/6 journal write-back support via journal_mode option

View File

@@ -37,9 +37,9 @@ if (!$num_devs) {
die("Specify at least one device\n");
}
$min_dev_size = `blockdev --getsz $devs[0]`;
$min_dev_size = `blockdev --getsize $devs[0]`;
for ($i = 1; $i < $num_devs; $i++) {
my $this_size = `blockdev --getsz $devs[$i]`;
my $this_size = `blockdev --getsize $devs[$i]`;
$min_dev_size = ($min_dev_size < $this_size) ?
$min_dev_size : $this_size;
}

Some files were not shown because too many files have changed in this diff.