mirror of git://sourceware.org/git/lvm2.git

Compare commits


15 Commits

Author SHA1 Message Date
Bryn M. Reeves
63e6ef8491 man: add --headers to dmstats.8.in 2015-07-31 15:10:28 +01:00
Bryn M. Reeves
a20989174f man: document --timestamps in dmstats.8.in 2015-07-31 15:10:28 +01:00
Bryn M. Reeves
8df54e25c6 dmsetup: make '--timestamps' set --headers=time
Make '--timestamps' a shorthand for --headers=time.
2015-07-31 15:10:27 +01:00
Bryn M. Reeves
8ca04e316e dmsetup: add an optional header to dmsetup reports
Add optional headers to dmsetup and dmstats reports. These are
selected by the user in a similar way to report fields, e.g.:

Time: 27/07/15 12:07:56           Count:            0
Name             RgID  ArID  RRqM/s   WRqM/s   R/s   W/s   RSz/s WSz/s AvRqS QSize SvcTm Util% AWait
vg_hex-lv_home       0     0     0.00     0.00  0.00 43.00     0 416.00k 9.50k  1.00  1.86  8.00 30.44
vg_hex-lv_root       0     0     0.00     0.00  0.00  0.00     0       0     0  0.00  0.00  0.00  0.00
vg_hex-lv_images     0     0     0.00     0.00  0.00  0.00     0       0     0  0.00  0.00  0.00  0.00

This selects the 'time' and 'report_count' headers for output.
2015-07-31 15:10:27 +01:00
Bryn M. Reeves
e8eed522a8 dmsetup: add --timestamps switch
Add a switch to optionally print a timestamp before displaying
each report. Use the same format as iostat for now (ISO format
controlled by S_FORMAT_TIME is also easy to add).
2015-07-31 15:10:27 +01:00
Bryn M. Reeves
cd8e3e28e5 libdm-report: add support for optional report headers
Add the ability to output a row of header data before the main
report. This can be used to print a repeating banner including
data such as the time, the report count, and system performance
metrics.

A 'header' behaves in a similar way to a field: headers are defined
by passing in an array of header types and selected using a string
of names. This allows programs using dm_report to customize the
available set of headers and allow their display to be configured
by the user.

Headers do not participate in any way in sorting or selection and
can only appear in the special 'header' section of the report.

A row of headers is added to a report by passing in a string of
header names to be parsed. Header output is either written as
soon as it is defined (unbuffered) or when the library user calls
the dm_report_output_header() function.
2015-07-31 15:10:27 +01:00
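
As a rough illustration of that flow, a library user might define a table of
header types, select them by name, and emit the banner explicitly in buffered
mode. Only dm_report_output_header() is named in the commit; the struct layout
and the selection step in the sketch below are hypothetical stand-ins.

  #include <libdevmapper.h>

  /* Hypothetical table of header types, mirroring how report fields are
   * defined; this struct layout is an illustration only. */
  struct example_header_type {
      const char *name;       /* e.g. "time", "report_count" */
      const char *heading;    /* text printed in the banner  */
  };

  const struct example_header_type example_headers[] = {
      { "time",         "Time"  },
      { "report_count", "Count" },
      { NULL, NULL }
  };

  int example_output_with_header(struct dm_report *rh)
  {
      /* A header row would be requested with a string of names,
       * e.g. "time,report_count", in the same way fields are selected;
       * the registration call itself is not named in the commit, so it
       * is omitted here. */

      /* Buffered mode: emit the header row before the report body.
       * dm_report_output_header() is assumed to return non-zero on
       * success, like dm_report_output(). */
      if (!dm_report_output_header(rh))
          return 0;

      return dm_report_output(rh);
  }
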
Bryn M. Reeves
8cc8b30ba7 man: add dmstats.8 2015-07-31 15:10:27 +01:00
Bryn M. Reeves
1638e090fc dmsetup: add dmstats command
Add arguments, report types, and a 'stats' command that dm-stats will
use, and implement 'clear', 'create', 'delete', 'list', 'print', and
'report' sub-commands.

Adapt _display_info_cols() to allow reporting of statistics with the
DR_STATS report type. Since a single object (device) may have many rows
of statistics to report the call to dm_report_object() is placed inside
a loop over each statistics area present.

For non-stats reports or for devices with a single region spanning the
entire device the body of the loop is executed once.

Regions and the areas that they contain are always traversed in
ascending order beginning with area zero of region zero: all sorting is
handled by the report engine.
2015-07-31 15:10:27 +01:00
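
The loop described above might look roughly like this sketch, with one
dm_report_object() call per statistics area and a single call for non-stats
reports; the dm_stats walk calls follow later libdevmapper headers and 'obj'
is a placeholder, so the details are assumptions rather than the actual
dmsetup code.

  #include <libdevmapper.h>

  /* Sketch of the reporting loop described above. 'obj' stands in for the
   * per-device object dmsetup hands to the report engine. */
  static int example_report_device(struct dm_report *rh, struct dm_stats *dms,
                                   void *obj, int stats_report)
  {
      int ret = 1;

      if (!stats_report)
          return dm_report_object(rh, obj);   /* single row */

      /* One report row per statistics area, starting with area zero of
       * region zero; sorting of the rows is left to the report engine. */
      dm_stats_walk_do(dms) {
          if (!dm_report_object(rh, obj))
              ret = 0;
          dm_stats_walk_next(dms);
      } dm_stats_walk_while(dms);

      return ret;
  }
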
Bryn M. Reeves
e33656fc53 dmsetup: rename 'char *argc' and 'char ***argv' in _process_switches
Rename these two variables to 'argcp' and 'argvp' to make it clear
we are dealing with pointers to an 'int argc' and 'char **argv'.
2015-07-31 12:57:35 +01:00
Bryn M. Reeves
13d5b8100a libdevmapper: add statistics data structures and interface
Add data structures, type definitions and interfaces to
libdevmapper to work with device-mapper statistics.

To simplify handling gaps in the sequence of region_ids for users
of the library, and to allow for future changes in the data
structures used to contain statistics values in userspace, the data
structures themselves are not exported in libdevmapper.h.

Instead an opaque handle of type struct dm_stats* is obtained by
calling the library and all subsequent statistics operations are
carried out using this handle. A dm_stats object represents the
complete set of available counter sets for an individual mapped
device.

The dm_stats handle contains a pointer to a table of one or more
dm_stats_region objects representing the regions registered with the
@stats_create message. These in turn point to a table of one or more
dm_stats_counters objects containing the counter sets for each defined
area within the region:

  dm_stats->dm_stats_region[nr_regions]->dm_stats_counters[nr_areas]

This structure is private to the library and may change in future
versions: all users should make use of the public interface and treat
the dm_stats type as an opaque handle. Accessor methods are provided
to obtain values stored in individual region and area objects.

Ranges and counter sets are stored in order of increasing device
sector.

Public methods are provided to create and destroy handles and to
list, create, and destroy statistics regions, as well as to obtain and
parse the counter data.

Functions are provided to return higher-level, Linux iostat-style
derived performance metrics:

    dm_stats_get_throughput()
    dm_stats_get_utilization()
    dm_stats_get_service_time()
    dm_stats_get_rd_merges_per_sec()
    dm_stats_get_wr_merges_per_sec()
    dm_stats_get_reads_per_sec()
    dm_stats_get_read_sectors_per_sec()
    dm_stats_get_writes_per_sec()
    dm_stats_get_write_sectors_per_sec()
    dm_stats_get_average_request_size()
    dm_stats_get_average_queue_size()
    dm_stats_get_await()
    dm_stats_get_r_await()
    dm_stats_get_w_await()
2015-07-31 12:57:35 +01:00
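
To make the interface above concrete, the sketch below obtains a handle,
lists and populates a region, and reads one derived metric through an
accessor. The signatures follow later libdevmapper releases and may not match
this initial commit exactly; the device name is a placeholder, and a real
caller would also set a sampling interval and take repeated samples before
trusting the per-second values.

  #include <stdio.h>
  #include <libdevmapper.h>

  /* Minimal sketch: print reads/sec for area 0 of region 0 of one mapped
   * device. "vg_hex-lv_home" is only a placeholder name and region 0 is
   * assumed to exist. */
  int example_print_reads_per_sec(void)
  {
      struct dm_stats *dms;
      double reads_per_sec;

      if (!(dms = dm_stats_create(NULL)))    /* NULL: default program_id */
          return 0;

      if (!dm_stats_bind_name(dms, "vg_hex-lv_home"))
          goto bad;

      /* Discover the regions registered with @stats_create ... */
      if (!dm_stats_list(dms, NULL) || !dm_stats_get_nr_regions(dms))
          goto bad;

      /* ... then fetch and parse the counter sets for region 0. */
      if (!dm_stats_populate(dms, NULL, 0))
          goto bad;

      if (!dm_stats_get_reads_per_sec(dms, &reads_per_sec, 0, 0))
          goto bad;

      printf("R/s: %.2f\n", reads_per_sec);
      dm_stats_destroy(dms);
      return 1;

  bad:
      dm_stats_destroy(dms);
      return 0;
  }
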
Bryn M. Reeves
077afe5b23 libdm-report: rename dm_report_headings()
Rename dm_report_headings to dm_report_column_headings() to make
it clear that it's the column headings being output.
2015-07-31 12:17:38 +01:00
Bryn M. Reeves
aa4d97d318 libdm-report: fix row and headings leaks
Not releasing objects back to the pool is fine for short-lived
pools since the memory will be freed when dm_pool_destroy() is
called.

Any pool that may be long-lived needs to be more careful to free
objects back to the pool to avoid leaking memory that will not be
reclaimed until the pool is destroyed at process exit time.

The report pool currently leaks each headings line and some row
data.

Although dm_report_output() tries to free the first allocated row,
this may end up freeing a later row due to sorting of the row list
while reporting. Store a pointer to the first allocated row from
_do_report_object() instead and free this at the end of
_output_as_columns(), _output_as_rows(), and dm_report_clear().

Also make sure to call dm_pool_free() for the headings line built
in _report_headings().

Without these changes dmstats reports can leak around 600k in 10m
(exact rate depends on fields and values):

 top - 12:11:32 up 4 days,  3:16, 15 users,  load average: 0.01, 0.12, 0.14
  PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND
 6473 root      20   0  130196   3124   2792 S   0.0  0.0   0:00.00 dmstats

 top - 12:22:04 up 4 days,  3:26, 15 users,  load average: 0.06, 0.11, 0.13
  PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND
 6498 root      20   0  130836   3712   2752 S   0.0  0.0   0:00.60 dmstats

With this patch no increase in RSS is seen:

 top - 13:54:58 up 4 days,  4:59, 15 users,  load average: 0.12, 0.14, 0.14
  PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND
13962 root      20   0  130196   2996   2688 S   0.0  0.0   0:00.00 dmstats

 top - 14:04:31 up 4 days,  5:09, 15 users,  load average: 1.02, 0.67, 0.36
  PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND
13962 root      20   0  130196   2996   2688 S   0.3  0.0   0:00.32 dmstats
2015-07-31 12:06:10 +01:00
Bryn M. Reeves
a4b5f8ef39 libdm-report: add dm_report_clear()
Add a call to clear (abandon) a report's current data. This can
be used by callers that make repeating reports, such as dmstats,
in order to throw away the data of the first iteration (which will
have accumulated over some unknown interval).
2015-07-31 12:06:10 +01:00
Bryn M. Reeves
795c590980 libdm-report: add dm_report_headings()
Add a function to output the headings of columns-based reports
even if they have already been shown.

This will be used by dmstats reports to produce iostat-like
repeating reports of statistics values.

This patch removes a check for RH_HEADINGS_PRINTED from
_report_headings that prevents headings being displayed if the flag
is already set; this check is redundant since the only existing
caller (_output_as_columns()) already tests the flag before
calling the function.
2015-07-31 12:06:10 +01:00
Bryn M. Reeves
ba2da29c4c libdm: add interval support to libdm reports
Add functions for dealing with reports that repeat at a set time
interval:

  dm_report_get_interval()
  dm_report_set_interval()
  dm_report_wait_interval()

The first should be called following dm_report_init() to set the
desired interval to be used with this report. Once the report has
been prepared the program should call dm_report_wait_interval()
to suspend execution until the current interval expires. When the
wait function returns the caller should obtain and output report
data for the new interval.

Measure the actual wait interval in dm_report_wait_interval and
add dm_report_get_last_interval() so that callers can obtain it
to pass to statistics methods.

Make report interval handling consistent everywhere in libdm by
storing the report interval in nanoseconds and adding additional
helper functions to get and set a value in milliseconds. This is
consistent with the other parts of libdm that handle statistics
intervals and removes the need to convert between different
representations within the library - scaling is only needed to
either present a value to the user or to pass to an external
function that expects a particular unit of time (e.g. usleep()).
2015-07-31 12:06:04 +01:00
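
Putting these calls together with dm_report_clear() and
dm_report_column_headings() from the commits above, a repeating report loop
might look like the sketch below. The dm_report interval names come from the
commit messages, but their exact signatures (a nanosecond argument, the
return types) are assumptions.

  #include <libdevmapper.h>

  #define EXAMPLE_INTERVAL_NS 1000000000ULL    /* one second */

  /* Sketch of an iostat-style repeating report using the interval calls
   * described above; the interval function signatures are assumed. */
  void example_report_loop(struct dm_report *rh, unsigned count)
  {
      unsigned i;

      /* Assumed to take the desired interval in nanoseconds. */
      dm_report_set_interval(rh, EXAMPLE_INTERVAL_NS);

      /* The first data set has accumulated over an unknown interval:
       * gather it, then abandon it with dm_report_clear(). */
      /* ... dm_report_object() calls for the first pass ... */
      dm_report_clear(rh);

      for (i = 0; i < count; i++) {
          /* Suspend until the current interval expires; the measured
           * interval is then available via dm_report_get_last_interval(). */
          dm_report_wait_interval(rh);

          /* ... dm_report_object() calls for the new interval ... */

          /* Redisplay column headings for each repeated report. */
          dm_report_column_headings(rh);
          dm_report_output(rh);
      }
  }
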
417 changed files with 10098 additions and 17506 deletions

View File

@@ -131,9 +131,6 @@ rpm: dist
generate: conf.generate
$(MAKE) -C conf generate
all_man:
$(MAKE) -C man all_man
install_system_dirs:
$(INSTALL_DIR) $(DESTDIR)$(DEFAULT_SYS_DIR)
$(INSTALL_ROOT_DIR) $(DESTDIR)$(DEFAULT_ARCHIVE_DIR)
@@ -153,9 +150,6 @@ install_systemd_generators:
install_systemd_units:
$(MAKE) -C scripts install_systemd_units
install_all_man:
$(MAKE) -C man install_all_man
ifeq ("@PYTHON_BINDINGS@", "yes")
install_python_bindings:
$(MAKE) -C liblvm/python install_python_bindings
@@ -232,9 +226,10 @@ endif
ifneq ($(shell which ctags),)
.PHONY: tags
all: tags
tags:
test -z "$(shell find $(top_srcdir) -type f -name '*.[ch]' -newer tags 2>/dev/null | head -1)" || $(RM) tags
test -f tags || find $(top_srcdir) -maxdepth 5 -type f -name '*.[ch]' -exec ctags -a '{}' +
test -z "$(shell find $(top_srcdir) -type f -name '*.[ch]' -newer tags | head -1)" || $(RM) tags
test -f tags || find $(top_srcdir) -maxdepth 4 -type f -name '*.[ch]' -exec ctags -a '{}' +
CLEAN_TARGETS += tags
DISTCLEAN_TARGETS += tags
endif

View File

@@ -1 +1 @@
2.02.134(2)-git (2015-10-30)
2.02.127(2)-git (2015-07-24)

View File

@@ -1 +1 @@
1.02.111-git (2015-10-30)
1.02.104-git (2015-07-24)

101
WHATS_NEW
View File

@@ -1,103 +1,8 @@
Version 2.02.134 -
====================================
Version 2.02.133 - 30th October 2015
====================================
Support repeated -o|--options for reporting commands.
Support -o- and -o# for reporting commands to remove and compact fields.
Fix missing PVs from pvs output if vgremove is run concurrently.
Remove unwanted error message when running pvs/vgs/lvs and vgremove at once.
Check newly created VG's metadata do not overlap in metadata ring buffer.
Check metadata area size is at least the minimum size defined for the format.
Thin pool targets uses low_water_mark from profile.
Dropping 'yet' from error of unsupported thick snapshot of snapshots.
Do not support unpartitioned DASD devices with CDL formatted with pvcreate.
For thins use flush for suspend only when volume size is reduced.
Enable code which detects the need of flush during suspend.
Ensure --use-policy will resize volume to fit below threshold.
Correct percentage evaluation when checking thin-pool over threshold.
Fix lvmcache to move PV from VG to orphans if VG is removed and lvmetad used.
Fix lvmcache to not cache even invalid info about PV which got removed.
Support checking of memlock daemon counter.
Allow all log levels to be used with the lvmetad -l option.
Add optional shutdown when idle support for lvmetad.
Fix missing in-sync progress info while lvconvert used with lvmpolld.
Add report/compact_output_cols to lvm.conf to define report cols to compact.
Do not change logging in lvm2 library when it's already set.
Check for enough space in thin-pool in command before creating new thin.
Make libblkid detect all copies of the same signature if use_blkid_wiping=1.
Fix vgimportclone with -n to not add number unnecessarily to base VG name.
Cleanup vgimportclone script and remove dependency on awk, grep, cut and tr.
Add vg_missing_pv_count report field to report number of missing PVs in a VG.
Properly identify internal LV holding sanlock locks within lv_role field.
Add metadata_devices and seg_metadata_le_ranges report fields for raid vols.
Fix lvm2-{activation,clvmd,cmirrord,monitor} service to exec before mounting.
Version 2.02.132 - 22nd September 2015
======================================
Fix lvmconf to set locking_type=2 if external locking library is requested.
Remove verbose message when rescanning an unchanged device. (2.02.119)
Add origin_uuid, mirror_log_uuid, move_pv_uuid, convert_lv_uuid report fields.
Add pool_lv_uuid, metadata_lv_uuid, data_lv_uuid reporting fields.
Fix PV label processing failure after pvcreate in lvm shell with lvmetad.
Version 2.02.131 - 15th September 2015
======================================
Rename 'make install_full_man' to install_all_man and add all_man target.
Fix vgimportclone cache_dir path name (2.02.115).
Swapping of LV identifiers handles more complex LVs.
Use passed list of PVS when allocating space in lvconvert --thinpool.
Disallow usage of --stripe and --stripesize when creating cache pool.
Warn user when caching raid or thin pool data LV.
When layering LV, move LV flags with segments.
Ignore persistent cache if configuration changed. (2.02.127)
Fix devices/filter to be applied before disk-accessing filters. (2.02.112)
Make tags only when requested via 'make tags'.
Configure supports --disable-dependency-tracking for one-time builds.
Fix usage of configure.h when building in srcdir != builddir.
Version 2.02.130 - 5th September 2015
=====================================
Fix use of uninitialized device status if reading outdated .cache record.
Restore support for --monitor option in lvcreate (2.02.112).
Read thin-pool data and metadata percent without flush.
Detect blocked thin-pool and avoid scanning their thin volumes.
Check if dm device is usable before checking its size (2.02.116).
Extend parsing of cache_check version in configure.
Make lvpoll error messages visible in lvmpolld's stderr and in syslog.
Add 'make install_full_man' to install all man pages regardless of config.
Version 2.02.129 - 26th August 2015
===================================
Drop error message when vgdisplay encounters an exported VG. (2.02.27)
Fix shared library generation to stop exporting internal functions.(2.02.120)
Accept --cachemode with lvconvert.
Fix and improve reporting properties of cache-pool.
Enable usage of --cachepolicy and --cachesetting with lvconvert.
Don't allow to reduce size of thin-pool metadata.
Fix debug buffer overflows in cmirrord logging.
Add --foreground and --help to cmirrord.
Version 2.02.128 - 17th August 2015
===================================
Allocation setting cache_pool_cachemode is replaced by cache_mode.
Don't attempt to close config file that couldn't be opened.
Check for valid cache mode in validation of cache segment.
Change internal interface handling cache mode and policy.
When no cache policy specified, prefer smq (if available) over mq.
Add demo cache-mq and cache-smq profiles.
Add cmd profilable allocation/cache_policy,cache_settings,cache_mode.
Require cache_check 0.5.4 for use of --clear-needs-check-flag.
Fix lvmetad udev rules to not override SYSTEMD_WANTS, add the service instead.
Version 2.02.127 - 10th August 2015
===================================
Version 2.02.127 -
=================================
Do not init filters, locking, lvmetad, lvmpolld if command doesn't use it.
Order fields in struct cmd_context more logically.
Add lock_type to lvmcache VG summary and info structs.
Recognise vg/lv name format in dmsetup.
Fix regression in cache causing some PVs to bypass filters (2.02.105).
Make configure --enable-realtime the default now.
Update .gitignore and configure.in files to reflect usage of current tree.
Version 2.02.126 - 24th July 2015
=================================

View File

@@ -1,109 +1,6 @@
Version 1.02.111 -
====================================
Version 1.02.110 - 30th October 2015
====================================
Disable thin monitoring plugin when it fails too often (>10 times).
Fix/restore parsing of empty field '-' when processing dmeventd event.
Enhance dm_tree_node_size_changed() to recognize size reduction.
Support exit on idle for dmenventd (1 hour).
Add support to allow unmonitor device from plugin itself.
New design for thread co-operation in dmeventd.
Dmeventd read device status with 'noflush'.
Dmeventd closes control device when no device is monitored.
Thin plugin for dmeventd improved percentage usage.
Snapshot plugin for dmeventd improved percentage usage.
Add dm_hold_control_dev to allow holding of control device open.
Add dm_report_compact_given_fields to remove given empty fields from report.
Use libdm status parsing and local mem raid dmeventd plugin.
Use local mem pool and lock only lvm2 execution for mirror dmeventd plugin.
Lock protect only lvm2 execution for snapshot and thin dmeventd plugin.
Use local mempool for raid and mirror plugins.
Reworked thread initialization for dmeventd plugins.
Dmeventd handles snapshot overflow for now equally as invalid.
Convert dmeventd to use common logging macro system from libdm.
Return -ENOMEM when device registration fails instead of 0 (=success).
Enforce writethrough mode for cleaner policy.
Add support for recognition and deactivation of MD devices to blkdeactivate.
Move target status functions out of libdm-deptree.
Correct use of max_write_behind parameter when generating raid target line.
Fix dm-event systemd service to make sure it is executed before mounting.
Version 1.02.109 - 22nd September 2015
======================================
Update man pages for dmsetup and dmstats.
Improve help text for dmsetup.
Use --noflush and --nolockfs when removing device with --force.
Parse new Overflow status string for snapshot target.
Check dir path components are valid if using dm_create_dir, error out if not.
Fix /dev/mapper handling to remove dangling entries if symlinks are found.
Make it possible to use blank value as selection for string list report field.
Version 1.02.108 - 15th September 2015
======================================
Do not check for full thin pool when activating without messages (1.02.107).
Version 1.02.107 - 5th September 2015
=====================================
Parse thin-pool status with one single routine internally.
Add --histogram to select default histogram fields for list and report.
Add report fields for displaying latency histogram configuration and data.
Add dmstats --bounds to specify histogram boundaries for a new region.
Add dm_histogram_to_string() to format histogram data in string form.
Add public methods to libdm to access numerical histogram config and data.
Parse and store histogram data in dm_stats_list() and dm_stats_populate().
Add an argument to specify histogram bounds to dm_stats_create_region().
Add dm_histogram_bounds_from_{string,uint64_t}() to parse histogram bounds.
Add dm_histogram handle type to represent a latency histogram and its bounds.
Fix devmapper.pc pkgconfig file to not reference non-existent rt.pc file.
Reinstate dm_task_get_info@Base to libdevmapper exports. (1.02.106)
Version 1.02.106 - 26th August 2015
===================================
Add 'precise' column to statistics reports.
Add --precise switch to 'dmstats create' to request nanosecond counters.
Add precise argument to dm_stats_create_region().
Add support to libdm-stats for precise_timestamps
Version 1.02.105 - 17th August 2015
===================================
Fix 'dmstats list -o all' segfault.
Separate dmstats statistics fields from region information fields.
Add interval and interval_ns fields to dmstats reports.
Do not include internal glibc headers in libdm-timestamp.c (1.02.104)
Exit immediately if no device is supplied to dmsetup wipe_table.
Suppress dmsetup report headings when no data is output. (1.02.104)
Adjust dmsetup usage/help output selection to match command invoked.
Fix dmsetup -o all to select correct fields in splitname report.
Restructure internal dmsetup argument handling across all commands.
Add dm_report_is_empty() to indicate there is no data awaiting output.
Add more arg validation for dm_tree_node_add_cache_target().
Add --alldevices switch to replace use of --force for stats create / delete.
Version 1.02.104 - 10th August 2015
===================================
Add dmstats.8 man page
Add dmstats --segments switch to create one region per device segment.
Add dmstats --regionid, --allregions to specify a single / all stats regions.
Add dmstats --allprograms for stats commands that filter by program ID.
Add dmstats --auxdata and --programid args to specify aux data and program ID.
Add report stats sub-command to provide repeating stats reports.
Add clear, delete, list, and print stats sub-commands.
Add create stats sub-command and --start, --length, --areas and --areasize.
Recognize 'dmstats' as an alias for 'dmsetup stats' when run with this name.
Add a 'stats' command to dmsetup to configure, manage and report stats data.
Add statistics fields to dmsetup -o.
Add libdm-stats library to allow management of device-mapper statistics.
Add --nosuffix to suppress dmsetup unit suffixes in report output.
Add --units to control dmsetup report field output units.
Add support to redisplay column headings for repeating column reports.
Fix report header and row resource leaks.
Report timestamps of ioctls with dmsetup -vvv.
Recognize report field name variants without any underscores too.
Add dmsetup --interval and --count to repeat reports at specified intervals.
Version 1.02.104 -
=================================
Add dm_timestamp functions to libdevmapper.
Recognise vg/lv name format in dmsetup.
Move size display code to libdevmapper as dm_size_to_string.
Version 1.02.103 - 24th July 2015
=================================

2
aclocal.m4 vendored
View File

@@ -15,7 +15,7 @@ m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun
# pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*-
# serial 1 (pkg-config-0.24)
#
# Copyright (c) 2004 Scott James Remnant <scott@netsplit.com>.
# Copyright © 2004 Scott James Remnant <scott@netsplit.com>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by

2
conf/.gitignore vendored
View File

@@ -2,5 +2,3 @@ command_profile_template.profile
example.conf
lvmlocal.conf
metadata_profile_template.profile
configure.h
lvm-version.h

View File

@@ -1,5 +1,5 @@
#
# Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved.
# Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
@@ -20,11 +20,7 @@ CONFDEST=lvm.conf
CONFLOCAL=lvmlocal.conf
PROFILE_TEMPLATES=command_profile_template.profile metadata_profile_template.profile
PROFILES=$(PROFILE_TEMPLATES) \
$(srcdir)/cache-mq.profile \
$(srcdir)/cache-smq.profile \
$(srcdir)/thin-generic.profile \
$(srcdir)/thin-performance.profile
PROFILES=$(PROFILE_TEMPLATES) $(srcdir)/thin-generic.profile $(srcdir)/thin-performance.profile
include $(top_builddir)/make.tmpl

View File

@@ -1,20 +0,0 @@
# Demo configuration 'mq' cache policy
#
# Note: This policy has been deprecated in favor of the smq policy
# keyword "default" means, setting is left with kernel defaults.
#
allocation {
cache_pool_chunk_size = 64
cache_mode = "writethrough"
cache_policy = "mq"
cache_settings {
mq {
sequential_threshold = "default" # #nr_sequential_ios
random_threshold = "default" # #nr_random_ios
read_promote_adjustment = "default"
write_promote_adjustment = "default"
discard_promote_adjustment = "default"
}
}
}

View File

@@ -1,14 +0,0 @@
# Demo configuration 'smq' cache policy
#
# The stochastic multi-queue (smq) policy addresses some of the problems
# with the multiqueue (mq) policy and uses less memory.
#
allocation {
cache_pool_chunk_size = 64
cache_mode = "writethrough"
cache_policy = "smq"
cache_settings {
# currently no settins for "smq" policy
}
}

File diff suppressed because it is too large

View File

@@ -24,33 +24,34 @@ local {
# Configuration option local/system_id.
# Defines the local system ID for lvmlocal mode.
# This is used when global/system_id_source is set to 'lvmlocal' in the
# main configuration file, e.g. lvm.conf. When used, it must be set to
# a unique value among all hosts sharing access to the storage,
# This is used when global/system_id_source is set
# to 'lvmlocal' in the main configuration file,
# e.g. lvm.conf.
# When used, it must be set to a unique value
# among all hosts sharing access to the storage,
# e.g. a host name.
#
# Example
# Set no system ID:
# Example:
# Set no system ID.
# system_id = ""
# Set the system_id to a specific name:
# Example:
# Set the system_id to the string 'host1'.
# system_id = "host1"
#
# This configuration option has an automatic default value.
# system_id = ""
# Configuration option local/extra_system_ids.
# A list of extra VG system IDs the local host can access.
# VGs with the system IDs listed here (in addition to the host's own
# system ID) can be fully accessed by the local host. (These are
# system IDs that the host sees in VGs, not system IDs that identify
# the local host, which is determined by system_id_source.)
# Use this only after consulting 'man lvmsystemid' to be certain of
# correct usage and possible dangers.
# VGs with the system IDs listed here (in addition
# to the host's own system ID) can be fully accessed
# by the local host. (These are system IDs that the
# host sees in VGs, not system IDs that identify the
# local host, which is determined by system_id_source.)
# Use this only after consulting 'man lvmsystemid'
# to be certain of correct usage and possible dangers.
# This configuration option does not have a default value defined.
# Configuration option local/host_id.
# The lvmlockd sanlock host_id.
# This must be unique among all hosts, and must be between 1 and 2000.
# This configuration option has an automatic default value.
# This must be a unique among all hosts,
# and must be between 1 and 2000.
# host_id = 0
}

119
configure vendored
View File

@@ -643,7 +643,6 @@ LVMETAD_PIDFILE
DMEVENTD_PIDFILE
WRITE_INSTALL
VALGRIND_POOL
USE_TRACKING
UDEV_HAS_BUILTIN_BLKID
UDEV_RULE_EXEC_DETECTION
UDEV_SYSTEMD_BACKGROUND_JOBS
@@ -660,13 +659,12 @@ SELINUX_PC
SELINUX_LIBS
REPLICATORS
READLINE_LIBS
RT_LIB
RT_PC
RAID
PYTHON_LIBDIRS
PYTHON_INCDIRS
PYTHON_BINDINGS
PTHREAD_LIBS
M_LIBS
POOL
PKGCONFIG
OCFDIR
@@ -694,6 +692,7 @@ BLKDEACTIVATE
FSADM
ELDFLAGS
DM_LIB_PATCHLEVEL
DM_LIB_VERSION
DMEVENTD_PATH
DMEVENTD
DL_LIBS
@@ -876,7 +875,6 @@ SHELL'
ac_subst_files=''
ac_user_opts='
enable_option_checking
enable_dependency_tracking
enable_static_link
with_user
with_group
@@ -1635,8 +1633,6 @@ Optional Features:
--disable-option-checking ignore unrecognized --enable/--with options
--disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
--enable-FEATURE[=ARG] include FEATURE [ARG=yes]
--disable-dependency-tracking
speeds up one-time build.
--enable-static_link use this to link the tools to their libraries
statically (default is dynamic linking
--enable-lvm1_fallback use this to fall back and use LVM1 binaries if
@@ -2969,7 +2965,7 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
ac_config_headers="$ac_config_headers include/configure.h"
ac_config_headers="$ac_config_headers lib/misc/configure.h"
################################################################################
@@ -5738,7 +5734,7 @@ fi
done
for ac_header in termios.h sys/statvfs.h sys/timerfd.h
for ac_header in termios.h sys/statvfs.h
do :
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
@@ -7635,19 +7631,6 @@ done
################################################################################
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable dependency tracking" >&5
$as_echo_n "checking whether to enable dependency tracking... " >&6; }
# Check whether --enable-dependency-tracking was given.
if test "${enable_dependency_tracking+set}" = set; then :
enableval=$enable_dependency_tracking; USE_TRACKING=$enableval
else
USE_TRACKING=yes
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_TRACKING" >&5
$as_echo "$USE_TRACKING" >&6; }
################################################################################
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use static linking" >&5
$as_echo_n "checking whether to use static linking... " >&6; }
@@ -8830,27 +8813,20 @@ $as_echo "$as_me: WARNING: cache_check not found in path $PATH" >&2;}
fi
fi
if test "$CACHE_CHECK_NEEDS_CHECK" = yes; then
$CACHE_CHECK_CMD -V 2>/dev/null >conftest.tmp
read -r CACHE_CHECK_VSN < conftest.tmp
IFS=.- read -r CACHE_CHECK_VSN_MAJOR CACHE_CHECK_VSN_MINOR CACHE_CHECK_VSN_PATCH LEFTOVER < conftest.tmp
rm -f conftest.tmp
CACHE_CHECK_VSN=`"$CACHE_CHECK_CMD" -V 2>/dev/null`
CACHE_CHECK_VSN_MAJOR=`echo "$CACHE_CHECK_VSN" | $AWK -F '.' '{print $1}'`
CACHE_CHECK_VSN_MINOR=`echo "$CACHE_CHECK_VSN" | $AWK -F '.' '{print $2}'`
# Require version >= 0.5.4 for --clear-needs-check-flag
if test -z "$CACHE_CHECK_VSN_MAJOR" \
|| test -z "$CACHE_CHECK_VSN_MINOR" \
|| test -z "$CACHE_CHECK_VSN_PATCH"; then
if test -z "$CACHE_CHECK_VSN_MAJOR" -o -z "$CACHE_CHECK_VSN_MINOR"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $CACHE_CHECK_CMD: Bad version \"$CACHE_CHECK_VSN\" found" >&5
$as_echo "$as_me: WARNING: $CACHE_CHECK_CMD: Bad version \"$CACHE_CHECK_VSN\" found" >&2;}
CACHE_CHECK_VERSION_WARN=y
CACHE_CHECK_NEEDS_CHECK=no
elif test "$CACHE_CHECK_VSN_MAJOR" -eq 0 ; then
if test "$CACHE_CHECK_VSN_MINOR" -lt 5 \
|| test "$CACHE_CHECK_VSN_MINOR" -eq 5 -a "$CACHE_CHECK_VSN_PATCH" -lt 4; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $CACHE_CHECK_CMD: Old version \"$CACHE_CHECK_VSN\" found" >&5
elif test "$CACHE_CHECK_VSN_MAJOR" -eq 0 -a "$CACHE_CHECK_VSN_MINOR" -lt 5; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $CACHE_CHECK_CMD: Old version \"$CACHE_CHECK_VSN\" found" >&5
$as_echo "$as_me: WARNING: $CACHE_CHECK_CMD: Old version \"$CACHE_CHECK_VSN\" found" >&2;}
CACHE_CHECK_VERSION_WARN=y
CACHE_CHECK_NEEDS_CHECK=no
fi
CACHE_CHECK_VERSION_WARN=y
CACHE_CHECK_NEEDS_CHECK=no
fi
fi
# Empty means a config way to ignore cache dumping
@@ -12586,50 +12562,6 @@ if [ \( "$LVM1" = shared -o "$POOL" = shared -o "$CLUSTER" = shared \
as_fn_error $? "Features cannot be 'shared' when building statically" "$LINENO" 5
fi
################################################################################
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for log10 in -lm" >&5
$as_echo_n "checking for log10 in -lm... " >&6; }
if ${ac_cv_lib_m_log10+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
LIBS="-lm $LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
/* Override any GCC internal prototype to avoid an error.
Use char because int might match the return type of a GCC
builtin and then its argument prototype would still apply. */
#ifdef __cplusplus
extern "C"
#endif
char log10 ();
int
main ()
{
return log10 ();
;
return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
ac_cv_lib_m_log10=yes
else
ac_cv_lib_m_log10=no
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_m_log10" >&5
$as_echo "$ac_cv_lib_m_log10" >&6; }
if test "x$ac_cv_lib_m_log10" = xyes; then :
M_LIBS="-lm"
else
hard_bailout
fi
################################################################################
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_mutex_lock in -lpthread" >&5
$as_echo_n "checking for pthread_mutex_lock in -lpthread... " >&6; }
@@ -12863,11 +12795,13 @@ fi
$as_echo "#define HAVE_REALTIME 1" >>confdefs.h
LIBS="-lrt $LIBS"
RT_LIB="-lrt"
RT_PC="librt"
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Disabling realtime clock" >&5
$as_echo "$as_me: WARNING: Disabling realtime clock" >&2;}
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $HAVE_REALTIME" >&5
$as_echo "$HAVE_REALTIME" >&6; }
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for struct stat has st_ctim." >&5
@@ -13884,14 +13818,16 @@ cat >>confdefs.h <<_ACEOF
_ACEOF
clvmd_prefix=$ac_default_prefix
test "$prefix" != NONE && clvmd_prefix=$prefix
CLVMD_PATH="$clvmd_prefix/sbin/clvmd"
if test "$CLVMD" != none; then
clvmd_prefix=$ac_default_prefix
test "$prefix" != NONE && clvmd_prefix=$prefix
CLVMD_PATH="$clvmd_prefix/sbin/clvmd"
cat >>confdefs.h <<_ACEOF
#define CLVMD_PATH "$CLVMD_PATH"
_ACEOF
fi
################################################################################
if test "$BUILD_DMEVENTD" = yes; then
@@ -14042,18 +13978,18 @@ test "$interface" != ioctl && as_fn_error $? "--with-interface=ioctl required. f
$as_echo "$interface" >&6; }
################################################################################
read DM_LIB_VERSION < "$srcdir"/VERSION_DM 2>/dev/null || DM_LIB_VERSION=Unknown
DM_LIB_VERSION="\"`cat "$srcdir"/VERSION_DM 2>/dev/null || echo Unknown`\""
cat >>confdefs.h <<_ACEOF
#define DM_LIB_VERSION "$DM_LIB_VERSION"
#define DM_LIB_VERSION $DM_LIB_VERSION
_ACEOF
DM_LIB_PATCHLEVEL=`cat "$srcdir"/VERSION_DM | $AWK -F '[-. ]' '{printf "%s.%s.%s",$1,$2,$3}'`
read VER < "$srcdir"/VERSION 2>/dev/null || VER=Unknown
LVM_VERSION="\"`cat "$srcdir"/VERSION 2>/dev/null || echo Unknown`\""
LVM_VERSION=\"$VER\"
VER=`cat "$srcdir"/VERSION`
LVM_RELEASE_DATE="\"`echo $VER | $SED 's/.* (//;s/).*//'`\""
VER=`echo "$VER" | $AWK '{print $1}'`
LVM_RELEASE="\"`echo "$VER" | $AWK -F '-' '{print $2}'`\""
@@ -14208,11 +14144,10 @@ LVM_LIBAPI=`echo "$VER" | $AWK -F '[()]' '{print $2}'`
################################################################################
ac_config_files="$ac_config_files Makefile make.tmpl daemons/Makefile daemons/clvmd/Makefile daemons/cmirrord/Makefile daemons/dmeventd/Makefile daemons/dmeventd/libdevmapper-event.pc daemons/dmeventd/plugins/Makefile daemons/dmeventd/plugins/lvm2/Makefile daemons/dmeventd/plugins/raid/Makefile daemons/dmeventd/plugins/mirror/Makefile daemons/dmeventd/plugins/snapshot/Makefile daemons/dmeventd/plugins/thin/Makefile daemons/lvmetad/Makefile daemons/lvmpolld/Makefile daemons/lvmlockd/Makefile conf/Makefile conf/example.conf conf/lvmlocal.conf conf/command_profile_template.profile conf/metadata_profile_template.profile include/.symlinks include/Makefile lib/Makefile lib/format1/Makefile lib/format_pool/Makefile lib/locking/Makefile lib/mirror/Makefile lib/replicator/Makefile include/lvm-version.h lib/raid/Makefile lib/snapshot/Makefile lib/thin/Makefile lib/cache_segtype/Makefile libdaemon/Makefile libdaemon/client/Makefile libdaemon/server/Makefile libdm/Makefile libdm/libdevmapper.pc liblvm/Makefile liblvm/liblvm2app.pc man/Makefile po/Makefile python/Makefile python/setup.py scripts/blkdeactivate.sh scripts/blk_availability_init_red_hat scripts/blk_availability_systemd_red_hat.service scripts/clvmd_init_red_hat scripts/cmirrord_init_red_hat scripts/dm_event_systemd_red_hat.service scripts/dm_event_systemd_red_hat.socket scripts/lvm2_cluster_activation_red_hat.sh scripts/lvm2_cluster_activation_systemd_red_hat.service scripts/lvm2_clvmd_systemd_red_hat.service scripts/lvm2_cmirrord_systemd_red_hat.service scripts/lvm2_lvmetad_init_red_hat scripts/lvm2_lvmetad_systemd_red_hat.service scripts/lvm2_lvmetad_systemd_red_hat.socket scripts/lvm2_lvmpolld_init_red_hat scripts/lvm2_lvmpolld_systemd_red_hat.service scripts/lvm2_lvmpolld_systemd_red_hat.socket scripts/lvm2_lvmlockd_systemd_red_hat.service scripts/lvm2_lvmlocking_systemd_red_hat.service scripts/lvm2_monitoring_init_red_hat scripts/lvm2_monitoring_systemd_red_hat.service scripts/lvm2_pvscan_systemd_red_hat@.service scripts/lvm2_tmpfiles_red_hat.conf scripts/Makefile test/Makefile test/api/Makefile test/unit/Makefile tools/Makefile udev/Makefile unit-tests/datastruct/Makefile unit-tests/regex/Makefile unit-tests/mm/Makefile"
ac_config_files="$ac_config_files Makefile make.tmpl daemons/Makefile daemons/clvmd/Makefile daemons/cmirrord/Makefile daemons/dmeventd/Makefile daemons/dmeventd/libdevmapper-event.pc daemons/dmeventd/plugins/Makefile daemons/dmeventd/plugins/lvm2/Makefile daemons/dmeventd/plugins/raid/Makefile daemons/dmeventd/plugins/mirror/Makefile daemons/dmeventd/plugins/snapshot/Makefile daemons/dmeventd/plugins/thin/Makefile daemons/lvmetad/Makefile daemons/lvmpolld/Makefile daemons/lvmlockd/Makefile conf/Makefile conf/example.conf conf/lvmlocal.conf conf/command_profile_template.profile conf/metadata_profile_template.profile include/.symlinks include/Makefile lib/Makefile lib/format1/Makefile lib/format_pool/Makefile lib/locking/Makefile lib/mirror/Makefile lib/replicator/Makefile lib/misc/lvm-version.h lib/raid/Makefile lib/snapshot/Makefile lib/thin/Makefile lib/cache_segtype/Makefile libdaemon/Makefile libdaemon/client/Makefile libdaemon/server/Makefile libdm/Makefile libdm/libdevmapper.pc liblvm/Makefile liblvm/liblvm2app.pc man/Makefile po/Makefile python/Makefile python/setup.py scripts/blkdeactivate.sh scripts/blk_availability_init_red_hat scripts/blk_availability_systemd_red_hat.service scripts/clvmd_init_red_hat scripts/cmirrord_init_red_hat scripts/dm_event_systemd_red_hat.service scripts/dm_event_systemd_red_hat.socket scripts/lvm2_cluster_activation_red_hat.sh scripts/lvm2_cluster_activation_systemd_red_hat.service scripts/lvm2_clvmd_systemd_red_hat.service scripts/lvm2_cmirrord_systemd_red_hat.service scripts/lvm2_lvmetad_init_red_hat scripts/lvm2_lvmetad_systemd_red_hat.service scripts/lvm2_lvmetad_systemd_red_hat.socket scripts/lvm2_lvmpolld_init_red_hat scripts/lvm2_lvmpolld_systemd_red_hat.service scripts/lvm2_lvmpolld_systemd_red_hat.socket scripts/lvm2_lvmlockd_systemd_red_hat.service scripts/lvm2_lvmlocking_systemd_red_hat.service scripts/lvm2_monitoring_init_red_hat scripts/lvm2_monitoring_systemd_red_hat.service scripts/lvm2_pvscan_systemd_red_hat@.service scripts/lvm2_tmpfiles_red_hat.conf scripts/Makefile test/Makefile test/api/Makefile test/unit/Makefile tools/Makefile udev/Makefile unit-tests/datastruct/Makefile unit-tests/regex/Makefile unit-tests/mm/Makefile"
cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
@@ -14906,7 +14841,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
for ac_config_target in $ac_config_targets
do
case $ac_config_target in
"include/configure.h") CONFIG_HEADERS="$CONFIG_HEADERS include/configure.h" ;;
"lib/misc/configure.h") CONFIG_HEADERS="$CONFIG_HEADERS lib/misc/configure.h" ;;
"Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
"make.tmpl") CONFIG_FILES="$CONFIG_FILES make.tmpl" ;;
"daemons/Makefile") CONFIG_FILES="$CONFIG_FILES daemons/Makefile" ;;
@@ -14936,7 +14871,7 @@ do
"lib/locking/Makefile") CONFIG_FILES="$CONFIG_FILES lib/locking/Makefile" ;;
"lib/mirror/Makefile") CONFIG_FILES="$CONFIG_FILES lib/mirror/Makefile" ;;
"lib/replicator/Makefile") CONFIG_FILES="$CONFIG_FILES lib/replicator/Makefile" ;;
"include/lvm-version.h") CONFIG_FILES="$CONFIG_FILES include/lvm-version.h" ;;
"lib/misc/lvm-version.h") CONFIG_FILES="$CONFIG_FILES lib/misc/lvm-version.h" ;;
"lib/raid/Makefile") CONFIG_FILES="$CONFIG_FILES lib/raid/Makefile" ;;
"lib/snapshot/Makefile") CONFIG_FILES="$CONFIG_FILES lib/snapshot/Makefile" ;;
"lib/thin/Makefile") CONFIG_FILES="$CONFIG_FILES lib/thin/Makefile" ;;

View File

@@ -16,7 +16,7 @@ AC_PREREQ(2.61)
dnl -- Process this file with autoconf to produce a configure script.
AC_INIT
AC_CONFIG_SRCDIR([lib/device/dev-cache.h])
AC_CONFIG_HEADERS([include/configure.h])
AC_CONFIG_HEADERS([lib/misc/configure.h])
################################################################################
dnl -- Setup the directory where autoconf has auxilary files
@@ -103,7 +103,7 @@ AC_CHECK_HEADERS([assert.h ctype.h dirent.h errno.h fcntl.h float.h \
sys/time.h sys/types.h sys/utsname.h sys/wait.h time.h \
unistd.h], , [AC_MSG_ERROR(bailing out)])
AC_CHECK_HEADERS(termios.h sys/statvfs.h sys/timerfd.h)
AC_CHECK_HEADERS(termios.h sys/statvfs.h)
case "$host_os" in
linux*)
@@ -155,15 +155,6 @@ AC_FUNC_STAT
AC_FUNC_STRTOD
AC_FUNC_VPRINTF
################################################################################
dnl -- Disable dependency tracking
AC_MSG_CHECKING(whether to enable dependency tracking)
AC_ARG_ENABLE(dependency-tracking,
AC_HELP_STRING([--disable-dependency-tracking],
[speeds up one-time build.]),
USE_TRACKING=$enableval, USE_TRACKING=yes)
AC_MSG_RESULT($USE_TRACKING)
################################################################################
dnl -- Enables statically-linked tools
AC_MSG_CHECKING(whether to use static linking)
@@ -593,25 +584,18 @@ case "$CACHE" in
fi
fi
if test "$CACHE_CHECK_NEEDS_CHECK" = yes; then
$CACHE_CHECK_CMD -V 2>/dev/null >conftest.tmp
read -r CACHE_CHECK_VSN < conftest.tmp
IFS=.- read -r CACHE_CHECK_VSN_MAJOR CACHE_CHECK_VSN_MINOR CACHE_CHECK_VSN_PATCH LEFTOVER < conftest.tmp
rm -f conftest.tmp
CACHE_CHECK_VSN=`"$CACHE_CHECK_CMD" -V 2>/dev/null`
CACHE_CHECK_VSN_MAJOR=`echo "$CACHE_CHECK_VSN" | $AWK -F '.' '{print $1}'`
CACHE_CHECK_VSN_MINOR=`echo "$CACHE_CHECK_VSN" | $AWK -F '.' '{print $2}'`
# Require version >= 0.5.4 for --clear-needs-check-flag
if test -z "$CACHE_CHECK_VSN_MAJOR" \
|| test -z "$CACHE_CHECK_VSN_MINOR" \
|| test -z "$CACHE_CHECK_VSN_PATCH"; then
if test -z "$CACHE_CHECK_VSN_MAJOR" -o -z "$CACHE_CHECK_VSN_MINOR"; then
AC_MSG_WARN([$CACHE_CHECK_CMD: Bad version "$CACHE_CHECK_VSN" found])
CACHE_CHECK_VERSION_WARN=y
CACHE_CHECK_NEEDS_CHECK=no
elif test "$CACHE_CHECK_VSN_MAJOR" -eq 0 ; then
if test "$CACHE_CHECK_VSN_MINOR" -lt 5 \
|| test "$CACHE_CHECK_VSN_MINOR" -eq 5 -a "$CACHE_CHECK_VSN_PATCH" -lt 4; then
AC_MSG_WARN([$CACHE_CHECK_CMD: Old version "$CACHE_CHECK_VSN" found])
CACHE_CHECK_VERSION_WARN=y
CACHE_CHECK_NEEDS_CHECK=no
fi
elif test "$CACHE_CHECK_VSN_MAJOR" -eq 0 -a "$CACHE_CHECK_VSN_MINOR" -lt 5; then
AC_MSG_WARN([$CACHE_CHECK_CMD: Old version "$CACHE_CHECK_VSN" found])
CACHE_CHECK_VERSION_WARN=y
CACHE_CHECK_NEEDS_CHECK=no
fi
fi
# Empty means a config way to ignore cache dumping
@@ -1546,10 +1530,6 @@ if [[ \( "$LVM1" = shared -o "$POOL" = shared -o "$CLUSTER" = shared \
AC_MSG_ERROR([Features cannot be 'shared' when building statically])
fi
################################################################################
AC_CHECK_LIB(m, log10,
[M_LIBS="-lm"], hard_bailout)
################################################################################
AC_CHECK_LIB([pthread], [pthread_mutex_lock],
[PTHREAD_LIBS="-lpthread"], hard_bailout)
@@ -1590,10 +1570,11 @@ if test "$REALTIME" = yes; then
if test "$HAVE_REALTIME" = yes; then
AC_DEFINE([HAVE_REALTIME], 1, [Define to 1 to include support for realtime clock.])
LIBS="-lrt $LIBS"
RT_LIB="-lrt"
RT_PC="librt"
else
AC_MSG_WARN(Disabling realtime clock)
fi
AC_MSG_RESULT($HAVE_REALTIME)
fi
dnl Check if the system has struct stat st_ctim.
@@ -1796,10 +1777,12 @@ test "$lvm_exec_prefix" = NONE && lvm_exec_prefix=$ac_default_prefix
LVM_PATH="$lvm_exec_prefix/sbin/lvm"
AC_DEFINE_UNQUOTED(LVM_PATH, ["$LVM_PATH"], [Path to lvm binary.])
clvmd_prefix=$ac_default_prefix
test "$prefix" != NONE && clvmd_prefix=$prefix
CLVMD_PATH="$clvmd_prefix/sbin/clvmd"
AC_DEFINE_UNQUOTED(CLVMD_PATH, ["$CLVMD_PATH"], [Path to clvmd binary.])
if test "$CLVMD" != none; then
clvmd_prefix=$ac_default_prefix
test "$prefix" != NONE && clvmd_prefix=$prefix
CLVMD_PATH="$clvmd_prefix/sbin/clvmd"
AC_DEFINE_UNQUOTED(CLVMD_PATH, ["$CLVMD_PATH"], [Path to clvmd binary.])
fi
################################################################################
dnl -- dmeventd pidfile and executable path
@@ -1888,14 +1871,14 @@ test "$interface" != ioctl && AC_MSG_ERROR([--with-interface=ioctl required. fs
AC_MSG_RESULT($interface)
################################################################################
read DM_LIB_VERSION < "$srcdir"/VERSION_DM 2>/dev/null || DM_LIB_VERSION=Unknown
AC_DEFINE_UNQUOTED(DM_LIB_VERSION, "$DM_LIB_VERSION", [Library version])
DM_LIB_VERSION="\"`cat "$srcdir"/VERSION_DM 2>/dev/null || echo Unknown`\""
AC_DEFINE_UNQUOTED(DM_LIB_VERSION, $DM_LIB_VERSION, [Library version])
DM_LIB_PATCHLEVEL=`cat "$srcdir"/VERSION_DM | $AWK -F '[[-. ]]' '{printf "%s.%s.%s",$1,$2,$3}'`
read VER < "$srcdir"/VERSION 2>/dev/null || VER=Unknown
LVM_VERSION="\"`cat "$srcdir"/VERSION 2>/dev/null || echo Unknown`\""
LVM_VERSION=\"$VER\"
VER=`cat "$srcdir"/VERSION`
LVM_RELEASE_DATE="\"`echo $VER | $SED 's/.* (//;s/).*//'`\""
VER=`echo "$VER" | $AWK '{print $1}'`
LVM_RELEASE="\"`echo "$VER" | $AWK -F '-' '{print $2}'`\""
@@ -1963,6 +1946,7 @@ AC_SUBST(DLM_LIBS)
AC_SUBST(DL_LIBS)
AC_SUBST(DMEVENTD)
AC_SUBST(DMEVENTD_PATH)
AC_SUBST(DM_LIB_VERSION)
AC_SUBST(DM_LIB_PATCHLEVEL)
AC_SUBST(ELDFLAGS)
AC_SUBST(FSADM)
@@ -1993,7 +1977,6 @@ AC_SUBST(OCF)
AC_SUBST(OCFDIR)
AC_SUBST(PKGCONFIG)
AC_SUBST(POOL)
AC_SUBST(M_LIBS)
AC_SUBST(PTHREAD_LIBS)
AC_SUBST(PYTHON)
AC_SUBST(PYTHON_BINDINGS)
@@ -2002,7 +1985,7 @@ AC_SUBST(PYTHON_LIBDIRS)
AC_SUBST(QUORUM_CFLAGS)
AC_SUBST(QUORUM_LIBS)
AC_SUBST(RAID)
AC_SUBST(RT_LIB)
AC_SUBST(RT_PC)
AC_SUBST(READLINE_LIBS)
AC_SUBST(REPLICATORS)
AC_SUBST(SACKPT_CFLAGS)
@@ -2031,7 +2014,6 @@ AC_SUBST(UDEV_SYNC)
AC_SUBST(UDEV_SYSTEMD_BACKGROUND_JOBS)
AC_SUBST(UDEV_RULE_EXEC_DETECTION)
AC_SUBST(UDEV_HAS_BUILTIN_BLKID)
AC_SUBST(USE_TRACKING)
AC_SUBST(VALGRIND_POOL)
AC_SUBST(WRITE_INSTALL)
AC_SUBST(DMEVENTD_PIDFILE)
@@ -2086,7 +2068,7 @@ lib/format_pool/Makefile
lib/locking/Makefile
lib/mirror/Makefile
lib/replicator/Makefile
include/lvm-version.h
lib/misc/lvm-version.h
lib/raid/Makefile
lib/snapshot/Makefile
lib/thin/Makefile

View File

@@ -154,7 +154,7 @@ static void usage(const char *prog, FILE *file)
{
fprintf(file, "Usage: %s [options]\n"
" -C Sets debug level (from -d) on all clvmd instances clusterwide\n"
" -d[<n>] Set debug logging (0:none, 1:stderr (implies -f option), 2:syslog)\n"
" -d[n] Set debug logging (0:none, 1:stderr (implies -f option), 2:syslog)\n"
" -E<uuid> Take this lock uuid as exclusively locked resource (for restart)\n"
" -f Don't fork, run in the foreground\n"
" -h Show this help information\n"
@@ -855,12 +855,12 @@ static void main_loop(int cmd_timeout)
int quorate = clops->is_quorate();
int client_count = 0;
int max_fd = 0;
struct local_client *lastfd = &local_client_head;
struct local_client *nextfd = local_client_head.next;
/* Wait on the cluster FD and all local sockets/pipes */
local_client_head.fd = clops->get_main_cluster_fd();
FD_ZERO(&in);
struct local_client *lastfd = &local_client_head;
struct local_client *nextfd = local_client_head.next;
for (thisfd = &local_client_head; thisfd; thisfd = thisfd->next) {
client_count++;

View File

@@ -15,7 +15,6 @@
#include "link_mon.h"
#include "local.h"
#include <getopt.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/socket.h>
@@ -33,49 +32,14 @@ static void daemonize(void);
static void init_all(void);
static void cleanup_all(void);
static void usage (FILE *dest)
int main(int argc __attribute__((unused)), char *argv[] __attribute__((unused)))
{
fprintf (dest, "Usage: cmirrord [options]\n"
" -f, --foreground stay in the foreground, log to the terminal\n"
" -h, --help print this help\n");
}
int main(int argc, char *argv[])
{
int foreground_mode = 0;
struct option longopts[] = {
{ "foreground", no_argument, NULL, 'f' },
{ "help" , no_argument, NULL, 'h' },
{ 0, 0, 0, 0 }
};
int opt;
while ((opt = getopt_long (argc, argv, "fh", longopts, NULL)) != -1) {
switch (opt) {
case 'f':
foreground_mode = 1;
break;
case 'h':
usage (stdout);
exit (0);
default:
usage (stderr);
exit (2);
}
}
if (optind < argc) {
usage (stderr);
exit (2);
}
if (!foreground_mode)
daemonize();
daemonize();
init_all();
/* Parent can now exit, we're ready to handle requests */
if (!foreground_mode)
kill(getppid(), SIGTERM);
kill(getppid(), SIGTERM);
LOG_PRINT("Starting cmirrord:");
LOG_PRINT(" Built: "__DATE__" "__TIME__"\n");
@@ -245,16 +209,6 @@ static void daemonize(void)
}
LOG_OPEN("cmirrord", LOG_PID, LOG_DAEMON);
}
/*
* init_all
*
* Initialize modules. Exit on failure.
*/
static void init_all(void)
{
int r;
(void) dm_prepare_selinux_context(CMIRRORD_PIDFILE, S_IFREG);
if (dm_create_lockfile(CMIRRORD_PIDFILE) == 0)
@@ -273,6 +227,16 @@ static void init_all(void)
signal(SIGUSR2, &sig_handler);
sigemptyset(&signal_mask);
signal_received = 0;
}
/*
* init_all
*
* Initialize modules. Exit on failure.
*/
static void init_all(void)
{
int r;
if ((r = init_local()) ||
(r = init_cluster())) {

View File

@@ -104,11 +104,10 @@ static SaVersionT version = { 'B', 1, 1 };
#endif
#define DEBUGGING_HISTORY 100
#define DEBUGGING_BUFLEN 128
#define LOG_SPRINT(cc, f, arg...) do { \
cc->idx++; \
cc->idx = cc->idx % DEBUGGING_HISTORY; \
snprintf(cc->debugging[cc->idx], DEBUGGING_BUFLEN, f, ## arg); \
sprintf(cc->debugging[cc->idx], f, ## arg); \
} while (0)
static int log_resp_rec = 0;
@@ -151,7 +150,7 @@ struct clog_cpg {
uint32_t checkpoint_requesters[MAX_CHECKPOINT_REQUESTERS];
struct checkpoint_data *checkpoint_list;
int idx;
char debugging[DEBUGGING_HISTORY][DEBUGGING_BUFLEN];
char debugging[DEBUGGING_HISTORY][128];
};
static struct dm_list clog_cpg_list;
@@ -1295,9 +1294,7 @@ static void cpg_join_callback(struct clog_cpg *match,
uint32_t my_pid = (uint32_t)getpid();
uint32_t lowest = match->lowest_id;
struct clog_request *rq;
char dbuf[64] = { 0 };
char *dbuf_p = dbuf;
size_t dbuf_rem = sizeof dbuf;
char dbuf[32] = { 0 };
/* Assign my_cluster_id */
if ((my_cluster_id == 0xDEAD) && (joined->pid == my_pid))
@@ -1313,17 +1310,9 @@ static void cpg_join_callback(struct clog_cpg *match,
if (joined->nodeid == my_cluster_id)
goto out;
for (i = 0; i < member_list_entries - 1; i++) {
int written = snprintf(dbuf_p, dbuf_rem, "%u-", member_list[i].nodeid);
if (written < 0) continue; /* impossible */
if ((unsigned)written >= dbuf_rem) {
dbuf_rem = 0;
break;
}
dbuf_rem -= written;
dbuf_p += written;
}
snprintf(dbuf_p, dbuf_rem, "(%u)", joined->nodeid);
for (i = 0; i < member_list_entries - 1; i++)
sprintf(dbuf+strlen(dbuf), "%u-", member_list[i].nodeid);
sprintf(dbuf+strlen(dbuf), "(%u)", joined->nodeid);
LOG_COND(log_checkpoint, "[%s] Joining node, %u needs checkpoint [%s]",
SHORT_UUID(match->name.value), joined->nodeid, dbuf);

View File

@@ -32,13 +32,12 @@
#define LOG_OFFSET 2
#define RESYNC_HISTORY 50
#define RESYNC_BUFLEN 128
//static char resync_history[RESYNC_HISTORY][128];
//static int idx = 0;
#define LOG_SPRINT(_lc, f, arg...) do { \
lc->idx++; \
lc->idx = lc->idx % RESYNC_HISTORY; \
snprintf(lc->resync_history[lc->idx], RESYNC_BUFLEN, f, ## arg); \
sprintf(lc->resync_history[lc->idx], f, ## arg); \
} while (0)
struct log_header {
@@ -89,7 +88,7 @@ struct log_c {
size_t disk_size; /* size of disk_buffer in bytes */
void *disk_buffer; /* aligned memory for O_DIRECT */
int idx;
char resync_history[RESYNC_HISTORY][RESYNC_BUFLEN];
char resync_history[RESYNC_HISTORY][128];
};
struct mark_entry {
@@ -1445,7 +1444,7 @@ static int disk_status_info(struct log_c *lc, struct dm_ulog_request *rq)
char *data = (char *)rq->data;
struct stat statbuf;
if (fstat(lc->disk_fd, &statbuf)) {
if(fstat(lc->disk_fd, &statbuf)) {
rq->error = -errno;
return -errno;
}
@@ -1508,7 +1507,7 @@ static int disk_status_table(struct log_c *lc, struct dm_ulog_request *rq)
char *data = (char *)rq->data;
struct stat statbuf;
if (fstat(lc->disk_fd, &statbuf)) {
if(fstat(lc->disk_fd, &statbuf)) {
rq->error = -errno;
return -errno;
}

File diff suppressed because it is too large

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2005-2015 Red Hat, Inc. All rights reserved.
* Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
*
* This file is part of the device-mapper userspace tools.
*
@@ -12,9 +12,9 @@
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "dm-logging.h"
#include "dmlib.h"
#include "libdevmapper-event.h"
//#include "libmultilog.h"
#include "dmeventd.h"
#include <fcntl.h>
@@ -23,11 +23,7 @@
#include <sys/stat.h>
#include <sys/wait.h>
#include <arpa/inet.h> /* for htonl, ntohl */
#include <pthread.h>
#include <syslog.h>
static int _debug_level = 0;
static int _use_syslog = 0;
static int _sequence_nr = 0;
struct dm_event_handler {
@@ -198,7 +194,7 @@ static int _check_message_id(struct dm_event_daemon_message *msg)
if ((sscanf(msg->data, "%d:%d", &pid, &seq_nr) != 2) ||
(pid != getpid()) || (seq_nr != _sequence_nr)) {
log_error("Ignoring out-of-sequence reply from dmeventd. "
"Expected %d:%d but received %s.", getpid(),
"Expected %d:%d but received %s", getpid(),
_sequence_nr, msg->data);
return 0;
}
@@ -233,7 +229,7 @@ static int _daemon_read(struct dm_event_fifos *fifos,
FD_SET(fifos->server, &fds);
ret = select(fifos->server + 1, &fds, NULL, NULL, &tval);
if (ret < 0 && errno != EINTR) {
log_error("Unable to read from event server.");
log_error("Unable to read from event server");
return 0;
}
if ((ret == 0) && (i > 4) && !bytes) {
@@ -299,7 +295,7 @@ static int _daemon_write(struct dm_event_fifos *fifos,
if (ret < 0) {
if (errno == EINTR)
continue;
log_error("Unable to talk to event daemon.");
log_error("Unable to talk to event daemon");
return 0;
}
if (ret == 0)
@@ -308,7 +304,7 @@ static int _daemon_write(struct dm_event_fifos *fifos,
if (ret < 0) {
if ((errno == EINTR) || (errno == EAGAIN))
continue;
log_error("Unable to talk to event daemon.");
log_error("Unable to talk to event daemon");
return 0;
}
}
@@ -320,7 +316,7 @@ static int _daemon_write(struct dm_event_fifos *fifos,
FD_SET(fifos->client, &fds);
ret = select(fifos->client + 1, NULL, &fds, NULL, NULL);
if ((ret < 0) && (errno != EINTR)) {
log_error("Unable to talk to event daemon.");
log_error("Unable to talk to event daemon");
return 0;
}
} while (ret < 1);
@@ -330,7 +326,7 @@ static int _daemon_write(struct dm_event_fifos *fifos,
if ((errno == EINTR) || (errno == EAGAIN))
continue;
else {
log_error("Unable to talk to event daemon.");
log_error("Unable to talk to event daemon");
return 0;
}
}
@@ -360,7 +356,7 @@ int daemon_talk(struct dm_event_fifos *fifos,
getpid(), _sequence_nr,
dso_name ? : "-", dev_name ? : "-", evmask, timeout)))
< 0) {
log_error("_daemon_talk: message allocation failed.");
log_error("_daemon_talk: message allocation failed");
return -ENOMEM;
}
msg->cmd = cmd;
@@ -448,11 +444,11 @@ static int _start_daemon(char *dmeventd_path, struct dm_event_fifos *fifos)
else if (!pid) {
execvp(args[0], args);
log_error("Unable to exec dmeventd: %s.", strerror(errno));
log_error("Unable to exec dmeventd: %s", strerror(errno));
_exit(EXIT_FAILURE);
} else {
if (waitpid(pid, &status, 0) < 0)
log_error("Unable to start dmeventd: %s.",
log_error("Unable to start dmeventd: %s",
strerror(errno));
else if (WEXITSTATUS(status))
log_error("Unable to start dmeventd.");
@@ -525,7 +521,7 @@ static struct dm_task *_get_device_info(const struct dm_event_handler *dmevh)
struct dm_info info;
if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
log_error("_get_device_info: dm_task creation for info failed.");
log_error("_get_device_info: dm_task creation for info failed");
return NULL;
}
@@ -543,17 +539,17 @@ static struct dm_task *_get_device_info(const struct dm_event_handler *dmevh)
/* FIXME Add name or uuid or devno to messages */
if (!dm_task_run(dmt)) {
log_error("_get_device_info: dm_task_run() failed.");
log_error("_get_device_info: dm_task_run() failed");
goto bad;
}
if (!dm_task_get_info(dmt, &info)) {
log_error("_get_device_info: failed to get info for device.");
log_error("_get_device_info: failed to get info for device");
goto bad;
}
if (!info.exists) {
log_error("_get_device_info: %s%s%s%.0d%s%.0d%s%s: device not found.",
log_error("_get_device_info: %s%s%s%.0d%s%.0d%s%s: device not found",
dmevh->uuid ? : "",
(!dmevh->uuid && dmevh->dev_name) ? dmevh->dev_name : "",
(!dmevh->uuid && !dmevh->dev_name && dmevh->major > 0) ? "(" : "",
@@ -622,12 +618,12 @@ int dm_event_register_handler(const struct dm_event_handler *dmevh)
!strstr(dmevh->dso, "libdevmapper-event-lvm2snapshot.so") &&
!strstr(dmevh->dso, "libdevmapper-event-lvm2mirror.so") &&
!strstr(dmevh->dso, "libdevmapper-event-lvm2raid.so"))
log_warn("WARNING: %s: dmeventd plugins are deprecated.", dmevh->dso);
log_warn("WARNING: %s: dmeventd plugins are deprecated", dmevh->dso);
if ((err = _do_event(DM_EVENT_CMD_REGISTER_FOR_EVENT, dmevh->dmeventd_path, &msg,
dmevh->dso, uuid, dmevh->mask, dmevh->timeout)) < 0) {
log_error("%s: event registration failed: %s.",
log_error("%s: event registration failed: %s",
dm_task_get_name(dmt),
msg.data ? msg.data : strerror(-err));
ret = 0;
@@ -654,7 +650,7 @@ int dm_event_unregister_handler(const struct dm_event_handler *dmevh)
if ((err = _do_event(DM_EVENT_CMD_UNREGISTER_FOR_EVENT, dmevh->dmeventd_path, &msg,
dmevh->dso, uuid, dmevh->mask, dmevh->timeout)) < 0) {
log_error("%s: event deregistration failed: %s.",
log_error("%s: event deregistration failed: %s",
dm_task_get_name(dmt),
msg.data ? msg.data : strerror(-err));
ret = 0;
@@ -827,79 +823,6 @@ int dm_event_get_version(struct dm_event_fifos *fifos, int *version) {
return 1;
}
void dm_event_log_set(int debug_level, int use_syslog)
{
_debug_level = debug_level;
_use_syslog = use_syslog;
}
void dm_event_log(const char *subsys, int level, const char *file,
int line, int dm_errno_or_class,
const char *format, va_list ap)
{
static pthread_mutex_t _log_mutex = PTHREAD_MUTEX_INITIALIZER;
static time_t start = 0;
const char *indent = "";
FILE *stream = stdout;
int prio = -1;
time_t now;
switch (level & ~(_LOG_STDERR | _LOG_ONCE)) {
case _LOG_DEBUG:
if (_debug_level < 3)
return;
prio = LOG_DEBUG;
indent = " ";
break;
case _LOG_INFO:
if (_debug_level < 2)
return;
prio = LOG_INFO;
indent = " ";
break;
case _LOG_NOTICE:
if (_debug_level < 1)
return;
prio = LOG_NOTICE;
indent = " ";
break;
case _LOG_WARN:
prio = LOG_WARNING;
break;
case _LOG_ERR:
prio = LOG_ERR;
stream = stderr;
break;
default:
prio = LOG_CRIT;
}
/* Serialize to keep lines readable */
pthread_mutex_lock(&_log_mutex);
if (_use_syslog) {
vsyslog(prio, format, ap);
} else {
now = time(NULL);
if (!start)
start = now;
now -= start;
fprintf(stream, "[%2d:%02d] %8x:%-6s%s",
(int)now / 60, (int)now % 60,
// TODO: Maybe use shorter ID
// ((int)(pthread_self()) >> 6) & 0xffff,
(int)pthread_self(), subsys,
(_debug_level > 3) ? "" : indent);
if (_debug_level > 3)
fprintf(stream, "%28s:%4d %s", file, line, indent);
vfprintf(stream, _(format), ap);
fputc('\n', stream);
fflush(stream);
}
pthread_mutex_unlock(&_log_mutex);
}
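For reference, a hedged illustration (sample values only) of the stdout line produced by the non-syslog branch above: the bracketed field is minutes:seconds since the first message, followed by the thread id in hex, the subsystem tag, and the level-dependent indent.
/* e.g. with debug level 2 an informational message would appear roughly as:
 *   [ 0:07] 7f2a51c0:snap    Monitoring snapshot vg-lv0.
 * with debug level > 3 the source file and line number are inserted as well. */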
#if 0 /* left out for now */
static char *_skip_string(char *src, const int delimiter)
@@ -933,7 +856,7 @@ int dm_event_get_timeout(const char *device_path, uint32_t *timeout)
0, 0))) {
char *p = _skip_string(msg.data, ' ');
if (!p) {
log_error("Malformed reply from dmeventd '%s'.",
log_error("malformed reply from dmeventd '%s'\n",
msg.data);
dm_free(msg.data);
return -EIO;


@@ -1,5 +1,5 @@
/*
* Copyright (C) 2005-2015 Red Hat, Inc. All rights reserved.
* Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
*
* This file is part of the device-mapper userspace tools.
*
@@ -105,25 +105,6 @@ int dm_event_get_registered_device(struct dm_event_handler *dmevh, int next);
int dm_event_register_handler(const struct dm_event_handler *dmevh);
int dm_event_unregister_handler(const struct dm_event_handler *dmevh);
/* Set debug level for logging, and whether to log on stdout/stderr or syslog */
void dm_event_log_set(int debug_level, int use_syslog);
/* Log messages according to the current debug level */
__attribute__((format(printf, 6, 0)))
void dm_event_log(const char *subsys, int level, const char *file,
int line, int dm_errno_or_class,
const char *format, va_list ap);
/* Macro to route print_log to dm_event_log() */
#define DM_EVENT_LOG_FN(subsys) \
void print_log(int level, const char *file, int line, int dm_errno_or_class,\
const char *format, ...)\
{\
va_list ap;\
va_start(ap, format);\
dm_event_log(subsys, level, file, line, dm_errno_or_class, format, ap);\
va_end(ap);\
}
/* Prototypes for DSO interface, see dmeventd.c, struct dso_data for
detailed descriptions. */
// FIXME misuse of bitmask as enum
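For orientation, a minimal sketch (hypothetical "demo" subsystem, not part of this diff) of how a DSO would wire itself into the logging interface declared above; it assumes the usual lvm2 log_* macros ultimately expand to print_log(), which DM_EVENT_LOG_FN() supplies:

#include "libdevmapper-event.h"

DM_EVENT_LOG_FN("demo")  /* defines print_log(), forwarding everything to dm_event_log() */

static void _demo_set_verbosity(int level)
{
	/* level >= 3 also enables _LOG_DEBUG messages; the second argument
	 * selects stdout/stderr (0) versus syslog (1) inside dm_event_log() */
	dm_event_log_set(level, 0);
}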


@@ -1,5 +1,5 @@
/*
* Copyright (C) 2010-2015 Red Hat, Inc. All rights reserved.
* Copyright (C) 2010 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -13,11 +13,15 @@
*/
#include "lib.h"
#include "dmeventd_lvm.h"
#include "libdevmapper-event.h"
#include "log.h"
#include "lvm2cmd.h"
#include "dmeventd_lvm.h"
#include <pthread.h>
#include <syslog.h>
extern int dmeventd_debug;
/*
* register_device() is called first and performs initialisation.
@@ -32,19 +36,48 @@ static int _register_count = 0;
static struct dm_pool *_mem_pool = NULL;
static void *_lvm_handle = NULL;
DM_EVENT_LOG_FN("lvm")
static void _lvm2_print_log(int level, const char *file, int line,
int dm_errno_or_class, const char *msg)
{
print_log(level, file, line, dm_errno_or_class, "%s", msg);
}
/*
* Currently only one event can be processed at a time.
*/
static pthread_mutex_t _event_mutex = PTHREAD_MUTEX_INITIALIZER;
/*
* FIXME Do not pass things directly to syslog, rather use the existing logging
* facilities to sort logging ... however that mechanism needs to be somehow
* configurable and we don't have that option yet
*/
static void _temporary_log_fn(int level,
const char *file __attribute__((unused)),
int line __attribute__((unused)),
int dm_errno __attribute__((unused)),
const char *message)
{
level &= ~(_LOG_STDERR | _LOG_ONCE);
switch (level) {
case _LOG_DEBUG:
if (dmeventd_debug >= 3)
syslog(LOG_DEBUG, "%s", message);
break;
case _LOG_INFO:
if (dmeventd_debug >= 2)
syslog(LOG_INFO, "%s", message);
break;
case _LOG_NOTICE:
if (dmeventd_debug >= 1)
syslog(LOG_NOTICE, "%s", message);
break;
case _LOG_WARN:
syslog(LOG_WARNING, "%s", message);
break;
case _LOG_ERR:
syslog(LOG_ERR, "%s", message);
break;
default:
syslog(LOG_CRIT, "%s", message);
}
}
void dmeventd_lvm2_lock(void)
{
pthread_mutex_lock(&_event_mutex);
@@ -61,26 +94,24 @@ int dmeventd_lvm2_init(void)
pthread_mutex_lock(&_register_mutex);
/*
* Need some space for allocations. 1024 should be more
* than enough for what we need (device mapper name splitting)
*/
if (!_mem_pool && !(_mem_pool = dm_pool_create("mirror_dso", 1024)))
goto out;
if (!_lvm_handle) {
lvm2_log_fn(_lvm2_print_log);
if (!(_lvm_handle = lvm2_init()))
goto out;
/*
* Need some space for allocations. 1024 should be more
* than enough for what we need (device mapper name splitting)
*/
if (!_mem_pool && !(_mem_pool = dm_pool_create("mirror_dso", 1024))) {
lvm2_exit(_lvm_handle);
_lvm_handle = NULL;
if (!getenv("LVM_LOG_FILE_EPOCH"))
lvm2_log_fn(_temporary_log_fn);
if (!(_lvm_handle = lvm2_init())) {
dm_pool_destroy(_mem_pool);
_mem_pool = NULL;
goto out;
}
lvm2_disable_dmeventd_monitoring(_lvm_handle);
/* FIXME Temporary: move to dmeventd core */
lvm2_run(_lvm_handle, "_memlock_inc");
log_debug("lvm plugin initilized.");
}
_register_count++;
@@ -96,13 +127,11 @@ void dmeventd_lvm2_exit(void)
pthread_mutex_lock(&_register_mutex);
if (!--_register_count) {
log_debug("lvm plugin shuting down.");
lvm2_run(_lvm_handle, "_memlock_dec");
dm_pool_destroy(_mem_pool);
_mem_pool = NULL;
lvm2_exit(_lvm_handle);
_lvm_handle = NULL;
log_debug("lvm plugin exited.");
}
pthread_mutex_unlock(&_register_mutex);
@@ -125,8 +154,8 @@ int dmeventd_lvm2_command(struct dm_pool *mem, char *buffer, size_t size,
int r;
if (!dm_split_lvm_name(mem, device, &vg, &lv, &layer)) {
log_error("Unable to determine VG name from %s.",
device);
syslog(LOG_ERR, "Unable to determine VG name from %s.\n",
device);
return 0;
}
@@ -140,7 +169,7 @@ int dmeventd_lvm2_command(struct dm_pool *mem, char *buffer, size_t size,
dm_pool_free(mem, vg);
if (r < 0) {
log_error("Unable to form LVM command. (too long).");
syslog(LOG_ERR, "Unable to form LVM command. (too long).\n");
return 0;
}


@@ -1,5 +1,5 @@
/*
* Copyright (C) 2010-2015 Red Hat, Inc. All rights reserved.
* Copyright (C) 2010 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -39,36 +39,4 @@ struct dm_pool *dmeventd_lvm2_pool(void);
int dmeventd_lvm2_command(struct dm_pool *mem, char *buffer, size_t size,
const char *cmd, const char *device);
#define dmeventd_lvm2_run_with_lock(cmdline) \
({\
int rc;\
dmeventd_lvm2_lock();\
rc = dmeventd_lvm2_run(cmdline);\
dmeventd_lvm2_unlock();\
rc;\
})
#define dmeventd_lvm2_init_with_pool(name, st) \
({\
struct dm_pool *mem;\
st = NULL;\
if (dmeventd_lvm2_init()) {\
if ((mem = dm_pool_create(name, 2048)) &&\
(st = dm_pool_zalloc(mem, sizeof(*st))))\
st->mem = mem;\
else {\
if (mem)\
dm_pool_destroy(mem);\
dmeventd_lvm2_exit();\
}\
}\
st;\
})
#define dmeventd_lvm2_exit_with_pool(pool) \
do {\
dm_pool_destroy(pool->mem);\
dmeventd_lvm2_exit();\
} while(0)
#endif /* _DMEVENTD_LVMWRAP_H */
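As a usage illustration (condensed from the mirror and raid plugins later in this diff; the state structure and command string are examples only), a monitoring DSO would typically combine the wrapper macros above like this:

#include "dmeventd_lvm.h"
#include "libdevmapper-event.h"

struct my_state {
	struct dm_pool *mem;
	char cmd[512];
};

int register_device(const char *device, const char *uuid,
		    int major, int minor, void **user)
{
	struct my_state *state;

	/* creates a private pool, zeroes *state and stores the pool in state->mem */
	if (!dmeventd_lvm2_init_with_pool("my_state", state))
		return 0;

	/* turns the dm device name into a VG/LV argument for the lvm command */
	if (!dmeventd_lvm2_command(state->mem, state->cmd, sizeof(state->cmd),
				   "lvextend --use-policies", device)) {
		dmeventd_lvm2_exit_with_pool(state);
		return 0;
	}

	*user = state;
	return 1;
}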


@@ -1,5 +1,5 @@
/*
* Copyright (C) 2005-2015 Red Hat, Inc. All rights reserved.
* Copyright (C) 2005-2012 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -13,24 +13,20 @@
*/
#include "lib.h"
#include "libdevmapper-event.h"
#include "dmeventd_lvm.h"
#include "defaults.h"
#include <syslog.h> /* FIXME Replace syslog with multilog */
/* FIXME Missing openlog? */
/* FIXME Replace most syslogs with log_error() style messages and add complete context. */
/* FIXME Reformat to 80 char lines. */
#define ME_IGNORE 0
#define ME_INSYNC 1
#define ME_FAILURE 2
struct dso_state {
struct dm_pool *mem;
char cmd_lvscan[512];
char cmd_lvconvert[512];
};
DM_EVENT_LOG_FN("mirr")
static int _process_status_code(const char status_code, const char *dev_name,
const char *dev_type, int r)
{
@@ -43,15 +39,18 @@ static int _process_status_code(const char status_code, const char *dev_name,
* U => Unclassified failure (bug)
*/
if (status_code == 'F') {
log_error("%s device %s flush failed.", dev_type, dev_name);
syslog(LOG_ERR, "%s device %s flush failed.",
dev_type, dev_name);
r = ME_FAILURE;
} else if (status_code == 'S')
log_error("%s device %s sync failed.", dev_type, dev_name);
syslog(LOG_ERR, "%s device %s sync failed.",
dev_type, dev_name);
else if (status_code == 'R')
log_error("%s device %s read failed.", dev_type, dev_name);
syslog(LOG_ERR, "%s device %s read failed.",
dev_type, dev_name);
else if (status_code != 'A') {
log_error("%s device %s has failed (%c).",
dev_type, dev_name, status_code);
syslog(LOG_ERR, "%s device %s has failed (%c).",
dev_type, dev_name, status_code);
r = ME_FAILURE;
}
@@ -126,49 +125,62 @@ out:
out_parse:
dm_free(args);
log_error("Unable to parse mirror status string.");
syslog(LOG_ERR, "Unable to parse mirror status string.");
return ME_IGNORE;
}
static int _remove_failed_devices(const char *cmd_lvscan, const char *cmd_lvconvert)
static int _remove_failed_devices(const char *device)
{
int r;
#define CMD_SIZE 256 /* FIXME Use system restriction */
char cmd_str[CMD_SIZE];
if (!dmeventd_lvm2_run_with_lock(cmd_lvscan))
log_info("Re-scan of mirrored device failed.");
if (!dmeventd_lvm2_command(dmeventd_lvm2_pool(), cmd_str, sizeof(cmd_str),
"lvscan --cache", device))
return -1;
r = dmeventd_lvm2_run(cmd_str);
if (!r)
syslog(LOG_INFO, "Re-scan of mirror device %s failed.", device);
if (!dmeventd_lvm2_command(dmeventd_lvm2_pool(), cmd_str, sizeof(cmd_str),
"lvconvert --config devices{ignore_suspended_devices=1} "
"--repair --use-policies", device))
return -ENAMETOOLONG; /* FIXME Replace with generic error return - reason for failure has already been logged */
/* if repair goes OK, report success even if lvscan has failed */
r = dmeventd_lvm2_run_with_lock(cmd_lvconvert);
r = dmeventd_lvm2_run(cmd_str);
log_info("Repair of mirrored device %s.",
(r) ? "finished successfully" : "failed");
syslog(LOG_INFO, "Repair of mirrored device %s %s.", device,
(r) ? "finished successfully" : "failed");
return r;
return (r) ? 0 : -1;
}
void process_event(struct dm_task *dmt,
enum dm_event_mask event __attribute__((unused)),
void **user)
void **unused __attribute__((unused)))
{
struct dso_state *state = *user;
void *next = NULL;
uint64_t start, length;
char *target_type = NULL;
char *params;
const char *device = dm_task_get_name(dmt);
dmeventd_lvm2_lock();
do {
next = dm_get_next_target(dmt, next, &start, &length,
&target_type, &params);
if (!target_type) {
log_info("%s mapping lost.", device);
syslog(LOG_INFO, "%s mapping lost.", device);
continue;
}
if (strcmp(target_type, "mirror")) {
log_info("%s has unmirrored portion.", device);
syslog(LOG_INFO, "%s has unmirrored portion.", device);
continue;
}
@@ -178,75 +190,54 @@ void process_event(struct dm_task *dmt,
_part_ of the device is in sync
Also, this is not an error
*/
log_notice("%s is now in-sync.", device);
syslog(LOG_NOTICE, "%s is now in-sync.", device);
break;
case ME_FAILURE:
log_error("Device failure in %s.", device);
if (!_remove_failed_devices(state->cmd_lvscan,
state->cmd_lvconvert))
syslog(LOG_ERR, "Device failure in %s.", device);
if (_remove_failed_devices(device))
/* FIXME Why are all the error return codes unused? Get rid of them? */
log_error("Failed to remove faulty devices in %s.",
device);
syslog(LOG_ERR, "Failed to remove faulty devices in %s.",
device);
/* Should check before warning user that device is now linear
else
log_notice("%s is now a linear device.",
device);
syslog(LOG_NOTICE, "%s is now a linear device.\n",
device);
*/
break;
case ME_IGNORE:
break;
default:
/* FIXME Provide value then! */
log_info("Unknown event received.");
syslog(LOG_INFO, "Unknown event received.");
}
} while (next);
dmeventd_lvm2_unlock();
}
int register_device(const char *device,
const char *uuid __attribute__((unused)),
int major __attribute__((unused)),
int minor __attribute__((unused)),
void **user)
void **unused __attribute__((unused)))
{
struct dso_state *state;
if (!dmeventd_lvm2_init())
return 0;
if (!dmeventd_lvm2_init_with_pool("mirror_state", state))
goto_bad;
if (!dmeventd_lvm2_command(state->mem, state->cmd_lvscan, sizeof(state->cmd_lvscan),
"lvscan --cache", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}
if (!dmeventd_lvm2_command(state->mem, state->cmd_lvconvert, sizeof(state->cmd_lvconvert),
"lvconvert --repair --use-policies", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}
*user = state;
log_info("Monitoring mirror device %s for events.", device);
syslog(LOG_INFO, "Monitoring mirror device %s for events.", device);
return 1;
bad:
log_error("Failed to monitor mirror %s.", device);
return 0;
}
int unregister_device(const char *device,
const char *uuid __attribute__((unused)),
int major __attribute__((unused)),
int minor __attribute__((unused)),
void **user)
void **unused __attribute__((unused)))
{
struct dso_state *state = *user;
dmeventd_lvm2_exit_with_pool(state);
log_info("No longer monitoring mirror device %s for events.",
device);
syslog(LOG_INFO, "No longer monitoring mirror device %s for events.",
device);
dmeventd_lvm2_exit();
return 1;
}


@@ -1,5 +1,5 @@
/*
* Copyright (C) 2005-2015 Red Hat, Inc. All rights reserved.
* Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -13,133 +13,168 @@
*/
#include "lib.h"
#include "dmeventd_lvm.h"
#include "libdevmapper-event.h"
#include "dmeventd_lvm.h"
struct dso_state {
struct dm_pool *mem;
char cmd_lvscan[512];
char cmd_lvconvert[512];
int failed;
};
DM_EVENT_LOG_FN("raid")
#include <syslog.h> /* FIXME Replace syslog with multilog */
/* FIXME Missing openlog? */
/* FIXME Replace most syslogs with log_error() style messages and add complete context. */
/* FIXME Reformat to 80 char lines. */
static int _process_raid_event(struct dso_state *state, char *params, const char *device)
/*
* run_repair is a close copy of
* plugins/mirror/dmeventd_mirror.c:_remove_failed_devices()
*/
static int run_repair(const char *device)
{
struct dm_status_raid *status;
const char *d;
int r;
#define CMD_SIZE 256 /* FIXME Use system restriction */
char cmd_str[CMD_SIZE];
if (!dm_get_status_raid(state->mem, params, &status)) {
log_error("Failed to process status line for %s.", device);
return 0;
if (!dmeventd_lvm2_command(dmeventd_lvm2_pool(), cmd_str, sizeof(cmd_str),
"lvscan --cache", device))
return -1;
r = dmeventd_lvm2_run(cmd_str);
if (!r)
syslog(LOG_INFO, "Re-scan of RAID device %s failed.", device);
if (!dmeventd_lvm2_command(dmeventd_lvm2_pool(), cmd_str, sizeof(cmd_str),
"lvconvert --config devices{ignore_suspended_devices=1} "
"--repair --use-policies", device))
return -1;
/* if repair goes OK, report success even if lvscan has failed */
r = dmeventd_lvm2_run(cmd_str);
if (!r)
syslog(LOG_INFO, "Repair of RAID device %s failed.", device);
return (r) ? 0 : -1;
}
static int _process_raid_event(char *params, const char *device)
{
int i, n, failure = 0;
char *p, *a[4];
char *raid_type;
char *num_devices;
char *health_chars;
char *resync_ratio;
/*
* RAID parms: <raid_type> <#raid_disks> \
* <health chars> <resync ratio>
*/
if (!dm_split_words(params, 4, 0, a)) {
syslog(LOG_ERR, "Failed to process status line for %s\n",
device);
return -EINVAL;
}
raid_type = a[0];
num_devices = a[1];
health_chars = a[2];
resync_ratio = a[3];
if (!(n = atoi(num_devices))) {
syslog(LOG_ERR, "Failed to parse number of devices for %s: %s",
device, num_devices);
return -EINVAL;
}
if ((d = strchr(status->dev_health, 'D'))) {
if (state->failed)
goto out; /* already reported */
log_error("Device #%d of %s array, %s, has failed.",
(int)(d - status->dev_health),
status->raid_type, device);
state->failed = 1;
if (!dmeventd_lvm2_run_with_lock(state->cmd_lvscan))
log_warn("WARNING: Re-scan of RAID device %s failed.", device);
/* if repair goes OK, report success even if lvscan has failed */
if (!dmeventd_lvm2_run_with_lock(state->cmd_lvconvert)) {
log_info("Repair of RAID device %s failed.", device);
dm_pool_free(state->mem, status);
return 0;
for (i = 0; i < n; i++) {
switch (health_chars[i]) {
case 'A':
/* Device is 'A'live and well */
case 'a':
/* Device is 'a'live, but not yet in-sync */
break;
case 'D':
syslog(LOG_ERR,
"Device #%d of %s array, %s, has failed.",
i, raid_type, device);
failure++;
break;
default:
/* Unhandled character returned from kernel */
break;
}
} else {
state->failed = 0;
log_info("%s array, %s, is %s in-sync.",
status->raid_type, device,
(status->insync_regions == status->total_regions) ? "now" : "not");
if (failure)
return run_repair(device);
}
out:
dm_pool_free(state->mem, status);
return 1;
p = strstr(resync_ratio, "/");
if (!p) {
syslog(LOG_ERR, "Failed to parse resync_ratio for %s: %s",
device, resync_ratio);
return -EINVAL;
}
p[0] = '\0';
syslog(LOG_INFO, "%s array, %s, is %s in-sync.",
raid_type, device, strcmp(resync_ratio, p+1) ? "not" : "now");
return 0;
}
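A standalone sketch (plain C with a hypothetical status string; the plugin itself uses dm_split_words()) of parsing the four-field raid status format documented above:

#include <stdio.h>

int main(void)
{
	char params[] = "raid1 2 AD 2048/4096";	/* <raid_type> <#raid_disks> <health chars> <resync ratio> */
	char raid_type[16], health_chars[16], resync_ratio[32];
	int n;

	if (sscanf(params, "%15s %d %15s %31s",
		   raid_type, &n, health_chars, resync_ratio) != 4)
		return 1;

	for (int i = 0; i < n; i++)
		if (health_chars[i] == 'D')
			printf("Device #%d of %s array has failed.\n", i, raid_type);

	return 0;
}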
void process_event(struct dm_task *dmt,
enum dm_event_mask event __attribute__((unused)),
void **user)
void **unused __attribute__((unused)))
{
struct dso_state *state = *user;
void *next = NULL;
uint64_t start, length;
char *target_type = NULL;
char *params;
const char *device = dm_task_get_name(dmt);
dmeventd_lvm2_lock();
do {
next = dm_get_next_target(dmt, next, &start, &length,
&target_type, &params);
if (!target_type) {
log_info("%s mapping lost.", device);
syslog(LOG_INFO, "%s mapping lost.", device);
continue;
}
if (strcmp(target_type, "raid")) {
log_info("%s has non-raid portion.", device);
syslog(LOG_INFO, "%s has non-raid portion.", device);
continue;
}
if (!_process_raid_event(state, params, device))
log_error("Failed to process event for %s.",
device);
if (_process_raid_event(params, device))
syslog(LOG_ERR, "Failed to process event for %s",
device);
} while (next);
dmeventd_lvm2_unlock();
}
int register_device(const char *device,
const char *uuid __attribute__((unused)),
int major __attribute__((unused)),
int minor __attribute__((unused)),
void **user)
void **unused __attribute__((unused)))
{
struct dso_state *state;
if (!dmeventd_lvm2_init())
return 0;
if (!dmeventd_lvm2_init_with_pool("raid_state", state))
goto_bad;
if (!dmeventd_lvm2_command(state->mem, state->cmd_lvscan, sizeof(state->cmd_lvscan),
"lvscan --cache", device) ||
!dmeventd_lvm2_command(state->mem, state->cmd_lvconvert, sizeof(state->cmd_lvconvert),
"lvconvert --config devices{ignore_suspended_devices=1} "
"--repair --use-policies", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}
*user = state;
log_info("Monitoring RAID device %s for events.", device);
syslog(LOG_INFO, "Monitoring RAID device %s for events.", device);
return 1;
bad:
log_error("Failed to monitor RAID %s.", device);
return 0;
}
int unregister_device(const char *device,
const char *uuid __attribute__((unused)),
int major __attribute__((unused)),
int minor __attribute__((unused)),
void **user)
void **unused __attribute__((unused)))
{
struct dso_state *state = *user;
dmeventd_lvm2_exit_with_pool(state);
log_info("No longer monitoring RAID device %s for events.",
device);
syslog(LOG_INFO, "No longer monitoring RAID device %s for events.",
device);
dmeventd_lvm2_exit();
return 1;
}


@@ -1,5 +1,5 @@
/*
* Copyright (C) 2007-2015 Red Hat, Inc. All rights reserved.
* Copyright (C) 2007-2011 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -13,31 +13,31 @@
*/
#include "lib.h"
#include "dmeventd_lvm.h"
#include "libdevmapper-event.h"
#include "dmeventd_lvm.h"
#include <sys/wait.h>
#include <syslog.h> /* FIXME Replace syslog with multilog */
#include <stdarg.h>
#include <pthread.h>
/* FIXME Missing openlog? */
/* First warning when snapshot is 80% full. */
#define WARNING_THRESH (DM_PERCENT_1 * 80)
#define WARNING_THRESH 80
/* Run a check every 5%. */
#define CHECK_STEP (DM_PERCENT_1 * 5)
#define CHECK_STEP 5
/* Do not bother checking snapshots less than 50% full. */
#define CHECK_MINIMUM (DM_PERCENT_1 * 50)
#define CHECK_MINIMUM 50
#define UMOUNT_COMMAND "/bin/umount"
struct dso_state {
struct dm_pool *mem;
dm_percent_t percent_check;
int percent_check;
uint64_t known_size;
char cmd_lvextend[512];
char cmd_str[1024];
};
DM_EVENT_LOG_FN("snap")
static int _run(const char *cmd, ...)
{
va_list ap;
@@ -62,7 +62,7 @@ static int _run(const char *cmd, ...)
va_end(ap);
execvp(cmd, (char **)argv);
log_sys_error("exec", cmd);
syslog(LOG_ERR, "Failed to execute %s: %s.\n", cmd, strerror(errno));
exit(127);
}
@@ -81,56 +81,18 @@ static int _run(const char *cmd, ...)
static int _extend(const char *cmd)
{
log_debug("Extending snapshot via %s.", cmd);
return dmeventd_lvm2_run_with_lock(cmd);
return dmeventd_lvm2_run(cmd);
}
#ifdef SNAPSHOT_REMOVE
/* Remove invalid snapshot from dm-table */
/* Experimental for now and not used by default */
static int _remove(const char *uuid)
{
int r = 1;
uint32_t cookie = 0;
struct dm_task *dmt;
if (!(dmt = dm_task_create(DM_DEVICE_REMOVE)))
return 0;
if (!dm_task_set_uuid(dmt, uuid)) {
r = 0;
goto_out;
}
dm_task_retry_remove(dmt);
if (!dm_task_set_cookie(dmt, &cookie, 0)) {
r = 0;
goto_out;
}
if (!dm_task_run(dmt)) {
r = 0;
goto_out;
}
out:
dm_task_destroy(dmt);
return r;
}
#endif /* SNAPSHOT_REMOVE */
static void _umount(const char *device, int major, int minor)
{
FILE *mounts;
char buffer[4096];
char *words[3];
struct stat st;
const char procmounts[] = "/proc/mounts";
if (!(mounts = fopen(procmounts, "r"))) {
log_sys_error("fopen", procmounts);
log_error("Not umounting %s.", device);
if (!(mounts = fopen("/proc/mounts", "r"))) {
syslog(LOG_ERR, "Could not read /proc/mounts. Not umounting %s.\n", device);
return;
}
@@ -150,22 +112,21 @@ static void _umount(const char *device, int major, int minor)
if (S_ISBLK(st.st_mode) &&
major(st.st_rdev) == major &&
minor(st.st_rdev) == minor) {
log_error("Unmounting invalid snapshot %s from %s.", device, words[1]);
if (!_run(UMOUNT_COMMAND, "-fl", words[1], NULL))
log_error("Failed to umount snapshot %s from %s: %s.",
device, words[1], strerror(errno));
syslog(LOG_ERR, "Unmounting invalid snapshot %s from %s.\n", device, words[1]);
if (!_run(UMOUNT_COMMAND, "-fl", words[1], NULL))
syslog(LOG_ERR, "Failed to umount snapshot %s from %s: %s.\n",
device, words[1], strerror(errno));
}
}
if (fclose(mounts))
log_sys_error("close", procmounts);
syslog(LOG_ERR, "Failed to close /proc/mounts.\n");
}
void process_event(struct dm_task *dmt,
enum dm_event_mask event __attribute__((unused)),
void **user)
void **private)
{
struct dso_state *state = *user;
void *next = NULL;
uint64_t start, length;
char *target_type = NULL;
@@ -173,47 +134,28 @@ void process_event(struct dm_task *dmt,
struct dm_status_snapshot *status = NULL;
const char *device = dm_task_get_name(dmt);
int percent;
struct dm_info info;
struct dso_state *state = *private;
/* No longer monitoring, waiting for remove */
if (!state->percent_check)
return;
dmeventd_lvm2_lock();
dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
if (!target_type || strcmp(target_type, "snapshot")) {
log_error("Target %s is not snapshot.", target_type);
return;
}
if (!target_type)
goto out;
if (!dm_get_status_snapshot(state->mem, params, &status)) {
log_error("Cannot parse snapshot %s state: %s.", device, params);
return;
}
if (!dm_get_status_snapshot(state->mem, params, &status))
goto out;
/*
* If the snapshot has been invalidated or we failed to parse
* the status string, report the full status string to syslog.
*/
if (status->invalid || status->overflow || !status->total_sectors) {
log_warn("WARNING: Snapshot %s changed state to: %s and should be removed.",
device, params);
state->percent_check = 0;
if (dm_task_get_info(dmt, &info))
if (status->invalid) {
struct dm_info info;
if (dm_task_get_info(dmt, &info)) {
dmeventd_lvm2_unlock();
_umount(device, info.major, info.minor);
#ifdef SNAPSHOT_REMOVE
/* Maybe configurable ? */
_remove(dm_task_get_uuid(dmt));
#endif
pthread_kill(pthread_self(), SIGALRM);
goto out;
}
if (length <= (status->used_sectors - status->metadata_sectors)) {
/* TODO eventually recognize earlier when room is enough */
log_info("Dropping monitoring of fully provisioned snapshot %s.",
device);
pthread_kill(pthread_self(), SIGALRM);
goto out;
return;
} /* else; too bad, but this is best-effort thing... */
}
/* Snapshot size had changed. Clear the threshold. */
@@ -222,50 +164,69 @@ void process_event(struct dm_task *dmt,
state->known_size = status->total_sectors;
}
percent = dm_make_percent(status->used_sectors, status->total_sectors);
/*
* If the snapshot has been invalidated or we failed to parse
* the status string, report the full status string to syslog.
*/
if (status->invalid || !status->total_sectors) {
syslog(LOG_ERR, "Snapshot %s changed state to: %s\n", device, params);
state->percent_check = 0;
goto out;
}
percent = (int) (100 * status->used_sectors / status->total_sectors);
if (percent >= state->percent_check) {
/* Usage has risen more than CHECK_STEP since the last
time. Run actions. */
state->percent_check = (percent / CHECK_STEP) * CHECK_STEP + CHECK_STEP;
if (percent >= WARNING_THRESH) /* Print a warning to syslog. */
log_warn("WARNING: Snapshot %s is now %.2f%% full.",
device, dm_percent_to_float(percent));
syslog(LOG_WARNING, "Snapshot %s is now %i%% full.\n", device, percent);
/* Try to extend the snapshot, in accord with user-set policies */
if (!_extend(state->cmd_lvextend))
log_error("Failed to extend snapshot %s.", device);
if (!_extend(state->cmd_str))
syslog(LOG_ERR, "Failed to extend snapshot %s.\n", device);
}
out:
dm_pool_free(state->mem, status);
if (status)
dm_pool_free(state->mem, status);
dmeventd_lvm2_unlock();
}
int register_device(const char *device,
const char *uuid __attribute__((unused)),
int major __attribute__((unused)),
int minor __attribute__((unused)),
void **user)
void **private)
{
struct dm_pool *statemem = NULL;
struct dso_state *state;
if (!dmeventd_lvm2_init_with_pool("snapshot_state", state))
goto_bad;
if (!dmeventd_lvm2_init())
goto out;
if (!dmeventd_lvm2_command(state->mem, state->cmd_lvextend,
sizeof(state->cmd_lvextend),
"lvextend --use-policies", device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
}
if (!(statemem = dm_pool_create("snapshot_state", 512)) ||
!(state = dm_pool_zalloc(statemem, sizeof(*state))))
goto bad;
if (!dmeventd_lvm2_command(statemem, state->cmd_str,
sizeof(state->cmd_str),
"lvextend --use-policies", device))
goto bad;
state->mem = statemem;
state->percent_check = CHECK_MINIMUM;
*user = state;
*private = state;
log_info("Monitoring snapshot %s.", device);
syslog(LOG_INFO, "Monitoring snapshot %s\n", device);
return 1;
bad:
log_error("Failed to monitor snapshot %s.", device);
if (statemem)
dm_pool_destroy(statemem);
dmeventd_lvm2_exit();
out:
syslog(LOG_ERR, "Failed to monitor snapshot %s.\n", device);
return 0;
}
@@ -274,12 +235,13 @@ int unregister_device(const char *device,
const char *uuid __attribute__((unused)),
int major __attribute__((unused)),
int minor __attribute__((unused)),
void **user)
void **private)
{
struct dso_state *state = *user;
struct dso_state *state = *private;
dmeventd_lvm2_exit_with_pool(state);
log_info("No longer monitoring snapshot %s.", device);
syslog(LOG_INFO, "No longer monitoring snapshot %s\n", device);
dm_pool_destroy(state->mem);
dmeventd_lvm2_exit();
return 1;
}
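A self-contained sketch (plain-integer thresholds as used on one side of this diff; the usage figures are hypothetical) of how the snapshot plugin above steps its next check point:

#include <stdio.h>

#define WARNING_THRESH 80
#define CHECK_STEP 5
#define CHECK_MINIMUM 50

int main(void)
{
	int percent_check = CHECK_MINIMUM;	/* set when the device is registered */
	int used = 63, total = 100;		/* hypothetical snapshot usage in sectors */
	int percent = 100 * used / total;

	if (percent >= percent_check) {
		/* round up to the next CHECK_STEP boundary, as process_event() does */
		percent_check = (percent / CHECK_STEP) * CHECK_STEP + CHECK_STEP;
		if (percent >= WARNING_THRESH)
			printf("WARNING: Snapshot is now %d%% full.\n", percent);
		printf("Next check at %d%%.\n", percent_check);	/* prints 65 */
	}

	return 0;
}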


@@ -1,5 +1,5 @@
/*
* Copyright (C) 2011-2015 Red Hat, Inc. All rights reserved.
* Copyright (C) 2011-2013 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -12,33 +12,25 @@
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "lib.h" /* using here lvm log */
#include "dmeventd_lvm.h"
#include "lib.h"
#include "libdevmapper-event.h"
#include "dmeventd_lvm.h"
#include <sys/wait.h>
#include <syslog.h> /* FIXME Replace syslog with multilog */
#include <stdarg.h>
#include <pthread.h>
/* FIXME Missing openlog? */
/* TODO - move this mountinfo code into library to be reusable */
#ifdef __linux__
# include "kdev_t.h"
#else
# define MAJOR(x) major((x))
# define MINOR(x) minor((x))
#endif
/* First warning when thin data or metadata is 80% full. */
#define WARNING_THRESH (DM_PERCENT_1 * 80)
/* First warning when thin is 80% full. */
#define WARNING_THRESH 80
/* Run a check every 5%. */
#define CHECK_STEP (DM_PERCENT_1 * 5)
/* Do not bother checking thin data or metadata is less than 50% full. */
#define CHECK_MINIMUM (DM_PERCENT_1 * 50)
#define CHECK_STEP 5
/* Do not bother checking thins less than 50% full. */
#define CHECK_MINIMUM 50
#define UMOUNT_COMMAND "/bin/umount"
#define MAX_FAILS (10)
#define THIN_DEBUG 0
struct dso_state {
@@ -47,11 +39,18 @@ struct dso_state {
int data_percent_check;
uint64_t known_metadata_size;
uint64_t known_data_size;
unsigned fails;
char cmd_str[1024];
};
DM_EVENT_LOG_FN("thin")
/* TODO - move this mountinfo code into library to be reusable */
#ifdef __linux__
# include "kdev_t.h"
#else
# define MAJOR(x) major((x))
# define MINOR(x) minor((x))
# define MKDEV(x,y) makedev((x),(y))
#endif
/* Get dependencies for device, and try to find matching device */
static int _has_deps(const char *name, int tp_major, int tp_minor, int *dev_minor)
@@ -94,8 +93,8 @@ static int _has_deps(const char *name, int tp_major, int tp_minor, int *dev_mino
{
char dev_name[PATH_MAX];
if (dm_device_get_name(major, minor, 0, dev_name, sizeof(dev_name)))
log_debug("Found %s (%u:%u) depends on %s.",
name, major, *dev_minor, dev_name);
syslog(LOG_DEBUG, "Found %s (%u:%u) depends on %s",
name, major, *dev_minor, dev_name);
}
#endif
r = 1;
@@ -142,6 +141,14 @@ out:
return r;
}
static int _extend(struct dso_state *state)
{
#if THIN_DEBUG
syslog(LOG_INFO, "dmeventd executes: %s.\n", state->cmd_str);
#endif
return dmeventd_lvm2_run(state->cmd_str);
}
static int _run(const char *cmd, ...)
{
va_list ap;
@@ -161,12 +168,12 @@ static int _run(const char *cmd, ...)
argv = alloca(sizeof(const char *) * (argc + 1));
argv[0] = cmd;
va_start(ap, cmd);
va_start(ap, cmd);
while ((argv[++i] = va_arg(ap, const char *)));
va_end(ap);
execvp(cmd, (char **)argv);
log_sys_error("exec", cmd);
syslog(LOG_ERR, "Failed to execute %s: %s.\n", cmd, strerror(errno));
exit(127);
}
@@ -184,9 +191,9 @@ static int _run(const char *cmd, ...)
}
struct mountinfo_s {
const char *device;
struct dm_info info;
dm_bitset_t minors; /* Bitset for active thin pool minors */
const char *device;
};
static int _umount_device(char *buffer, unsigned major, unsigned minor,
@@ -195,11 +202,11 @@ static int _umount_device(char *buffer, unsigned major, unsigned minor,
struct mountinfo_s *data = cb_data;
if ((major == data->info.major) && dm_bit(data->minors, minor)) {
log_info("Unmounting thin volume %s from %s.",
data->device, target);
syslog(LOG_INFO, "Unmounting thin volume %s from %s.\n",
data->device, target);
if (!_run(UMOUNT_COMMAND, "-fl", target, NULL))
log_error("Failed to umount thin %s from %s: %s.",
data->device, target, strerror(errno));
syslog(LOG_ERR, "Failed to umount thin %s from %s: %s.\n",
data->device, target, strerror(errno));
}
return 1;
@@ -209,94 +216,78 @@ static int _umount_device(char *buffer, unsigned major, unsigned minor,
* Find all thin pool users and try to umount them.
* TODO: work with read-only thin pool support
*/
static void _umount(struct dm_task *dmt)
static void _umount(struct dm_task *dmt, const char *device)
{
/* TODO: Convert to use hash to reduce memory usage */
static const size_t MINORS = (1U << 20); /* 20 bit */
struct mountinfo_s data = { NULL };
struct mountinfo_s data = {
.device = device,
};
if (!dm_task_get_info(dmt, &data.info))
return;
data.device = dm_task_get_name(dmt);
dmeventd_lvm2_unlock();
if (!(data.minors = dm_bitset_create(NULL, MINORS))) {
log_error("Failed to allocate bitset. Not unmounting %s.", data.device);
syslog(LOG_ERR, "Failed to allocate bitset. Not unmounting %s.\n", device);
goto out;
}
if (!_find_all_devs(data.minors, data.info.major, data.info.minor)) {
log_error("Failed to detect mounted volumes for %s.", data.device);
syslog(LOG_ERR, "Failed to detect mounted volumes for %s.\n", device);
goto out;
}
if (!dm_mountinfo_read(_umount_device, &data)) {
log_error("Could not parse mountinfo file.");
syslog(LOG_ERR, "Could not parse mountinfo file.\n");
goto out;
}
out:
if (data.minors)
dm_bitset_destroy(data.minors);
}
static void _use_policy(struct dm_task *dmt, struct dso_state *state)
{
#if THIN_DEBUG
log_info("dmeventd executes: %s.", state->cmd_str);
#endif
if (!dmeventd_lvm2_run_with_lock(state->cmd_str)) {
log_error("Failed to extend thin pool %s.",
dm_task_get_name(dmt));
_umount(dmt);
state->fails++;
} else
state->fails = 0;
dmeventd_lvm2_lock();
}
void process_event(struct dm_task *dmt,
enum dm_event_mask event __attribute__((unused)),
void **user)
void **private)
{
const char *device = dm_task_get_name(dmt);
int percent;
struct dso_state *state = *user;
struct dso_state *state = *private;
struct dm_status_thin_pool *tps = NULL;
void *next = NULL;
uint64_t start, length;
char *target_type = NULL;
char *params;
int needs_policy = 0;
#if 0
/* No longer monitoring, waiting for remove */
if (!state->meta_percent_check && !state->data_percent_check)
return;
#endif
if (event & DM_EVENT_DEVICE_ERROR) {
/* Error -> no need to check and do instant resize */
_use_policy(dmt, state);
goto out;
}
dmeventd_lvm2_lock();
dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
if (!target_type || (strcmp(target_type, "thin-pool") != 0)) {
log_error("Invalid target type.");
syslog(LOG_ERR, "Invalid target type.\n");
goto out;
}
if (!dm_get_status_thin_pool(state->mem, params, &tps)) {
log_error("Failed to parse status.");
_umount(dmt);
syslog(LOG_ERR, "Failed to parse status.\n");
_umount(dmt, device);
goto out;
}
#if THIN_DEBUG
log_debug("Thin pool status " FMTu64 "/" FMTu64 " "
FMTu64 "/" FMTu64 ".",
tps->used_metadata_blocks, tps->total_metadata_blocks,
tps->used_data_blocks, tps->total_data_blocks);
syslog(LOG_INFO, "%p: Got status %" PRIu64 " / %" PRIu64
" %" PRIu64 " / %" PRIu64 ".\n", state,
tps->used_metadata_blocks, tps->total_metadata_blocks,
tps->used_data_blocks, tps->total_data_blocks);
#endif
/* Thin pool size had changed. Clear the threshold. */
@@ -310,7 +301,7 @@ void process_event(struct dm_task *dmt,
state->known_data_size = tps->total_data_blocks;
}
percent = dm_make_percent(tps->used_metadata_blocks, tps->total_metadata_blocks);
percent = 100 * tps->used_metadata_blocks / tps->total_metadata_blocks;
if (percent >= state->metadata_percent_check) {
/*
* Usage has risen more than CHECK_STEP since the last
@@ -320,12 +311,18 @@ void process_event(struct dm_task *dmt,
/* FIXME: extension of metadata needs to be written! */
if (percent >= WARNING_THRESH) /* Print a warning to syslog. */
log_warn("WARNING: Thin pool %s metadata is now %.2f%% full.",
device, dm_percent_to_float(percent));
needs_policy = 1;
syslog(LOG_WARNING, "Thin metadata %s is now %i%% full.\n",
device, percent);
/* Try to extend the metadata, in accord with user-set policies */
if (!_extend(state)) {
syslog(LOG_ERR, "Failed to extend thin metadata %s.\n",
device);
_umount(dmt, device);
}
/* FIXME: hmm READ-ONLY switch should happen in error path */
}
percent = dm_make_percent(tps->used_data_blocks, tps->total_data_blocks);
percent = 100 * tps->used_data_blocks / tps->total_data_blocks;
if (percent >= state->data_percent_check) {
/*
* Usage has risen more than CHECK_STEP since
@@ -334,53 +331,56 @@ void process_event(struct dm_task *dmt,
state->data_percent_check = (percent / CHECK_STEP) * CHECK_STEP + CHECK_STEP;
if (percent >= WARNING_THRESH) /* Print a warning to syslog. */
log_warn("WARNING: Thin pool %s data is now %.2f%% full.",
device, dm_percent_to_float(percent));
needs_policy = 1;
syslog(LOG_WARNING, "Thin %s is now %i%% full.\n", device, percent);
/* Try to extend the thin data, in accord with user-set policies */
if (!_extend(state)) {
syslog(LOG_ERR, "Failed to extend thin %s.\n", device);
state->data_percent_check = 0;
_umount(dmt, device);
}
/* FIXME: hmm READ-ONLY switch should happen in error path */
}
if (needs_policy)
_use_policy(dmt, state);
out:
if (tps)
dm_pool_free(state->mem, tps);
if (state->fails >= MAX_FAILS) {
log_warn("WARNING: Dropping monitoring of %s. "
"lvm2 command fails too often (%u times in raw).",
device, state->fails);
pthread_kill(pthread_self(), SIGALRM);
}
dmeventd_lvm2_unlock();
}
int register_device(const char *device,
const char *uuid __attribute__((unused)),
int major __attribute__((unused)),
int minor __attribute__((unused)),
void **user)
void **private)
{
struct dm_pool *statemem = NULL;
struct dso_state *state;
if (!dmeventd_lvm2_init_with_pool("thin_pool_state", state))
goto_bad;
if (!dmeventd_lvm2_init())
goto bad;
if (!dmeventd_lvm2_command(state->mem, state->cmd_str,
if (!(statemem = dm_pool_create("thin_pool_state", 2048)) ||
!(state = dm_pool_zalloc(statemem, sizeof(*state))) ||
!dmeventd_lvm2_command(statemem, state->cmd_str,
sizeof(state->cmd_str),
"lvextend --use-policies",
device)) {
dmeventd_lvm2_exit_with_pool(state);
goto_bad;
if (statemem)
dm_pool_destroy(statemem);
dmeventd_lvm2_exit();
goto bad;
}
state->mem = statemem;
state->metadata_percent_check = CHECK_MINIMUM;
state->data_percent_check = CHECK_MINIMUM;
*user = state;
*private = state;
log_info("Monitoring thin %s.", device);
syslog(LOG_INFO, "Monitoring thin %s.\n", device);
return 1;
bad:
log_error("Failed to monitor thin %s.", device);
syslog(LOG_ERR, "Failed to monitor thin %s.\n", device);
return 0;
}
@@ -389,12 +389,13 @@ int unregister_device(const char *device,
const char *uuid __attribute__((unused)),
int major __attribute__((unused)),
int minor __attribute__((unused)),
void **user)
void **private)
{
struct dso_state *state = *user;
struct dso_state *state = *private;
dmeventd_lvm2_exit_with_pool(state);
log_info("No longer monitoring thin %s.", device);
syslog(LOG_INFO, "No longer monitoring thin %s.\n", device);
dm_pool_destroy(state->mem);
dmeventd_lvm2_exit();
return 1;
}


@@ -45,8 +45,6 @@ lvmetactl: lvmetactl.o $(top_builddir)/libdaemon/client/libdaemonclient.a \
$(top_builddir)/libdaemon/server/libdaemonserver.a
$(CC) $(CFLAGS) $(LDFLAGS) -o $@ lvmetactl.o $(LVMLIBS)
CLEAN_TARGETS += lvmetactl.o
# TODO: No idea. No idea how to test either.
#ifneq ("$(CFLOW_CMD)", "")
#CFLOW_SOURCES = $(addprefix $(srcdir)/, $(SOURCES))


@@ -34,16 +34,16 @@ int main(int argc, char **argv)
int ver;
if (argc < 2) {
printf("lvmetactl dump\n");
printf("lvmetactl pv_list\n");
printf("lvmetactl vg_list\n");
printf("lvmetactl vg_lookup_name <name>\n");
printf("lvmetactl vg_lookup_uuid <uuid>\n");
printf("lvmetactl pv_lookup_uuid <uuid>\n");
printf("lvmetactl set_global_invalid 0|1\n");
printf("lvmetactl get_global_invalid\n");
printf("lvmetactl set_vg_version <uuid> <name> <version>\n");
printf("lvmetactl vg_lock_type <uuid>\n");
printf("lvmeta dump\n");
printf("lvmeta pv_list\n");
printf("lvmeta vg_list\n");
printf("lvmeta vg_lookup_name <name>\n");
printf("lvmeta vg_lookup_uuid <uuid>\n");
printf("lvmeta pv_lookup_uuid <uuid>\n");
printf("lvmeta set_global_invalid 0|1\n");
printf("lvmeta get_global_invalid\n");
printf("lvmeta set_vg_version <uuid> <version>\n");
printf("lvmeta vg_lock_type <uuid>\n");
return -1;
}
@@ -89,43 +89,18 @@ int main(int argc, char **argv)
printf("%s\n", reply.buffer.mem);
} else if (!strcmp(cmd, "set_vg_version")) {
if (argc < 5) {
printf("set_vg_version <uuid> <name> <ver>\n");
if (argc < 4) {
printf("set_vg_version <uuid> <ver>\n");
return -1;
}
uuid = argv[2];
name = argv[3];
ver = atoi(argv[4]);
if ((strlen(uuid) == 1) && (uuid[0] == '-'))
uuid = NULL;
if ((strlen(name) == 1) && (name[0] == '-'))
name = NULL;
if (uuid && name) {
reply = daemon_send_simple(h, "set_vg_info",
"uuid = %s", uuid,
"name = %s", name,
"version = %d", ver,
"token = %s", "skip",
NULL);
} else if (uuid) {
reply = daemon_send_simple(h, "set_vg_info",
"uuid = %s", uuid,
"version = %d", ver,
"token = %s", "skip",
NULL);
} else if (name) {
reply = daemon_send_simple(h, "set_vg_info",
"name = %s", name,
"version = %d", ver,
"token = %s", "skip",
NULL);
} else {
printf("name or uuid required\n");
return -1;
}
ver = atoi(argv[3]);
reply = daemon_send_simple(h, "set_vg_info",
"uuid = %s", uuid,
"version = %d", ver,
"token = %s", "skip",
NULL);
print_reply(reply);
} else if (!strcmp(cmd, "vg_lookup_name")) {


@@ -1,5 +1,5 @@
/*
* Copyright (C) 2012-2015 Red Hat, Inc.
* Copyright (C) 2012 Red Hat, Inc.
*
* This file is part of LVM2.
*
@@ -24,7 +24,6 @@
#include "lvm-version.h"
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#define LVMETAD_SOCKET DEFAULT_RUN_DIR "/lvmetad.socket"
@@ -124,7 +123,6 @@ struct vg_info {
#define VGFL_INVALID 0x00000001
typedef struct {
daemon_idle *idle;
log_state *log; /* convenience */
const char *log_config;
@@ -580,7 +578,7 @@ static void mark_outdated_pv(lvmetad_state *s, const char *vgid, const char *pvi
!(cft_vgid = make_text_node(outdated_pvs, "vgid", dm_pool_strdup(outdated_pvs->mem, vgid),
outdated_pvs->root, NULL)))
abort();
if (!dm_hash_insert(s->vgid_to_outdated_pvs, cft_vgid->v->v.str, outdated_pvs))
if(!dm_hash_insert(s->vgid_to_outdated_pvs, cft_vgid->v->v.str, outdated_pvs))
abort();
DEBUGLOG(s, "created outdated_pvs list for VG %s", vgid);
}
@@ -1312,29 +1310,20 @@ static response set_vg_info(lvmetad_state *s, request r)
{
struct dm_config_tree *vg;
struct vg_info *info;
const char *name;
const char *uuid;
const char *uuid = daemon_request_str(r, "uuid", NULL);
const int64_t new_version = daemon_request_int(r, "version", -1);
int64_t cache_version;
if (!uuid)
goto out;
if (new_version == -1)
goto out;
if (!(uuid = daemon_request_str(r, "uuid", NULL)))
goto use_name;
if ((vg = dm_hash_lookup(s->vgid_to_metadata, uuid)))
goto vers;
use_name:
if (!(name = daemon_request_str(r, "name", NULL)))
vg = dm_hash_lookup(s->vgid_to_metadata, uuid);
if (!vg)
goto out;
if (!(uuid = dm_hash_lookup(s->vgname_to_vgid, name)))
goto out;
if (!(vg = dm_hash_lookup(s->vgid_to_metadata, uuid)))
goto out;
vers:
if (!new_version)
goto inval;
@@ -1594,9 +1583,6 @@ static int init(daemon_state *s)
/* if (ls->initial_registrations)
_process_initial_registrations(ds->initial_registrations); */
if (ls->idle)
ls->idle->is_idle = 1;
return 1;
}
@@ -1619,39 +1605,21 @@ static int fini(daemon_state *s)
return 1;
}
static int process_timeout_arg(const char *str, unsigned *max_timeouts)
{
char *endptr;
unsigned long l;
errno = 0;
l = strtoul(str, &endptr, 10);
if (errno || *endptr || l >= UINT_MAX)
return 0;
*max_timeouts = (unsigned) l;
return 1;
}
static void usage(const char *prog, FILE *file)
{
fprintf(file, "Usage:\n"
"%s [-V] [-h] [-f] [-l level[,level ...]] [-s path] [-t secs]\n\n"
"%s [-V] [-h] [-f] [-l {all|wire|debug}] [-s path]\n\n"
" -V Show version of lvmetad\n"
" -h Show this help information\n"
" -f Don't fork, run in the foreground\n"
" -l Logging message levels (all,fatal,error,warn,info,wire,debug)\n"
" -l Logging message level (-l {all|wire|debug})\n"
" -p Set path to the pidfile\n"
" -s Set path to the socket to listen on\n"
" -t Time to wait in seconds before shutdown on idle (missing or 0 = inifinite)\n\n", prog);
" -s Set path to the socket to listen on\n\n", prog);
}
int main(int argc, char *argv[])
{
signed char opt;
struct timeval timeout;
daemon_idle di = { .ptimeout = &timeout };
lvmetad_state ls = { .log_config = "" };
daemon_state s = {
.daemon_fini = fini,
@@ -1666,7 +1634,7 @@ int main(int argc, char *argv[])
};
// use getopt_long
while ((opt = getopt(argc, argv, "?fhVl:p:s:t:")) != EOF) {
while ((opt = getopt(argc, argv, "?fhVl:p:s:")) != EOF) {
switch (opt) {
case 'h':
usage(argv[0], stdout);
@@ -1686,15 +1654,6 @@ int main(int argc, char *argv[])
case 's': // --socket
s.socket_path = optarg;
break;
case 't':
if (!process_timeout_arg(optarg, &di.max_timeouts)) {
fprintf(stderr, "Invalid value of timeout parameter.\n");
exit(EXIT_FAILURE);
}
/* 0 equals to wait indefinitely */
if (di.max_timeouts)
s.idle = ls.idle = &di;
break;
case 'V':
printf("lvmetad version: " LVM_VERSION "\n");
exit(1);


@@ -17,7 +17,6 @@
#include <signal.h>
#include <errno.h>
#include <fcntl.h>
#include <syslog.h>
#include <sys/wait.h>
#include <sys/socket.h>
#include <sys/un.h>
@@ -27,16 +26,14 @@ static int info = 0;
static int dump = 0;
static int wait_opt = 0;
static int force_opt = 0;
static int kill_vg = 0;
static int drop_vg = 0;
static int gl_enable = 0;
static int gl_disable = 0;
static int stop_lockspaces = 0;
static char *arg_vg_name = NULL;
static char *able_vg_name = NULL;
#define DUMP_SOCKET_NAME "lvmlockd-dump.sock"
#define DUMP_BUF_SIZE (1024 * 1024)
static char dump_buf[DUMP_BUF_SIZE+1];
static char dump_buf[DUMP_BUF_SIZE];
static int dump_len;
static struct sockaddr_un dump_addr;
static socklen_t dump_addrlen;
@@ -151,12 +148,13 @@ static void format_info_r(char *line, char *r_name_out, char *r_type_out)
sscanf(line, "info=r name=%s type=%s mode=%s %s version=%u",
r_name, r_type, mode, sh_count, &ver);
strcpy(r_name_out, r_name);
strcpy(r_type_out, r_type);
/* when mode is not un, wait and print each lk line */
if (strcmp(mode, "un"))
if (strcmp(mode, "un")) {
strcpy(r_name_out, r_name);
strcpy(r_type_out, r_type);
return;
}
/* when mode is un, there will be no lk lines, so print now */
@@ -228,7 +226,7 @@ static void format_info_r_action(char *line, char *r_name, char *r_type)
find_client_info(client_id, &pid, cl_name);
if (strcmp(op, "lock")) {
printf("OP %s pid %u (%s)\n", op, pid, cl_name);
printf("OP %s pid %u (%s)", op, pid, cl_name);
return;
}
@@ -379,7 +377,6 @@ static int setup_dump_socket(void)
rv = bind(s, (struct sockaddr *) &dump_addr, dump_addrlen);
if (rv < 0) {
rv = -errno;
if (!close(s))
log_error("failed to close dump socket");
return rv;
@@ -393,7 +390,6 @@ static int do_dump(const char *req_name)
daemon_reply reply;
int result;
int fd, rv = 0;
int count = 0;
fd = setup_dump_socket();
if (fd < 0) {
@@ -424,18 +420,13 @@ static int do_dump(const char *req_name)
memset(dump_buf, 0, sizeof(dump_buf));
retry:
rv = recvfrom(fd, dump_buf + count, dump_len - count, MSG_WAITALL,
rv = recvfrom(fd, dump_buf, dump_len, MSG_WAITALL,
(struct sockaddr *)&dump_addr, &dump_addrlen);
if (rv < 0) {
log_error("recvfrom error %d %d", rv, errno);
rv = -errno;
goto out;
}
count += rv;
if (count < dump_len)
goto retry;
rv = 0;
if ((info && dump) || !strcmp(req_name, "dump"))
@@ -455,9 +446,9 @@ static int do_able(const char *req_name)
int rv;
reply = _lvmlockd_send(req_name,
"cmd = %s", "lvmlockctl",
"cmd = %s", "lvmlock",
"pid = %d", getpid(),
"vg_name = %s", arg_vg_name,
"vg_name = %s", able_vg_name,
NULL);
if (!_lvmlockd_result(reply, &result)) {
@@ -486,7 +477,7 @@ static int do_stop_lockspaces(void)
strcat(opts, "force ");
reply = _lvmlockd_send("stop_all",
"cmd = %s", "lvmlockctl",
"cmd = %s", "lvmlock",
"pid = %d", getpid(),
"opts = %s", opts[0] ? opts : "none",
NULL);
@@ -502,87 +493,6 @@ static int do_stop_lockspaces(void)
return rv;
}
static int do_kill(void)
{
daemon_reply reply;
int result;
int rv;
syslog(LOG_EMERG, "Lost access to sanlock lease storage in VG %s.", arg_vg_name);
/* These two lines explain the manual alternative to the FIXME below. */
syslog(LOG_EMERG, "Immediately deactivate LVs in VG %s.", arg_vg_name);
syslog(LOG_EMERG, "Once VG is unused, run lvmlockctl --drop %s.", arg_vg_name);
/*
* It may not be strictly necessary to notify lvmlockd of the kill, but
* lvmlockd can use this information to avoid attempting any new lock
* requests in the VG (which would fail anyway), and can return an
* error indicating that the VG has been killed.
*/
reply = _lvmlockd_send("kill_vg",
"cmd = %s", "lvmlockctl",
"pid = %d", getpid(),
"vg_name = %s", arg_vg_name,
NULL);
if (!_lvmlockd_result(reply, &result)) {
log_error("lvmlockd result %d", result);
rv = result;
} else {
rv = 0;
}
daemon_reply_destroy(reply);
/*
* FIXME: here is where we should implement a strong form of
* blkdeactivate, and if it completes successfully, automatically call
* do_drop() afterward. (The drop step may not always be necessary
* if the lvm commands run while shutting things down release all the
* leases.)
*
* run_strong_blkdeactivate();
* do_drop();
*/
return rv;
}
static int do_drop(void)
{
daemon_reply reply;
int result;
int rv;
syslog(LOG_WARNING, "Dropping locks for VG %s.", arg_vg_name);
/*
* Check for misuse by looking for any active LVs in the VG
* and refusing this operation if found? One possible way
* to kill LVs (e.g. if fs cannot be unmounted) is to suspend
* them, or replace them with the error target. In that
* case the LV will still appear to be active, but it is
* safe to release the lock.
*/
reply = _lvmlockd_send("drop_vg",
"cmd = %s", "lvmlockctl",
"pid = %d", getpid(),
"vg_name = %s", arg_vg_name,
NULL);
if (!_lvmlockd_result(reply, &result)) {
log_error("lvmlockd result %d", result);
rv = result;
} else {
rv = 0;
}
daemon_reply_destroy(reply);
return rv;
}
static void print_usage(void)
{
printf("lvmlockctl options\n");
@@ -599,16 +509,12 @@ static void print_usage(void)
printf(" Wait option for other commands.\n");
printf("--force | -f 0|1>\n");
printf(" Force option for other commands.\n");
printf("--kill | -k <vgname>\n");
printf(" Kill access to the VG when sanlock cannot renew lease.\n");
printf("--drop | -r <vgname>\n");
printf(" Clear locks for the VG when it is unused after kill (-k).\n");
printf("--gl-enable | -E <vgname>\n");
printf(" Tell lvmlockd to enable the global lock in a sanlock VG.\n");
printf("--gl-disable | -D <vgname>\n");
printf(" Tell lvmlockd to disable the global lock in a sanlock VG.\n");
printf("--stop-lockspaces | -S\n");
printf(" Stop all lockspaces.\n");
printf("--gl-enable <vg_name>\n");
printf(" Tell lvmlockd to enable the global lock in a sanlock vg.\n");
printf("--gl-disable <vg_name>\n");
printf(" Tell lvmlockd to disable the global lock in a sanlock vg.\n");
}
static int read_options(int argc, char *argv[])
@@ -623,8 +529,6 @@ static int read_options(int argc, char *argv[])
{"dump", no_argument, 0, 'd' },
{"wait", required_argument, 0, 'w' },
{"force", required_argument, 0, 'f' },
{"kill", required_argument, 0, 'k' },
{"drop", required_argument, 0, 'r' },
{"gl-enable", required_argument, 0, 'E' },
{"gl-disable", required_argument, 0, 'D' },
{"stop-lockspaces", no_argument, 0, 'S' },
@@ -637,7 +541,7 @@ static int read_options(int argc, char *argv[])
}
while (1) {
c = getopt_long(argc, argv, "hqidE:D:w:k:r:S", long_options, &option_index);
c = getopt_long(argc, argv, "hqidE:D:w:S", long_options, &option_index);
if (c == -1)
break;
@@ -661,21 +565,13 @@ static int read_options(int argc, char *argv[])
case 'w':
wait_opt = atoi(optarg);
break;
case 'k':
kill_vg = 1;
arg_vg_name = strdup(optarg);
break;
case 'r':
drop_vg = 1;
arg_vg_name = strdup(optarg);
break;
case 'E':
gl_enable = 1;
arg_vg_name = strdup(optarg);
able_vg_name = strdup(optarg);
break;
case 'D':
gl_disable = 1;
arg_vg_name = strdup(optarg);
able_vg_name = strdup(optarg);
break;
case 'S':
stop_lockspaces = 1;
@@ -720,24 +616,12 @@ int main(int argc, char **argv)
goto out;
}
if (kill_vg) {
rv = do_kill();
goto out;
}
if (drop_vg) {
rv = do_drop();
goto out;
}
if (gl_enable) {
syslog(LOG_INFO, "Enabling global lock in VG %s.", arg_vg_name);
rv = do_able("enable_gl");
goto out;
}
if (gl_disable) {
syslog(LOG_INFO, "Disabling global lock in VG %s.", arg_vg_name);
rv = do_able("disable_gl");
goto out;
}


@@ -45,8 +45,5 @@ static inline void lvmlockd_close(daemon_handle h)
#define EMANAGER 214
#define EPREPARE 215
#define ELOCKD 216
#define EVGKILLED 217 /* sanlock lost access to leases and VG is killed. */
#define ELOCKIO 218 /* sanlock io errors during lock op, may be transient. */
#define EREMOVED 219
#endif /* _LVM_LVMLOCKD_CLIENT_H */

File diff suppressed because it is too large.


@@ -67,8 +67,6 @@ int lm_data_size_dlm(void)
#define VG_LOCK_ARGS_MINOR 0
#define VG_LOCK_ARGS_PATCH 0
static int dlm_has_lvb_bug;
static int cluster_name_from_args(char *vg_args, char *clustername)
{
return last_string_from_args(vg_args, clustername);
@@ -162,7 +160,6 @@ int lm_prepare_lockspace_dlm(struct lockspace *ls)
{
char sys_clustername[MAX_ARGS+1];
char arg_clustername[MAX_ARGS+1];
uint32_t major = 0, minor = 0, patch = 0;
struct lm_dlm *lmd;
int rv;
@@ -173,17 +170,6 @@ int lm_prepare_lockspace_dlm(struct lockspace *ls)
if (rv < 0)
return -EMANAGER;
rv = dlm_kernel_version(&major, &minor, &patch);
if (rv < 0) {
log_error("prepare_lockspace_dlm kernel_version not detected %d", rv);
dlm_has_lvb_bug = 1;
}
if ((major == 6) && (minor == 0) && (patch == 1)) {
log_debug("dlm kernel version %u.%u.%u has lvb bug", major, minor, patch);
dlm_has_lvb_bug = 1;
}
if (!ls->vg_args[0]) {
/* global lockspace has no vg args */
goto skip_args;
@@ -260,6 +246,12 @@ int lm_rem_lockspace_dlm(struct lockspace *ls, int free_vg)
out:
free(lmd);
ls->lm_data = NULL;
if (!strcmp(ls->name, gl_lsname_dlm)) {
gl_running_dlm = 0;
gl_auto_dlm = 0;
}
return 0;
}
@@ -343,7 +335,7 @@ static int to_dlm_mode(int ld_mode)
}
static int lm_adopt_dlm(struct lockspace *ls, struct resource *r, int ld_mode,
struct val_blk *vb_out)
uint32_t *r_version)
{
struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data;
struct rd_dlm *rdd = (struct rd_dlm *)r->lm_data;
@@ -352,7 +344,7 @@ static int lm_adopt_dlm(struct lockspace *ls, struct resource *r, int ld_mode,
int mode;
int rv;
memset(vb_out, 0, sizeof(struct val_blk));
*r_version = 0;
if (!r->lm_init) {
rv = lm_add_resource_dlm(ls, r, 0);
@@ -394,7 +386,7 @@ static int lm_adopt_dlm(struct lockspace *ls, struct resource *r, int ld_mode,
(void *)1, (void *)1, (void *)1,
NULL, NULL);
if (rv == -1 && errno == -EAGAIN) {
if (rv == -EAGAIN) {
log_debug("S %s R %s adopt_dlm adopt mode %d try other mode",
ls->name, r->name, ld_mode);
rv = -EUCLEAN;
@@ -431,13 +423,14 @@ static int lm_adopt_dlm(struct lockspace *ls, struct resource *r, int ld_mode,
*/
int lm_lock_dlm(struct lockspace *ls, struct resource *r, int ld_mode,
struct val_blk *vb_out, int adopt)
uint32_t *r_version, int adopt)
{
struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data;
struct rd_dlm *rdd = (struct rd_dlm *)r->lm_data;
struct dlm_lksb *lksb;
struct val_blk vb;
uint32_t flags = 0;
uint16_t vb_version;
int mode;
int rv;
@@ -445,7 +438,7 @@ int lm_lock_dlm(struct lockspace *ls, struct resource *r, int ld_mode,
/* When adopting, we don't follow the normal method
of acquiring a NL lock then converting it to the
desired mode. */
return lm_adopt_dlm(ls, r, ld_mode, vb_out);
return lm_adopt_dlm(ls, r, ld_mode, r_version);
}
if (!r->lm_init) {
@@ -473,37 +466,19 @@ int lm_lock_dlm(struct lockspace *ls, struct resource *r, int ld_mode,
log_debug("S %s R %s lock_dlm", ls->name, r->name);
if (daemon_test) {
memset(vb_out, 0, sizeof(struct val_blk));
*r_version = 0;
return 0;
}
/*
* The dlm lvb bug means that converting NL->EX will not return
* the latest lvb, so we have to convert NL->PR->EX to reread it.
*/
if (dlm_has_lvb_bug && (ld_mode == LD_LK_EX)) {
rv = dlm_ls_lock_wait(lmd->dh, LKM_PRMODE, lksb, flags,
r->name, strlen(r->name),
0, NULL, NULL, NULL);
if (rv == -1) {
log_debug("S %s R %s lock_dlm acquire mode PR for %d rv %d",
ls->name, r->name, mode, rv);
goto lockrv;
}
/* Fall through to request EX. */
}
rv = dlm_ls_lock_wait(lmd->dh, mode, lksb, flags,
r->name, strlen(r->name),
0, NULL, NULL, NULL);
lockrv:
if (rv == -1 && errno == EAGAIN) {
log_debug("S %s R %s lock_dlm acquire mode %d rv EAGAIN", ls->name, r->name, mode);
if (rv == -EAGAIN) {
log_error("S %s R %s lock_dlm mode %d rv EAGAIN", ls->name, r->name, mode);
return -EAGAIN;
}
if (rv < 0) {
log_error("S %s R %s lock_dlm acquire error %d errno %d", ls->name, r->name, rv, errno);
log_error("S %s R %s lock_dlm error %d", ls->name, r->name, rv);
return rv;
}
@@ -511,22 +486,28 @@ lockrv:
if (lksb->sb_flags & DLM_SBF_VALNOTVALID) {
log_debug("S %s R %s lock_dlm VALNOTVALID", ls->name, r->name);
memset(rdd->vb, 0, sizeof(struct val_blk));
memset(vb_out, 0, sizeof(struct val_blk));
*r_version = 0;
goto out;
}
/*
* 'vb' contains disk endian values, not host endian.
* It is copied directly to rdd->vb which is also kept
* in disk endian form.
* vb_out is returned to the caller in host endian form.
*/
memcpy(&vb, lksb->sb_lvbptr, sizeof(struct val_blk));
memcpy(rdd->vb, &vb, sizeof(vb));
vb_version = le16_to_cpu(vb.version);
vb_out->version = le16_to_cpu(vb.version);
vb_out->flags = le16_to_cpu(vb.flags);
vb_out->r_version = le32_to_cpu(vb.r_version);
if (vb_version && ((vb_version & 0xFF00) > (VAL_BLK_VERSION & 0xFF00))) {
log_error("S %s R %s lock_dlm ignore vb_version %x",
ls->name, r->name, vb_version);
*r_version = 0;
free(rdd->vb);
rdd->vb = NULL;
lksb->sb_lvbptr = NULL;
goto out;
}
*r_version = le32_to_cpu(vb.r_version);
memcpy(rdd->vb, &vb, sizeof(vb)); /* rdd->vb saved as le */
log_debug("S %s R %s lock_dlm get r_version %u",
ls->name, r->name, *r_version);
}
out:
return 0;
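For illustration only (not part of the diff): the endianness handling described in the comment above amounts to unpacking the little-endian on-disk val_blk into host byte order. struct val_blk and the le16_to_cpu()/le32_to_cpu() helpers are the ones appearing in this compare; the helper function name is invented and the project header is assumed.

/* Sketch only: requires the project header defining struct val_blk and le*_to_cpu(). */
static void val_blk_to_host(const struct val_blk *disk, struct val_blk *host)
{
        host->version   = le16_to_cpu(disk->version);
        host->flags     = le16_to_cpu(disk->flags);
        host->r_version = le32_to_cpu(disk->r_version);
}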
@@ -570,7 +551,7 @@ int lm_convert_dlm(struct lockspace *ls, struct resource *r,
rv = dlm_ls_lock_wait(lmd->dh, mode, lksb, flags,
r->name, strlen(r->name),
0, NULL, NULL, NULL);
if (rv == -1 && errno == EAGAIN) {
if (rv == -EAGAIN) {
/* FIXME: When does this happen? Should something different be done? */
log_error("S %s R %s convert_dlm mode %d rv EAGAIN", ls->name, r->name, mode);
return -EAGAIN;
@@ -582,17 +563,17 @@ int lm_convert_dlm(struct lockspace *ls, struct resource *r,
}
int lm_unlock_dlm(struct lockspace *ls, struct resource *r,
uint32_t r_version, uint32_t lmu_flags)
uint32_t r_version, uint32_t lmuf_flags)
{
struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data;
struct rd_dlm *rdd = (struct rd_dlm *)r->lm_data;
struct dlm_lksb *lksb = &rdd->lksb;
struct val_blk vb_prev;
struct val_blk vb_next;
uint32_t flags = 0;
int new_vb = 0;
int rv;
log_debug("S %s R %s unlock_dlm r_version %u flags %x",
ls->name, r->name, r_version, lmuf_flags);
/*
* Do not set PERSISTENT, because we don't need an orphan
* NL lock to protect anything.
@@ -600,46 +581,19 @@ int lm_unlock_dlm(struct lockspace *ls, struct resource *r,
flags |= LKF_CONVERT;
if (rdd->vb && (r->mode == LD_LK_EX)) {
/* vb_prev and vb_next are in disk endian form */
memcpy(&vb_prev, rdd->vb, sizeof(struct val_blk));
memcpy(&vb_next, rdd->vb, sizeof(struct val_blk));
if (!vb_prev.version) {
vb_next.version = cpu_to_le16(VAL_BLK_VERSION);
new_vb = 1;
if (rdd->vb && r_version && (r->mode == LD_LK_EX)) {
if (!rdd->vb->version) {
/* first time vb has been written */
rdd->vb->version = cpu_to_le16(VAL_BLK_VERSION);
}
if (r_version)
rdd->vb->r_version = cpu_to_le32(r_version);
memcpy(lksb->sb_lvbptr, rdd->vb, sizeof(struct val_blk));
if ((lmu_flags & LMUF_FREE_VG) && (r->type == LD_RT_VG)) {
vb_next.flags = cpu_to_le16(VBF_REMOVED);
new_vb = 1;
}
if (r_version) {
vb_next.r_version = cpu_to_le32(r_version);
new_vb = 1;
}
if (new_vb) {
memcpy(rdd->vb, &vb_next, sizeof(struct val_blk));
memcpy(lksb->sb_lvbptr, &vb_next, sizeof(struct val_blk));
log_debug("S %s R %s unlock_dlm vb old %x %x %u new %x %x %u",
ls->name, r->name,
le16_to_cpu(vb_prev.version),
le16_to_cpu(vb_prev.flags),
le32_to_cpu(vb_prev.r_version),
le16_to_cpu(vb_next.version),
le16_to_cpu(vb_next.flags),
le32_to_cpu(vb_next.r_version));
} else {
log_debug("S %s R %s unlock_dlm vb unchanged", ls->name, r->name);
}
log_debug("S %s R %s unlock_dlm set r_version %u",
ls->name, r->name, r_version);
flags |= LKF_VALBLK;
} else {
log_debug("S %s R %s unlock_dlm", ls->name, r->name);
}
if (daemon_test)
@@ -662,65 +616,8 @@ int lm_unlock_dlm(struct lockspace *ls, struct resource *r,
#define DLM_LOCKSPACES_PATH "/sys/kernel/config/dlm/cluster/spaces"
/*
* FIXME: this should be implemented differently.
* It's not nice to use an aspect of the dlm clustering
* implementation, which could change. It would be
* better to do something like use a special lock in the
* lockspace that was held PR by all nodes, and then an
* EX request on it could check if it's started (and
* possibly also notify others to stop it automatically).
* Or, possibly an enhancement to libdlm that would give
* info about lockspace members.
*
* (We could let the VG be removed while others still
* have the lockspace running, which largely works, but
* introduces problems if another VG with the same name is
* recreated while others still have the lockspace running
* for the previous VG. We'd also want a way to clean up
* the stale lockspaces on the others eventually.)
*/
int lm_hosts_dlm(struct lockspace *ls, int notify)
{
static const char closedir_err_msg[] = "lm_hosts_dlm: closedir failed";
char ls_nodes_path[PATH_MAX];
struct dirent *de;
DIR *ls_dir;
int count = 0;
memset(ls_nodes_path, 0, sizeof(ls_nodes_path));
snprintf(ls_nodes_path, PATH_MAX-1, "%s/%s/nodes",
DLM_LOCKSPACES_PATH, ls->name);
if (!(ls_dir = opendir(ls_nodes_path)))
return -ECONNREFUSED;
while ((de = readdir(ls_dir))) {
if (de->d_name[0] == '.')
continue;
count++;
}
if (closedir(ls_dir))
log_error(closedir_err_msg);
if (!count) {
log_error("lm_hosts_dlm found no nodes in %s", ls_nodes_path);
return 0;
}
/*
* Assume that a count of one node represents ourself,
* and any value over one represents other nodes.
*/
return count - 1;
}
int lm_get_lockspaces_dlm(struct list_head *ls_rejoin)
{
static const char closedir_err_msg[] = "lm_get_lockspace_dlm: closedir failed";
struct lockspace *ls;
struct dirent *de;
DIR *ls_dir;
@@ -737,7 +634,7 @@ int lm_get_lockspaces_dlm(struct list_head *ls_rejoin)
if (!(ls = alloc_lockspace())) {
if (closedir(ls_dir))
log_error(closedir_err_msg);
log_error("lm_get_lockspace_dlm: closedir failed");
return -ENOMEM;
}
@@ -747,8 +644,7 @@ int lm_get_lockspaces_dlm(struct list_head *ls_rejoin)
list_add_tail(&ls->list, ls_rejoin);
}
if (closedir(ls_dir))
log_error(closedir_err_msg);
closedir(ls_dir);
return 0;
}
@@ -764,4 +660,3 @@ int lm_is_running_dlm(void)
return 0;
return 1;
}

View File

@@ -50,9 +50,7 @@ enum {
LD_OP_RENAME_FINAL,
LD_OP_RUNNING_LM,
LD_OP_FIND_FREE_LOCK,
LD_OP_KILL_VG,
LD_OP_DROP_VG,
LD_OP_BUSY,
LD_OP_FORGET_VG_NAME,
};
/* resource types */
@@ -85,12 +83,11 @@ struct client {
unsigned int recv : 1;
unsigned int dead : 1;
unsigned int poll_ignore : 1;
unsigned int lock_ops : 1;
char name[MAX_NAME+1];
};
#define LD_AF_PERSISTENT 0x00000001
#define LD_AF_NO_CLIENT 0x00000002
#define LD_AF_UNUSED 0x00000002 /* use me */
#define LD_AF_UNLOCK_CANCEL 0x00000004
#define LD_AF_NEXT_VERSION 0x00000008
#define LD_AF_WAIT 0x00000010
@@ -101,10 +98,10 @@ struct client {
#define LD_AF_SEARCH_LS 0x00000200
#define LD_AF_WAIT_STARTING 0x00001000
#define LD_AF_DUP_GL_LS 0x00002000
#define LD_AF_INACTIVE_LS 0x00004000
#define LD_AF_ADD_LS_ERROR 0x00008000
#define LD_AF_ADOPT 0x00010000
#define LD_AF_WARN_GL_REMOVED 0x00020000
#define LD_AF_LV_LOCK 0x00040000
#define LD_AF_LV_UNLOCK 0x00080000
/*
* Number of times to repeat a lock request after
@@ -143,13 +140,12 @@ struct resource {
int8_t mode;
unsigned int sh_count; /* number of sh locks on locks list */
uint32_t version;
uint32_t last_client_id; /* last client_id to lock or unlock resource */
unsigned int lm_init : 1; /* lm_data is initialized */
unsigned int adopt : 1; /* temp flag in remove_inactive_lvs */
unsigned int version_zero_valid : 1;
unsigned int use_vb : 1;
struct list_head locks;
struct list_head actions;
struct val_blk *vb;
char lv_args[MAX_ARGS+1];
char lm_data[0]; /* lock manager specific data */
};
@@ -188,19 +184,13 @@ struct lockspace {
unsigned int sanlock_gl_enabled: 1;
unsigned int sanlock_gl_dup: 1;
unsigned int free_vg: 1;
unsigned int kill_vg: 1;
unsigned int drop_vg: 1;
struct list_head actions; /* new client actions */
struct list_head resources; /* resource/lock state for gl/vg/lv */
};
/* val_blk version */
#define VAL_BLK_VERSION 0x0101
/* val_blk flags */
#define VBF_REMOVED 0x0001
struct val_blk {
uint16_t version;
uint16_t flags;
@@ -320,9 +310,13 @@ static inline int list_empty(const struct list_head *head)
EXTERN int gl_type_static;
EXTERN int gl_use_dlm;
EXTERN int gl_use_sanlock;
EXTERN pthread_mutex_t gl_type_mutex;
EXTERN char gl_lsname_dlm[MAX_NAME+1];
EXTERN char gl_lsname_sanlock[MAX_NAME+1];
EXTERN int global_dlm_lockspace_exists;
EXTERN int gl_running_dlm;
EXTERN int gl_auto_dlm;
EXTERN int daemon_test; /* run as much as possible without a live lock manager */
EXTERN int daemon_debug;
@@ -359,7 +353,7 @@ int lm_prepare_lockspace_dlm(struct lockspace *ls);
int lm_add_lockspace_dlm(struct lockspace *ls, int adopt);
int lm_rem_lockspace_dlm(struct lockspace *ls, int free_vg);
int lm_lock_dlm(struct lockspace *ls, struct resource *r, int ld_mode,
struct val_blk *vb_out, int adopt);
uint32_t *r_version, int adopt);
int lm_convert_dlm(struct lockspace *ls, struct resource *r,
int ld_mode, uint32_t r_version);
int lm_unlock_dlm(struct lockspace *ls, struct resource *r,
@@ -368,7 +362,6 @@ int lm_rem_resource_dlm(struct lockspace *ls, struct resource *r);
int lm_get_lockspaces_dlm(struct list_head *ls_rejoin);
int lm_data_size_dlm(void);
int lm_is_running_dlm(void);
int lm_hosts_dlm(struct lockspace *ls, int notify);
static inline int lm_support_dlm(void)
{
@@ -398,7 +391,7 @@ static inline int lm_rem_lockspace_dlm(struct lockspace *ls, int free_vg)
}
static inline int lm_lock_dlm(struct lockspace *ls, struct resource *r, int ld_mode,
struct val_blk *vb_out, int adopt)
uint32_t *r_version, int adopt)
{
return -1;
}
@@ -440,11 +433,6 @@ static inline int lm_support_dlm(void)
return 0;
}
static inline int lm_hosts_dlm(struct lockspace *ls, int notify)
{
return 0;
}
#endif /* dlm support */
#ifdef LOCKDSANLOCK_SUPPORT
@@ -457,7 +445,7 @@ int lm_prepare_lockspace_sanlock(struct lockspace *ls);
int lm_add_lockspace_sanlock(struct lockspace *ls, int adopt);
int lm_rem_lockspace_sanlock(struct lockspace *ls, int free_vg);
int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode,
struct val_blk *vb_out, int *retry, int adopt);
uint32_t *r_version, int *retry, int adopt);
int lm_convert_sanlock(struct lockspace *ls, struct resource *r,
int ld_mode, uint32_t r_version);
int lm_unlock_sanlock(struct lockspace *ls, struct resource *r,
@@ -515,7 +503,7 @@ static inline int lm_rem_lockspace_sanlock(struct lockspace *ls, int free_vg)
}
static inline int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode,
struct val_blk *vb_out, int *retry, int adopt)
uint32_t *r_version, int *retry, int adopt)
{
return -1;
}

View File

@@ -33,101 +33,52 @@
#include <sys/socket.h>
/*
-------------------------------------------------------------------------------
For each VG, lvmlockd creates a sanlock lockspace that holds the leases for
that VG. There's a lease for the VG lock, and there's a lease for each active
LV. sanlock maintains (reads/writes) these leases, which exist on storage.
That storage is a hidden LV within the VG: /dev/vg/lvmlock. lvmlockd gives the
path of this internal LV to sanlock, which then reads/writes the leases on it.
# lvs -a cc -o+uuid
LV VG Attr LSize LV UUID
lv1 cc -wi-a----- 2.00g 7xoDtu-yvNM-iwQx-C94t-BbYs-UzBl-o8hAIa
lv2 cc -wi-a----- 100.00g exxNPX-wZdO-uCNy-yiGa-aJGT-JKVl-arfcYT
[lvmlock] cc -wi-ao---- 256.00m iLpDel-hR0T-hJ3u-rnVo-PcDh-mcjt-sF9egM
# sanlock status
s lvm_cc:1:/dev/mapper/cc-lvmlock:0
r lvm_cc:exxNPX-wZdO-uCNy-yiGa-aJGT-JKVl-arfcYT:/dev/mapper/cc-lvmlock:71303168:13 p 26099
r lvm_cc:7xoDtu-yvNM-iwQx-C94t-BbYs-UzBl-o8hAIa:/dev/mapper/cc-lvmlock:70254592:3 p 26099
This shows that sanlock is maintaining leases on /dev/mapper/cc-lvmlock.
sanlock acquires a lockspace lease when the lockspace is joined, i.e. when the
VG is started by 'vgchange --lock-start cc'. This lockspace lease exists at
/dev/mapper/cc-lvmlock offset 0, and sanlock regularly writes to it to maintain
ownership of it. Joining the lockspace (by acquiring the lockspace lease in
it) then allows standard resource leases to be acquired in the lockspace for
whatever the application wants. lvmlockd uses resource leases for the VG lock
and LV locks.
sanlock acquires a resource lease for each actual lock that lvm commands use.
Above, there are two LV locks that are held because the two LVs are active.
These are on /dev/mapper/cc-lvmlock at offsets 71303168 and 70254592. sanlock
does not write to these resource leases except when acquiring and releasing
them (e.g. lvchange -ay/-an). The renewal of the lockspace lease maintains
ownership of all the resource leases in the lockspace.
If the host loses access to the disk that the sanlock lv lives on, then sanlock
can no longer renew its lockspace lease. The lockspace lease will eventually
expire, at which point the host will lose ownership of it, and of all resource
leases it holds in the lockspace. Eventually, other hosts will be able to
acquire those leases. sanlock ensures that another host will not be able to
acquire one of the expired leases until the current host has quit using it.
It is important that the host "quit using" the leases it is holding if the
sanlock storage is lost and they begin expiring. If the host cannot quit using
the leases and release them within a limited time, then sanlock will use the
local watchdog to forcibly reset the host before any other host can acquire
them. This is severe, but preferable to possibly corrupting the data protected
by the lease. It ensures that two nodes will not be using the same lease at
once. For LV leases, that means that one host will not be able to activate
the LV while another host still has it active.
sanlock notifies the application that it cannot renew the lockspace lease. The
application needs to quit using all leases in the lockspace and release them as
quickly as possible. In the initial version, lvmlockd ignored this
notification, so sanlock would eventually reach the point where it would use
the local watchdog to reset the host. However, it's better to attempt a
response. If that response succeeds, the host can avoid being reset. If the
response fails, then sanlock will eventually reset the host as the last resort.
sanlock gives the application about 40 seconds to complete its response and
release its leases before resetting the host.
An application can specify the path and args of a program that sanlock should
run to notify it if the lockspace lease cannot be renewed. This program should
carry out the application's response to the expiring leases: attempt to quit
using the leases and then release them. lvmlockd gives this command to sanlock
for each VG when that VG is started: 'lvmlockctl --kill vg_name'
If sanlock loses access to lease storage in that VG, it runs lvmlockctl --kill,
which:
1. Uses syslog to explain what is happening.
2. Notifies lvmlockd that the VG is being killed, so lvmlockd can
immediately return an error for this condition if any new lock
requests are made. (This step would not be strictly necessary.)
3. Attempts to quit using the VG. This is not yet implemented, but
will eventually use blkdeactivate on the VG (or a more forceful
equivalent.)
4. If step 3 was successful at terminating all use of the VG, then
lvmlockd is told to release all the leases for the VG. If this
is all done within about 40 seconds, the host can avoid being
reset.
Until steps 3 and 4 are fully implemented, manual steps can be substituted.
This is primarily for testing since the problem needs to be noticed and
responded to in a very short time. The manual alternative to step 3 is to kill
any processes using file systems on LV's in the VG, unmount all file systems on
the LVs, and deactivate all the LVs. Once this is done, the manual alternative
to step 4 is to run 'lvmlockctl --drop vg_name', which tells lvmlockd to
release all the leases for the VG.
-------------------------------------------------------------------------------
*/
* If access to the pv containing the vg's leases is lost, sanlock cannot renew
* the leases we have acquired for locked LVs. This means that we could soon
* lose the lease to another host which could activate our LV exclusively. We
* do not want to get to the point of two hosts having the same LV active
* exclusively (it obviously violates the purpose of LV locks.)
*
* The default method of preventing this problem is for lvmlockd to do nothing,
* which produces a safe but potentially inconvenient result. Doing nothing
* leads to our LV leases not being released, which leads to sanlock using the
* local watchdog to reset us before another host can acquire our lock. It
* would often be preferable to avoid the abrupt hard reset from the watchdog.
*
* There are other options to avoid being reset by our watchdog. If we can
* quickly stop using the LVs in question and release the locks for them, then
* we could avoid a reset (there's a certain grace period of about 40 seconds
* in which we can attempt this.) To do this, we can tell sanlock to run a
* specific program when it has lost access to our leases. We could use this
* program to:
*
* 1. Deactivate all lvs in the affected vg. If all the lvs are
* deactivated, then our LV locks would be released and sanlock would no longer
* use the watchdog to reset us. If file systems are mounted on the active
* lvs, then deactivating them would fail, so this option would be of limited
* usefulness.
*
* 2. Option 1 could be extended to kill pids using the fs on the lv, unmount
* the fs, and deactivate the lv. This is probably out of scope for lvm
* directly, and would likely need the help of another system service.
*
* 3. Use dmsetup suspend to block access to lvs in the affected vg. If this
* was successful, the local host could no longer write to the lvs, we could
* safely release the LV locks, and sanlock would no longer reset us. At this
* point, with suspended lvs, the host would be in a fairly hobbled state, and
* would almost certainly need a manual, forcible reset.
*
* 4. Option 3 could be extended to monitor the lost storage, and if it is
* reconnected, the leases could be reacquired, and the suspended lvs resumed
* (reacquiring leases will fail if another host has acquired them since they
* were released.) The complexity of this option, combined with the fact that
* the error conditions are often not as simple as storage being lost and then
* later reconnected, will result in this option being too unreliable.
*
* Add a config option that we could use to select a different behavior than
* the default. Then implement one of the simpler options as a proof of
* concept, which could be extended if needed.
*/
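For illustration only (not part of the diff): the options in the comment above depend on sanlock running a program of ours when lease renewal fails. A minimal sketch of registering such a program follows, using the sanlock_killpath() client call that appears elsewhere in this compare; the header placement, binary path and "--kill" argument are placeholders.

#include <stdio.h>
#include "sanlock.h"            /* SANLK_PATH_LEN */
#include "sanlock_resource.h"   /* assumed to declare sanlock_killpath() */

/* Sketch only: ask sanlock to run a response program if our leases expire. */
static int register_lease_loss_handler(int sanlock_fd, const char *vg_name)
{
        char killpath[SANLK_PATH_LEN];
        char killargs[SANLK_PATH_LEN];

        snprintf(killpath, sizeof(killpath), "/usr/sbin/lvmlockctl");
        snprintf(killargs, sizeof(killargs), "--kill %s", vg_name);

        /* Negative return indicates an error, checked as rv < 0 in the hunks below. */
        return sanlock_killpath(sanlock_fd, 0, killpath, killargs);
}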
/*
* Each lockspace thread has its own sanlock daemon connection.
@@ -1010,24 +961,12 @@ int lm_prepare_lockspace_sanlock(struct lockspace *ls)
char lock_lv_name[MAX_ARGS+1];
char lsname[SANLK_NAME_LEN + 1];
char disk_path[SANLK_PATH_LEN];
char killpath[SANLK_PATH_LEN];
char killargs[SANLK_PATH_LEN];
int gl_found;
int ret, rv;
memset(disk_path, 0, sizeof(disk_path));
memset(lock_lv_name, 0, sizeof(lock_lv_name));
/*
* Construct the path to lvmlockctl by using the path to the lvm binary
* and appending "lockctl" to get /path/to/lvmlockctl.
*/
memset(killpath, 0, sizeof(killpath));
snprintf(killpath, SANLK_PATH_LEN - 1, "%slockctl", LVM_PATH);
memset(killargs, 0, sizeof(killargs));
snprintf(killargs, SANLK_PATH_LEN - 1, "--kill %s", ls->vg_name);
rv = check_args_version(ls->vg_args, VG_LOCK_ARGS_MAJOR);
if (rv < 0) {
ret = -EARGS;
@@ -1112,15 +1051,6 @@ int lm_prepare_lockspace_sanlock(struct lockspace *ls)
goto fail;
}
log_debug("set killpath to %s %s", killpath, killargs);
rv = sanlock_killpath(lms->sock, 0, killpath, killargs);
if (rv < 0) {
log_error("S %s killpath error %d", lsname, rv);
ret = -EMANAGER;
goto fail;
}
rv = sanlock_restrict(lms->sock, SANLK_RESTRICT_SIGKILL);
if (rv < 0) {
log_error("S %s restrict error %d", lsname, rv);
@@ -1300,7 +1230,7 @@ int lm_rem_resource_sanlock(struct lockspace *ls, struct resource *r)
}
int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode,
struct val_blk *vb_out, int *retry, int adopt)
uint32_t *r_version, int *retry, int adopt)
{
struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data;
struct rd_sanlock *rds = (struct rd_sanlock *)r->lm_data;
@@ -1308,6 +1238,7 @@ int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode,
uint64_t lock_lv_offset;
uint32_t flags = 0;
struct val_blk vb;
uint16_t vb_version;
int added = 0;
int rv;
@@ -1383,7 +1314,7 @@ int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode,
(unsigned long long)rs->disks[0].offset);
if (daemon_test) {
memset(vb_out, 0, sizeof(struct val_blk));
*r_version = 0;
return 0;
}
@@ -1392,15 +1323,6 @@ int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode,
if (adopt)
flags |= SANLK_ACQUIRE_ORPHAN_ONLY;
#ifdef SANLOCK_HAS_ACQUIRE_OWNER_NOWAIT
/*
* Don't block waiting for a failed lease to expire since it causes
* sanlock_acquire to block for a long time, which would prevent this
* thread from processing other lock requests.
*/
flags |= SANLK_ACQUIRE_OWNER_NOWAIT;
#endif
rv = sanlock_acquire(lms->sock, -1, flags, 1, &rs, NULL);
if (rv == -EAGAIN) {
@@ -1471,30 +1393,15 @@ int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode,
return -EAGAIN;
}
#ifdef SANLOCK_HAS_ACQUIRE_OWNER_NOWAIT
if (rv == SANLK_ACQUIRE_OWNED_RETRY) {
/*
* The lock is held by a failed host, and will eventually
* expire. If we retry we'll eventually acquire the lock
* (or find someone else has acquired it). The EAGAIN retry
* attempts for SH locks above would not be sufficient for
* the length of expiration time. We could add a longer
* retry time here to cover the full expiration time and block
* the activation command for that long. For now just return
* the standard error indicating that another host still owns
* the lease. FIXME: return a different error number so the
* command can print a different error indicating that the
* owner of the lease is in the process of expiring?
*/
log_debug("S %s R %s lock_san acquire mode %d rv %d", ls->name, r->name, ld_mode, rv);
*retry = 0;
return -EAGAIN;
}
#endif
if (rv < 0) {
log_error("S %s R %s lock_san acquire error %d",
ls->name, r->name, rv);
if (added) {
lm_rem_resource_sanlock(ls, r);
return rv;
}
/* if the gl has been disabled, remove and free the gl resource */
if ((rv == SANLK_LEADER_RESOURCE) && (r->type == LD_RT_GL)) {
if (!lm_gl_is_enabled(ls)) {
@@ -1506,22 +1413,6 @@ int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode,
}
}
if (added)
lm_rem_resource_sanlock(ls, r);
/* sanlock gets i/o errors trying to read/write the leases. */
if (rv == -EIO)
rv = -ELOCKIO;
/*
* The sanlock lockspace can disappear if the lease storage fails:
* the delta lease renewals fail, the lockspace enters recovery, and
* if lvmlockd holds no leases in the lockspace, sanlock can
* stop and free the lockspace.
*/
if (rv == -ENOSPC)
rv = -ELOCKIO;
return rv;
}
@@ -1529,23 +1420,26 @@ int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode,
rv = sanlock_get_lvb(0, rs, (char *)&vb, sizeof(vb));
if (rv < 0) {
log_error("S %s R %s lock_san get_lvb error %d", ls->name, r->name, rv);
memset(rds->vb, 0, sizeof(struct val_blk));
memset(vb_out, 0, sizeof(struct val_blk));
*r_version = 0;
goto out;
}
/*
* 'vb' contains disk endian values, not host endian.
* It is copied directly to rds->vb which is also kept
* in disk endian form.
* vb_out is returned to the caller in host endian form.
*/
vb_version = le16_to_cpu(vb.version);
memcpy(rds->vb, &vb, sizeof(vb));
if (vb_version && ((vb_version & 0xFF00) > (VAL_BLK_VERSION & 0xFF00))) {
log_error("S %s R %s lock_san ignore vb_version %x",
ls->name, r->name, vb_version);
*r_version = 0;
free(rds->vb);
rds->vb = NULL;
goto out;
}
vb_out->version = le16_to_cpu(vb.version);
vb_out->flags = le16_to_cpu(vb.flags);
vb_out->r_version = le32_to_cpu(vb.r_version);
*r_version = le32_to_cpu(vb.r_version);
memcpy(rds->vb, &vb, sizeof(vb)); /* rds->vb saved as le */
log_debug("S %s R %s lock_san get r_version %u",
ls->name, r->name, *r_version);
}
out:
return rv;
@@ -1700,11 +1594,9 @@ int lm_unlock_sanlock(struct lockspace *ls, struct resource *r,
}
rv = sanlock_release(lms->sock, -1, 0, 1, &rs);
if (rv < 0)
if (rv < 0) {
log_error("S %s R %s unlock_san release error %d", ls->name, r->name, rv);
if (rv == -EIO)
rv = -ELOCKIO;
}
return rv;
}

View File

@@ -83,12 +83,6 @@ static int _init(struct daemon_state *s)
struct lvmpolld_state *ls = s->private;
ls->log = s->log;
/*
* Log warnings to stderr by default; otherwise we would miss lvpoll
* error messages in the default configuration.
*/
daemon_log_enable(ls->log, DAEMON_LOG_OUTLET_STDERR, DAEMON_LOG_WARN, 1);
if (!daemon_log_parse(ls->log, DAEMON_LOG_OUTLET_STDERR, ls->log_config, 1))
return 0;
@@ -290,7 +284,7 @@ static int poll_for_output(struct lvmpolld_lv *pdlv, struct lvmpolld_thread_data
"caught input data in STDERR");
assert(read_single_line(data, 1)); /* may block indef. anyway */
WARN(pdlv->ls, "%s: PID %d: %s: '%s'", LVM2_LOG_PREFIX,
INFO(pdlv->ls, "%s: PID %d: %s: '%s'", LVM2_LOG_PREFIX,
pdlv->cmd_pid, "STDERR", data->line);
} else if (fds[1].revents) {
if (fds[1].revents & POLLHUP)
@@ -333,19 +327,15 @@ static int poll_for_output(struct lvmpolld_lv *pdlv, struct lvmpolld_thread_data
if (fds[1].fd >= 0)
while (read_single_line(data, 1)) {
assert(r > 0);
WARN(pdlv->ls, "%s: PID %d: %s: %s", LVM2_LOG_PREFIX, pdlv->cmd_pid, "STDERR", data->line);
INFO(pdlv->ls, "%s: PID %d: %s: %s", LVM2_LOG_PREFIX, pdlv->cmd_pid, "STDERR", data->line);
}
if (WIFEXITED(ch_stat)) {
INFO(pdlv->ls, "%s: %s (PID %d) %s (%d)", PD_LOG_PREFIX,
"lvm2 cmd", pdlv->cmd_pid, "exited with", WEXITSTATUS(ch_stat));
cmd_state.retcode = WEXITSTATUS(ch_stat);
if (cmd_state.retcode)
ERROR(pdlv->ls, "%s: %s (PID %d) %s (retcode: %d)", PD_LOG_PREFIX,
"lvm2 cmd", pdlv->cmd_pid, "failed", cmd_state.retcode);
else
INFO(pdlv->ls, "%s: %s (PID %d) %s", PD_LOG_PREFIX,
"lvm2 cmd", pdlv->cmd_pid, "finished successfully");
} else if (WIFSIGNALED(ch_stat)) {
ERROR(pdlv->ls, "%s: %s (PID %d) %s (%d)", PD_LOG_PREFIX,
WARN(pdlv->ls, "%s: %s (PID %d) %s (%d)", PD_LOG_PREFIX,
"lvm2 cmd", pdlv->cmd_pid, "got terminated by signal",
WTERMSIG(ch_stat));
cmd_state.signal = WTERMSIG(ch_stat);
@@ -576,8 +566,6 @@ static struct lvmpolld_lv *construct_pdlv(request req, struct lvmpolld_state *ls
return NULL;
}
pdlv->cmdargv = cmdargv;
cmdenvp = cmdenvp_ctr(pdlv);
if (!cmdenvp) {
pdlv_destroy(pdlv);
@@ -585,6 +573,7 @@ static struct lvmpolld_lv *construct_pdlv(request req, struct lvmpolld_state *ls
return NULL;
}
pdlv->cmdargv = cmdargv;
pdlv->cmdenvp = cmdenvp;
return pdlv;
@@ -595,16 +584,12 @@ static int spawn_detached_thread(struct lvmpolld_lv *pdlv)
int r;
pthread_attr_t attr;
if (pthread_attr_init(&attr) != 0)
return 0;
if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
return 0;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
r = pthread_create(&pdlv->tid, &attr, fork_and_poll, (void *)pdlv);
if (pthread_attr_destroy(&attr) != 0)
return 0;
pthread_attr_destroy(&attr);
return !r;
}

View File

@@ -45,6 +45,7 @@
@top_srcdir@/lib/metadata/vg.h
@top_srcdir@/lib/mm/memlock.h
@top_srcdir@/lib/mm/xlate.h
@top_builddir@/lib/misc/configure.h
@top_srcdir@/lib/misc/crc.h
@top_srcdir@/lib/misc/intl.h
@top_srcdir@/lib/misc/last-path-component.h
@@ -55,6 +56,7 @@
@top_srcdir@/lib/misc/lvm-globals.h
@top_srcdir@/lib/misc/lvm-signal.h
@top_srcdir@/lib/misc/lvm-string.h
@top_builddir@/lib/misc/lvm-version.h
@top_srcdir@/lib/misc/lvm-percent.h
@top_srcdir@/lib/misc/lvm-wrappers.h
@top_srcdir@/lib/misc/sharedlib.h

View File

@@ -1,6 +1,6 @@
#
# Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
# Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved.
# Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
@@ -35,5 +35,5 @@ device-mapper: all
cflow: all
DISTCLEAN_TARGETS += .symlinks configure.h lvm-version.h
DISTCLEAN_TARGETS += .symlinks
CLEAN_TARGETS += $(LINKS) .include_symlinks .symlinks_created

View File

@@ -62,7 +62,6 @@ SOURCES =\
device/dev-swap.c \
device/dev-type.c \
device/dev-luks.c \
device/dev-dasd.c \
display/display.c \
error/errseg.c \
unknown/unknown.c \
@@ -231,4 +230,4 @@ CFLAGS += $(BLKID_CFLAGS) $(UDEV_CFLAGS) $(VALGRIND_CFLAGS)
$(SUBDIRS): $(LIB_STATIC)
CLEAN_TARGETS += misc/configure.h misc/lvm-version.h
DISTCLEAN_TARGETS += misc/configure.h misc/lvm-version.h

View File

@@ -143,23 +143,23 @@ static int _get_segment_status_from_target_params(const char *target_name,
return 0;
}
if (segtype_is_cache(segtype)) {
if (!strcmp(segtype->name, "cache")) {
if (!dm_get_status_cache(seg_status->mem, params, &(seg_status->cache)))
return_0;
seg_status->type = SEG_STATUS_CACHE;
} else if (segtype_is_raid(segtype)) {
} else if (!strcmp(segtype->name, "raid")) {
if (!dm_get_status_raid(seg_status->mem, params, &seg_status->raid))
return_0;
seg_status->type = SEG_STATUS_RAID;
} else if (segtype_is_thin_volume(segtype)) {
} else if (!strcmp(segtype->name, "thin")) {
if (!dm_get_status_thin(seg_status->mem, params, &seg_status->thin))
return_0;
seg_status->type = SEG_STATUS_THIN;
} else if (segtype_is_thin_pool(segtype)) {
} else if (!strcmp(segtype->name, "thin-pool")) {
if (!dm_get_status_thin_pool(seg_status->mem, params, &seg_status->thin_pool))
return_0;
seg_status->type = SEG_STATUS_THIN_POOL;
} else if (segtype_is_snapshot(segtype)) {
} else if (!strcmp(segtype->name, "snapshot")) {
if (!dm_get_status_snapshot(seg_status->mem, params, &seg_status->snapshot))
return_0;
seg_status->type = SEG_STATUS_SNAPSHOT;
@@ -518,73 +518,6 @@ out:
return r;
}
static int _ignore_unusable_thins(struct device *dev)
{
/* TODO make function for thin testing */
struct dm_pool *mem;
struct dm_status_thin_pool *status;
struct dm_task *dmt = NULL;
void *next = NULL;
uint64_t start, length;
char *target_type = NULL;
char *params;
int minor, major;
int r = 0;
if (!(mem = dm_pool_create("unusable_thins", 128)))
return_0;
if (!(dmt = dm_task_create(DM_DEVICE_TABLE)))
goto_out;
if (!dm_task_no_open_count(dmt))
goto_out;
if (!dm_task_set_major_minor(dmt, MAJOR(dev->dev), MINOR(dev->dev), 1))
goto_out;
if (!dm_task_run(dmt)) {
log_error("Failed to get state of mapped device.");
goto out;
}
dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
if (sscanf(params, "%d:%d", &minor, &major) != 2) {
log_error("Failed to get thin-pool major:minor for thin device %d:%d.",
(int)MAJOR(dev->dev), (int)MINOR(dev->dev));
goto out;
}
dm_task_destroy(dmt);
if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
goto_out;
if (!dm_task_no_flush(dmt))
log_warn("Can't set no_flush.");
if (!dm_task_no_open_count(dmt))
goto_out;
if (!dm_task_set_major_minor(dmt, minor, major, 1))
goto_out;
if (!dm_task_run(dmt)) {
log_error("Failed to get state of mapped device.");
goto out;
}
dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
if (!dm_get_status_thin_pool(mem, params, &status))
return_0;
if (status->read_only || status->out_of_data_space) {
log_warn("WARNING: %s: Thin's thin-pool needs inspection.",
dev_name(dev));
goto out;
}
r = 1;
out:
if (dmt)
dm_task_destroy(dmt);
dm_pool_destroy(mem);
return r;
}
/*
* device_is_usable
* @dev
@@ -713,13 +646,6 @@ int device_is_usable(struct device *dev, struct dev_usable_check_params check)
goto out;
}
/* TODO: extend check struct ? */
if (target_type && !strcmp(target_type, "thin") &&
!_ignore_unusable_thins(dev)) {
log_debug_activation("%s: %s device %s not usable.", dev_name(dev), target_type, name);
goto out;
}
if (target_type && strcmp(target_type, "error"))
only_error_target = 0;
} while (next);
@@ -1045,11 +971,6 @@ static int _percent_run(struct dev_manager *dm, const char *name,
wait ? DM_DEVICE_WAITEVENT : DM_DEVICE_STATUS, 0, 0, 0)))
return_0;
/* No freeze on overfilled thin-pool, read existing slightly outdated data */
if (lv && lv_is_thin_pool(lv) &&
!dm_task_no_flush(dmt))
log_warn("Can't set no_flush flag."); /* Non fatal */
if (!dm_task_run(dmt))
goto_out;
@@ -1117,7 +1038,7 @@ static int _percent_run(struct dev_manager *dm, const char *name,
goto_out;
}
log_debug_activation("LV percent: %.2f", dm_percent_to_float(*overall_percent));
log_debug_activation("LV percent: %f", dm_percent_to_float(*overall_percent));
r = 1;
out:
@@ -3277,7 +3198,7 @@ static int _tree_action(struct dev_manager *dm, const struct logical_volume *lv,
break;
case SUSPEND:
dm_tree_skip_lockfs(root);
if (!dm->flush_required && !lv_is_pvmove(lv))
if (!dm->flush_required && lv_is_mirror(lv) && !lv_is_pvmove(lv))
dm_tree_use_no_flush_suspend(root);
/* Fall through */
case SUSPEND_WITH_LOCKFS:
@@ -3296,14 +3217,7 @@ static int _tree_action(struct dev_manager *dm, const struct logical_volume *lv,
if (!dm_tree_preload_children(root, dlid, DLID_SIZE))
goto_out;
if ((dm_tree_node_size_changed(root) < 0))
dm->flush_required = 1;
/* Currently keep the code requiring a flush for any
* non 'thin pool/volume, mirror' LV, or with any size change */
if (!lv_is_thin_volume(lv) &&
!lv_is_thin_pool(lv) &&
(!lv_is_mirror(lv) || dm_tree_node_size_changed(root)))
if (dm_tree_node_size_changed(root))
dm->flush_required = 1;
if (action == ACTIVATE) {

lib/cache/lvmcache.c
View File

@@ -1542,6 +1542,10 @@ int lvmcache_update_vgname_and_id(struct lvmcache_info *info, struct lvmcache_vg
vgid = vgname;
}
/* When using lvmetad, the PV could not have become orphaned. */
if (lvmetad_active() && is_orphan_vg(vgname) && info->vginfo)
return 1;
/* If PV without mdas is already in a real VG, don't make it orphan */
if (is_orphan_vg(vgname) && info->vginfo &&
mdas_empty_or_ignored(&info->mdas) &&
@@ -1867,8 +1871,8 @@ struct lvmcache_info *lvmcache_add(struct labeller *labeller, const char *pvid,
* device already exists? Things don't seem to work
* if we do that for some reason.
*/
log_debug_cache("Found same device %s with same pvid %s",
dev_name(existing->dev), pvid_s);
log_verbose("Found same device %s with same pvid %s",
dev_name(existing->dev), pvid_s);
}
/*
@@ -2354,27 +2358,3 @@ int lvmcache_contains_lock_type_sanlock(struct cmd_context *cmd)
return 0;
}
void lvmcache_get_max_name_lengths(struct cmd_context *cmd,
unsigned *pv_max_name_len,
unsigned *vg_max_name_len)
{
struct lvmcache_vginfo *vginfo;
struct lvmcache_info *info;
unsigned len;
*vg_max_name_len = 0;
*pv_max_name_len = 0;
dm_list_iterate_items(vginfo, &_vginfos) {
len = strlen(vginfo->vgname);
if (*vg_max_name_len < len)
*vg_max_name_len = len;
dm_list_iterate_items(info, &vginfo->infos) {
len = strlen(dev_name(info->dev));
if (*pv_max_name_len < len)
*pv_max_name_len = len;
}
}
}

View File

@@ -190,8 +190,4 @@ void lvmcache_set_preferred_duplicates(const char *vgid);
int lvmcache_contains_lock_type_sanlock(struct cmd_context *cmd);
void lvmcache_get_max_name_lengths(struct cmd_context *cmd,
unsigned *pv_max_name_len, unsigned *vg_max_name_len);
#endif

lib/cache/lvmetad.c
View File

@@ -37,70 +37,6 @@ static struct cmd_context *_lvmetad_cmd = NULL;
static struct volume_group *lvmetad_pvscan_vg(struct cmd_context *cmd, struct volume_group *vg);
static int _log_debug_inequality(const char *name, struct dm_config_node *a, struct dm_config_node *b)
{
int result = 0;
int final_result = 0;
if (a->v && b->v) {
result = compare_value(a->v, b->v);
if (result) {
struct dm_config_value *av = a->v;
struct dm_config_value *bv = b->v;
if (!strcmp(a->key, b->key)) {
if (a->v->type == DM_CFG_STRING && b->v->type == DM_CFG_STRING)
log_debug_lvmetad("VG %s metadata inequality at %s / %s: %s / %s",
name, a->key, b->key, av->v.str, bv->v.str);
else if (a->v->type == DM_CFG_INT && b->v->type == DM_CFG_INT)
log_debug_lvmetad("VG %s metadata inequality at %s / %s: " FMTi64 " / " FMTi64,
name, a->key, b->key, av->v.i, bv->v.i);
else
log_debug_lvmetad("VG %s metadata inequality at %s / %s: type %d / type %d",
name, a->key, b->key, av->type, bv->type);
} else {
log_debug_lvmetad("VG %s metadata inequality at %s / %s", name, a->key, b->key);
}
final_result = result;
}
}
if (a->v && !b->v) {
log_debug_lvmetad("VG %s metadata inequality at %s / %s", name, a->key, b->key);
final_result = 1;
}
if (!a->v && b->v) {
log_debug_lvmetad("VG %s metadata inequality at %s / %s", name, a->key, b->key);
final_result = -1;
}
if (a->child && b->child) {
result = _log_debug_inequality(name, a->child, b->child);
if (result)
final_result = result;
}
if (a->sib && b->sib) {
result = _log_debug_inequality(name, a->sib, b->sib);
if (result)
final_result = result;
}
if (a->sib && !b->sib) {
log_debug_lvmetad("VG %s metadata inequality at %s / %s", name, a->key, b->key);
final_result = 1;
}
if (!a->sib && b->sib) {
log_debug_lvmetad("VG %s metadata inequality at %s / %s", name, a->key, b->key);
final_result = -1;
}
return final_result;
}
void lvmetad_disconnect(void)
{
if (_lvmetad_connected)
@@ -498,7 +434,6 @@ struct volume_group *lvmetad_vg_lookup(struct cmd_context *cmd, const char *vgna
struct format_type *fmt;
struct dm_config_node *pvcn;
struct pv_list *pvl;
int rescan = 0;
if (!lvmetad_active())
return NULL;
@@ -557,56 +492,16 @@ struct volume_group *lvmetad_vg_lookup(struct cmd_context *cmd, const char *vgna
if (!(vg = import_vg_from_lvmetad_config_tree(reply.cft, fid)))
goto_out;
/*
* Read the VG from disk, ignoring the lvmetad copy in these
* cases:
*
* 1. The host is not using lvmlockd, but is reading lockd VGs
* using the --shared option. The shared option is meant to
* let hosts not running lvmlockd look at lockd VGs, like the
* foreign option allows hosts to look at foreign VGs. When
* --foreign is used, the code forces a rescan since the local
* lvmetad cache of foreign VGs is likely stale. Similarly,
* for --shared, have the code reading the shared VGs below
* not use the cached copy from lvmetad but rescan the VG.
*
* 2. The host failed to acquire the VG lock from lvmlockd for
* the lockd VG. In this case, the usual mechanisms for
* updating the lvmetad copy of the VG have been missed. Since
* we don't know if the cached copy is valid, assume it's not.
*
* 3. lvmetad has returned the "vg_invalid" flag, which is the
* usual mechanism used by lvmlockd/lvmetad to cause a host to
* reread a VG from disk that has been modified from another
* host.
*/
if (is_lockd_type(vg->lock_type) && cmd->include_shared_vgs) {
log_debug_lvmetad("Rescan VG %s because including shared", vgname);
rescan = 1;
} else if (is_lockd_type(vg->lock_type) && cmd->lockd_vg_rescan) {
log_debug_lvmetad("Rescan VG %s because no lvmlockd lock is held", vgname);
rescan = 1;
} else if (dm_config_find_node(reply.cft->root, "vg_invalid")) {
log_debug_lvmetad("Rescan VG %s because lvmetad returned invalid", vgname);
rescan = 1;
}
/*
* locking may have detected a newer vg version and
* invalidated the cached vg.
*/
if (rescan) {
if (dm_config_find_node(reply.cft->root, "vg_invalid")) {
log_debug_lvmetad("Update invalid lvmetad cache for VG %s", vgname);
vg2 = lvmetad_pvscan_vg(cmd, vg);
release_vg(vg);
vg = vg2;
if (!vg) {
log_debug_lvmetad("VG %s from lvmetad not found during rescan.", vgname);
fid = NULL;
goto out;
} else
fid = vg->fid;
fid = vg->fid;
}
dm_list_iterate_items(pvl, &vg->pvs) {
@@ -1186,8 +1081,6 @@ static int _lvmetad_pvscan_single(struct metadata_area *mda, void *baton)
* due to something like an lvcreate from another host.
* This is limited to changes that only affect the vg (not global state like
* orphan PVs), so we only need to reread mdas on the vg's existing pvs.
* But, a previous PV in the VG may have been removed since we last read
* the VG, and that PV may have been reused for another VG.
*/
static struct volume_group *lvmetad_pvscan_vg(struct cmd_context *cmd, struct volume_group *vg)
@@ -1200,7 +1093,6 @@ static struct volume_group *lvmetad_pvscan_vg(struct cmd_context *cmd, struct vo
struct format_instance *fid;
struct format_instance_ctx fic = { .type = 0 };
struct _lvmetad_pvscan_baton baton;
struct device *save_dev = NULL;
dm_list_iterate_items(pvl, &vg->pvs) {
/* missing pv */
@@ -1227,25 +1119,9 @@ static struct volume_group *lvmetad_pvscan_vg(struct cmd_context *cmd, struct vo
lvmcache_foreach_mda(info, _lvmetad_pvscan_single, &baton);
/*
* The PV may have been removed from the VG by another host
* since we last read the VG.
*/
if (!baton.vg) {
log_debug_lvmetad("Did not find VG %s in scan of PV %s", vg->name, dev_name(pvl->pv->dev));
lvmcache_fmt(info)->ops->destroy_instance(baton.fid);
continue;
}
/*
* The PV may have been removed from the VG and used for a
* different VG since we last read the VG.
*/
if (strcmp(baton.vg->name, vg->name)) {
log_debug_lvmetad("Did not find VG %s in scan of PV %s which is now VG %s",
vg->name, dev_name(pvl->pv->dev), baton.vg->name);
release_vg(baton.vg);
continue;
return NULL;
}
if (!(vgmeta = export_vg_to_config_tree(baton.vg))) {
@@ -1256,12 +1132,9 @@ static struct volume_group *lvmetad_pvscan_vg(struct cmd_context *cmd, struct vo
if (!vgmeta_ret) {
vgmeta_ret = vgmeta;
save_dev = pvl->pv->dev;
} else {
if (compare_config(vgmeta_ret->root, vgmeta->root)) {
log_error("VG %s metadata comparison failed for device %s vs %s",
vg->name, dev_name(pvl->pv->dev), save_dev ? dev_name(save_dev) : "none");
_log_debug_inequality(vg->name, vgmeta_ret->root, vgmeta->root);
if (!compare_config(vgmeta_ret->root, vgmeta->root)) {
log_error("VG metadata comparison failed");
dm_config_destroy(vgmeta);
dm_config_destroy(vgmeta_ret);
release_vg(baton.vg);
@@ -1331,7 +1204,7 @@ int lvmetad_pvscan_single(struct cmd_context *cmd, struct device *dev,
log_warn("WARNING: Ignoring obsolete format of metadata (%s) on device %s when using lvmetad",
baton.fid->fmt->name, dev_name(dev));
else
log_error("Ignoring obsolete format of metadata (%s) on device %s when using lvmetad.",
log_error("WARNING: Ignoring obsolete format of metadata (%s) on device %s when using lvmetad",
baton.fid->fmt->name, dev_name(dev));
lvmcache_fmt(info)->ops->destroy_instance(baton.fid);
@@ -1683,7 +1556,7 @@ void lvmetad_validate_global_cache(struct cmd_context *cmd, int force)
return;
}
if (!lvmetad_active())
if (!lvmetad_used())
return;
log_debug_lvmetad("Validating global lvmetad cache");

View File

@@ -25,11 +25,6 @@
#include "lv_alloc.h"
#include "defaults.h"
static const char _cache_module[] = "cache";
/* TODO: using static field here, maybe should be a part of segment_type */
static unsigned _feature_mask;
#define SEG_LOG_ERROR(t, p...) \
log_error(t " segment %s of logical volume %s.", ## p, \
dm_config_parent_name(sn), seg->lv->name), 0;
@@ -71,15 +66,23 @@ static int _cache_pool_text_import(struct lv_segment *seg,
if (dm_config_has_node(sn, "cache_mode")) {
if (!(str = dm_config_find_str(sn, "cache_mode", NULL)))
return SEG_LOG_ERROR("cache_mode must be a string in");
if (!cache_set_mode(seg, str))
if (!set_cache_pool_feature(&seg->feature_flags, str))
return SEG_LOG_ERROR("Unknown cache_mode in");
}
} else
/* When missing from the metadata, it's old metadata - use writethrough */
seg->feature_flags |= DM_CACHE_FEATURE_WRITETHROUGH;
if (dm_config_has_node(sn, "policy")) {
if (!(str = dm_config_find_str(sn, "policy", NULL)))
return SEG_LOG_ERROR("policy must be a string in");
if (!(seg->policy_name = dm_pool_strdup(mem, str)))
return SEG_LOG_ERROR("Failed to duplicate policy in");
} else {
/* Cannot use just the default, so pick one */
seg->policy_name = DEFAULT_CACHE_POOL_POLICY; /* FIXME make configurable */
/* FIXME maybe this should always be 'mq' */
log_warn("WARNING: cache_policy undefined, using default \"%s\" policy.",
seg->policy_name);
}
/*
@@ -100,9 +103,6 @@ static int _cache_pool_text_import(struct lv_segment *seg,
* If the policy is not present, default policy is used.
*/
if ((sn = dm_config_find_node(sn, "policy_settings"))) {
if (!seg->policy_name)
return SEG_LOG_ERROR("policy_settings must have a policy_name in");
if (sn->v)
return SEG_LOG_ERROR("policy_settings must be a section in");
@@ -131,33 +131,28 @@ static int _cache_pool_text_export(const struct lv_segment *seg,
{
const char *cache_mode;
if (!(cache_mode = get_cache_pool_cachemode_name(seg)))
return_0;
if (!seg->policy_name) {
log_error(INTERNAL_ERROR "Policy name for %s is not defined.",
display_lvname(seg->lv));
return 0;
}
outf(f, "data = \"%s\"", seg_lv(seg, 0)->name);
outf(f, "metadata = \"%s\"", seg->metadata_lv->name);
outf(f, "chunk_size = %" PRIu32, seg->chunk_size);
outf(f, "cache_mode = \"%s\"", cache_mode);
outf(f, "policy = \"%s\"", seg->policy_name);
/*
* Cache pool used by a cache LV holds data. Not ideal,
* but not worth breaking backward compatibility by shifting
* content to the cache segment
*/
if (cache_mode_is_set(seg)) {
if (!(cache_mode = get_cache_mode_name(seg)))
return_0;
outf(f, "cache_mode = \"%s\"", cache_mode);
}
if (seg->policy_name) {
outf(f, "policy = \"%s\"", seg->policy_name);
if (seg->policy_settings) {
if (strcmp(seg->policy_settings->key, "policy_settings")) {
log_error(INTERNAL_ERROR "Incorrect policy_settings tree, %s.",
seg->policy_settings->key);
return 0;
}
if (seg->policy_settings->child)
out_config_node(f, seg->policy_settings);
if (seg->policy_settings) {
if (strcmp(seg->policy_settings->key, "policy_settings")) {
log_error(INTERNAL_ERROR "Incorrect policy_settings tree, %s.",
seg->policy_settings->key);
return 0;
}
out_config_node(f, seg->policy_settings);
}
return 1;
@@ -170,29 +165,12 @@ static void _destroy(struct segment_type *segtype)
#ifdef DEVMAPPER_SUPPORT
static int _target_present(struct cmd_context *cmd,
const struct lv_segment *seg __attribute__((unused)),
unsigned *attributes __attribute__((unused)))
const struct lv_segment *seg __attribute__((unused)),
unsigned *attributes __attribute__((unused)))
{
/* List of features with their kernel target version */
static const struct feature {
uint32_t maj;
uint32_t min;
unsigned cache_feature;
const char feature[12];
const char module[12]; /* check dm-%s */
} _features[] = {
{ 1, 3, CACHE_FEATURE_POLICY_MQ, "policy_mq", "cache-mq" },
{ 1, 8, CACHE_FEATURE_POLICY_SMQ, "policy_smq", "cache-smq" },
};
static const char _lvmconf[] = "global/cache_disabled_features";
static unsigned _attrs = 0;
uint32_t maj, min, patchlevel;
static int _cache_checked = 0;
static int _cache_present = 0;
uint32_t maj, min, patchlevel;
unsigned i;
const struct dm_config_node *cn;
const struct dm_config_value *cv;
const char *str;
if (!_cache_checked) {
_cache_present = target_present(cmd, "cache", 1);
@@ -206,53 +184,11 @@ static int _target_present(struct cmd_context *cmd,
if ((maj < 1) ||
((maj == 1) && (min < 3))) {
_cache_present = 0;
log_error("The cache kernel module is version %u.%u.%u. "
"Version 1.3.0+ is required.",
log_error("The cache kernel module is version %u.%u.%u."
" Version 1.3.0+ is required.",
maj, min, patchlevel);
return 0;
}
for (i = 0; i < DM_ARRAY_SIZE(_features); ++i) {
if (((maj > _features[i].maj) ||
(maj == _features[i].maj && min >= _features[i].min)) &&
(!_features[i].module[0] || module_present(cmd, _features[i].module)))
_attrs |= _features[i].cache_feature;
else
log_very_verbose("Target %s does not support %s.",
_cache_module, _features[i].feature);
}
}
if (attributes) {
if (!_feature_mask) {
/* Support runtime lvm.conf changes, N.B. avoid 32 feature */
if ((cn = find_config_tree_array(cmd, global_cache_disabled_features_CFG, NULL))) {
for (cv = cn->v; cv; cv = cv->next) {
if (cv->type != DM_CFG_STRING) {
log_error("Ignoring invalid string in config file %s.",
_lvmconf);
continue;
}
str = cv->v.str;
if (!*str)
continue;
for (i = 0; i < DM_ARRAY_SIZE(_features); ++i)
if (strcasecmp(str, _features[i].feature) == 0)
_feature_mask |= _features[i].cache_feature;
}
}
_feature_mask = ~_feature_mask;
for (i = 0; i < DM_ARRAY_SIZE(_features); ++i)
if ((_attrs & _features[i].cache_feature) &&
!(_feature_mask & _features[i].cache_feature))
log_very_verbose("Target %s %s support disabled by %s",
_cache_module, _features[i].feature, _lvmconf);
}
*attributes = _attrs & _feature_mask;
}
return _cache_present;
@@ -378,9 +314,7 @@ static int _cache_add_target_line(struct dev_manager *dm,
metadata_uuid,
data_uuid,
origin_uuid,
seg->cleaner_policy ? "cleaner" :
/* undefined policy name -> likely an old "mq" */
cache_pool_seg->policy_name ? : "mq",
seg->cleaner_policy ? "cleaner" : cache_pool_seg->policy_name,
seg->cleaner_policy ? NULL : cache_pool_seg->policy_settings,
cache_pool_seg->chunk_size))
return_0;
@@ -420,7 +354,7 @@ int init_cache_segtypes(struct cmd_context *cmd,
return 0;
}
segtype->name = SEG_TYPE_NAME_CACHE_POOL;
segtype->name = "cache-pool";
segtype->flags = SEG_CACHE_POOL | SEG_CANNOT_BE_ZEROED | SEG_ONLY_EXCLUSIVE;
segtype->ops = &_cache_pool_ops;
@@ -434,7 +368,7 @@ int init_cache_segtypes(struct cmd_context *cmd,
return 0;
}
segtype->name = SEG_TYPE_NAME_CACHE;
segtype->name = "cache";
segtype->flags = SEG_CACHE | SEG_ONLY_EXCLUSIVE;
segtype->ops = &_cache_ops;
@@ -442,8 +376,5 @@ int init_cache_segtypes(struct cmd_context *cmd,
return_0;
log_very_verbose("Initialised segtype: %s", segtype->name);
/* Reset mask for recalc */
_feature_mask = 0;
return 1;
}

View File

@@ -362,8 +362,7 @@ static void _init_logging(struct cmd_context *cmd)
/* Tell device-mapper about our logging */
#ifdef DEVMAPPER_SUPPORT
if (!dm_log_is_non_default())
dm_log_with_errno_init(print_log);
dm_log_with_errno_init(print_log);
#endif
reset_log_duplicated();
reset_lvm_errno(1);
@@ -1070,7 +1069,7 @@ static struct dev_filter *_init_lvmetad_filter_chain(struct cmd_context *cmd)
nr_filt++;
}
/* global regex filter. Optional. */
/* regex filter. Optional. */
if ((cn = find_config_tree_node(cmd, devices_global_filter_CFG, NULL))) {
if (!(filters[nr_filt] = regex_filter_create(cn->v))) {
log_error("Failed to create global regex device filter");
@@ -1079,17 +1078,6 @@ static struct dev_filter *_init_lvmetad_filter_chain(struct cmd_context *cmd)
nr_filt++;
}
/* regex filter. Optional. */
if (!lvmetad_used()) {
if ((cn = find_config_tree_node(cmd, devices_filter_CFG, NULL))) {
if (!(filters[nr_filt] = regex_filter_create(cn->v))) {
log_error("Failed to create regex device filter");
goto bad;
}
nr_filt++;
}
}
/* device type filter. Required. */
if (!(filters[nr_filt] = lvm_type_filter_create(cmd->dev_types))) {
log_error("Failed to create lvm type filter");
@@ -1157,24 +1145,26 @@ bad:
* md component filter -> fw raid filter
*
* - cmd->filter - the filter chain used for lvmetad responses:
* persistent filter -> regex_filter -> usable device filter(FILTER_MODE_POST_LVMETAD)
* persistent filter -> usable device filter(FILTER_MODE_POST_LVMETAD) ->
* regex filter
*
* - cmd->full_filter - the filter chain used for all the remaining situations:
* cmd->lvmetad_filter -> cmd->filter
* lvmetad_filter -> filter
*
* If lvmetad is not used, there's just one filter chain:
* If lvmetad is not used, there's just one filter chain:
*
* - cmd->filter == cmd->full_filter:
* persistent filter -> sysfs filter -> global regex filter ->
* regex_filter -> type filter -> usable device filter(FILTER_MODE_NO_LVMETAD) ->
* mpath component filter -> partitioned filter -> md component filter -> fw raid filter
* persistent filter -> regex filter -> sysfs filter ->
* global regex filter -> type filter ->
* usable device filter(FILTER_MODE_NO_LVMETAD) ->
* mpath component filter -> partitioned filter ->
* md component filter -> fw raid filter
*
*/
int init_filters(struct cmd_context *cmd, unsigned load_persistent_cache)
{
const char *dev_cache;
struct dev_filter *filter = NULL, *filter_components[2] = {0};
int nr_filt;
struct stat st;
const struct dm_config_node *cn;
struct timespec ts, cts;
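For illustration only (not part of the diff): with lvmetad in use, the cmd->full_filter composition described in the comment above reduces to combining the two chains with composite_filter_create(), as done in the hunks below. This is a fragment; cmd, its filter fields and composite_filter_create() come from the surrounding code, and error handling is omitted.

/* Sketch only: compose full_filter from the pre- and post-lvmetad chains. */
struct dev_filter *components[2];

components[0] = cmd->lvmetad_filter;  /* chain applied before lvmetad */
components[1] = cmd->filter;          /* chain applied to lvmetad responses */
cmd->full_filter = composite_filter_create(2, 0, components);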
@@ -1203,26 +1193,26 @@ int init_filters(struct cmd_context *cmd, unsigned load_persistent_cache)
*/
/* filter component 0 */
if (lvmetad_used()) {
nr_filt = 0;
if ((cn = find_config_tree_array(cmd, devices_filter_CFG, NULL))) {
if (!(filter_components[nr_filt] = regex_filter_create(cn->v))) {
log_verbose("Failed to create regex device filter.");
goto bad;
}
nr_filt++;
}
if (!(filter_components[nr_filt] = usable_filter_create(cmd->dev_types, FILTER_MODE_POST_LVMETAD))) {
if (!(filter_components[0] = usable_filter_create(cmd->dev_types, FILTER_MODE_POST_LVMETAD))) {
log_verbose("Failed to create usable device filter.");
goto bad;
}
nr_filt++;
if (!(filter = composite_filter_create(nr_filt, 0, filter_components)))
goto_bad;
} else {
filter = cmd->lvmetad_filter;
filter_components[0] = cmd->lvmetad_filter;
cmd->lvmetad_filter = NULL;
}
/* filter component 1 */
if ((cn = find_config_tree_array(cmd, devices_filter_CFG, NULL))) {
if (!(filter_components[1] = regex_filter_create(cn->v)))
goto_bad;
/* we have two filter components - create composite filter */
if (!(filter = composite_filter_create(2, 0, filter_components)))
goto_bad;
} else
/* we have only one filter component - no need to create composite filter */
filter = filter_components[0];
if (!(dev_cache = find_config_tree_str(cmd, devices_cache_CFG, NULL)))
goto_bad;
@@ -1234,12 +1224,9 @@ int init_filters(struct cmd_context *cmd, unsigned load_persistent_cache)
cmd->filter = filter;
if (lvmetad_used()) {
nr_filt = 0;
filter_components[nr_filt] = cmd->lvmetad_filter;
nr_filt++;
filter_components[nr_filt] = cmd->filter;
nr_filt++;
if (!(cmd->full_filter = composite_filter_create(nr_filt, 0, filter_components)))
filter_components[0] = cmd->lvmetad_filter;
filter_components[1] = cmd->filter;
if (!(cmd->full_filter = composite_filter_create(2, 0, filter_components)))
goto_bad;
} else
cmd->full_filter = filter;
@@ -2192,6 +2179,7 @@ void destroy_toolcontext(struct cmd_context *cmd)
lvmetad_release_token();
lvmetad_disconnect();
lvmpolld_disconnect();
cmd->initialized.connections = 0;
release_log_memory();
activation_exit();

View File

@@ -133,8 +133,6 @@ struct cmd_context {
unsigned lockd_gl_disable:1;
unsigned lockd_vg_disable:1;
unsigned lockd_lv_disable:1;
unsigned lockd_gl_removed:1;
unsigned lockd_vg_rescan:1;
unsigned lockd_vg_default_sh:1;
unsigned lockd_vg_enforce_sh:1;

View File

@@ -581,11 +581,8 @@ int config_file_read(struct dm_config_tree *cft)
if (!(cf->dev = dev_create_file(filename, NULL, NULL, 1)))
return_0;
if (!dev_open_readonly_buffered(cf->dev)) {
dev_destroy_file(cf->dev);
cf->dev = NULL;
if (!dev_open_readonly_buffered(cf->dev))
return_0;
}
}
r = config_file_read_fd(cft, cf->dev, 0, (size_t) info.st_size, 0, 0,
@@ -1694,11 +1691,6 @@ static int _out_prefix_fn(const struct dm_config_node *cn, const char *line, voi
if (cfg_def->comment) {
int pos = 0;
while (_copy_one_line(cfg_def->comment, commentline, &pos, strlen(cfg_def->comment))) {
if ((commentline[0] == '#') && (strlen(commentline) == 1)) {
if (!out->tree_spec->withspaces)
continue;
commentline[0] = '\0';
}
fprintf(out->fp, "%s# %s\n", line, commentline);
/* withsummary prints only the first comment line. */
if (!out->tree_spec->withcomments)
@@ -1721,9 +1713,6 @@ static int _out_prefix_fn(const struct dm_config_node *cn, const char *line, voi
if (cfg_def->flags & CFG_DEFAULT_UNDEFINED)
fprintf(out->fp, "%s# This configuration %s does not have a default value defined.\n", line, node_type_name);
if (cfg_def->flags & CFG_DEFAULT_COMMENTED)
fprintf(out->fp, "%s# This configuration %s has an automatic default value.\n", line, node_type_name);
if ((out->tree_spec->type == CFG_DEF_TREE_FULL) &&
(out->tree_spec->check_status[cn->id] & CFG_USED))
fprintf(out->fp, "%s# Value defined in existing configuration has been used for this setting.\n", line);

View File

@@ -50,7 +50,7 @@ struct profile_params {
struct dm_list profiles; /* list of profiles which are loaded already and which are ready for use */
};
#define CFG_PATH_MAX_LEN 128
#define CFG_PATH_MAX_LEN 64
/*
* Structures used for definition of a configuration tree.
@@ -296,7 +296,5 @@ int get_default_allocation_thin_pool_chunk_size_CFG(struct cmd_context *cmd, str
#define get_default_unconfigured_allocation_thin_pool_chunk_size_CFG NULL
int get_default_allocation_cache_pool_chunk_size_CFG(struct cmd_context *cmd, struct profile *profile);
#define get_default_unconfigured_allocation_cache_pool_chunk_size_CFG NULL
const char *get_default_allocation_cache_policy_CFG(struct cmd_context *cmd, struct profile *profile);
#define get_default_unconfigured_allocation_cache_policy_CFG NULL
#endif

File diff suppressed because it is too large


@@ -63,8 +63,6 @@
#define DEFAULT_MIRROR_LOG_FAULT_POLICY "allocate"
#define DEFAULT_MIRROR_IMAGE_FAULT_POLICY "remove"
#define DEFAULT_MIRROR_MAX_IMAGES 8 /* limited by kernel DM_KCOPYD_MAX_REGIONS */
#define DEFAULT_RAID_MAX_IMAGES 8
#define DEFAULT_RAID_FAULT_POLICY "warn"
#define DEFAULT_DMEVENTD_RAID_LIB "libdevmapper-event-lvm2raid.so"
@@ -119,8 +117,8 @@
#define DEFAULT_CACHE_POOL_CHUNK_SIZE 64 /* KB */
#define DEFAULT_CACHE_POOL_MIN_METADATA_SIZE 2048 /* KB */
#define DEFAULT_CACHE_POOL_MAX_METADATA_SIZE (16 * 1024 * 1024) /* KB */
#define DEFAULT_CACHE_POLICY "mq"
#define DEFAULT_CACHE_MODE "writethrough"
#define DEFAULT_CACHE_POOL_CACHEMODE "writethrough"
#define DEFAULT_CACHE_POOL_POLICY "mq"
#define DEFAULT_UMASK 0077
@@ -201,8 +199,6 @@
#define DEFAULT_REP_LIST_ITEM_SEPARATOR ","
#define DEFAULT_TIME_FORMAT "%Y-%m-%d %T %z"
#define DEFAULT_COMPACT_OUTPUT_COLS ""
#define DEFAULT_LVS_COLS "lv_name,vg_name,lv_attr,lv_size,pool_lv,origin,data_percent,metadata_percent,move_pv,mirror_log,copy_percent,convert_lv"
#define DEFAULT_VGS_COLS "vg_name,pv_count,lv_count,snap_count,vg_attr,vg_size,vg_free"
#define DEFAULT_PVS_COLS "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free"


@@ -16,14 +16,11 @@
#include "lib.h"
#include "str_list.h"
#include <ctype.h>
struct dm_list *str_list_create(struct dm_pool *mem)
{
struct dm_list *sl;
if (!(sl = mem ? dm_pool_alloc(mem, sizeof(struct dm_list))
: dm_malloc(sizeof(struct dm_list)))) {
if (!(sl = dm_pool_alloc(mem, sizeof(struct dm_list)))) {
log_errno(ENOMEM, "str_list allocation failed");
return NULL;
}
@@ -40,8 +37,7 @@ static int _str_list_add_no_dup_check(struct dm_pool *mem, struct dm_list *sll,
if (!str)
return_0;
if (!(sln = mem ? dm_pool_alloc(mem, sizeof(*sln))
: dm_malloc(sizeof(*sln))))
if (!(sln = dm_pool_alloc(mem, sizeof(*sln))))
return_0;
sln->str = str;
@@ -162,101 +158,3 @@ int str_list_lists_equal(const struct dm_list *sll, const struct dm_list *sll2)
return 1;
}
char *str_list_to_str(struct dm_pool *mem, const struct dm_list *list,
const char *delim)
{
size_t delim_len = strlen(delim);
unsigned list_size = dm_list_size(list);
struct dm_str_list *sl;
char *str, *p;
size_t len = 0;
unsigned i = 0;
dm_list_iterate_items(sl, list)
len += strlen(sl->str);
if (list_size > 1)
len += ((list_size - 1) * delim_len);
str = mem ? dm_pool_alloc(mem, len+1) : dm_malloc(len+1);
if (!str) {
log_error("str_list_to_str: string allocation failed.");
return NULL;
}
str[len] = '\0';
p = str;
dm_list_iterate_items(sl, list) {
len = strlen(sl->str);
memcpy(p, sl->str, len);
p += len;
if (++i != list_size) {
memcpy(p, delim, delim_len);
p += delim_len;
}
}
return str;
}
struct dm_list *str_to_str_list(struct dm_pool *mem, const char *str,
const char *delim, int ignore_multiple_delim)
{
size_t delim_len = strlen(delim);
struct dm_list *list;
const char *p1, *p2, *next;
char *str_item;
size_t len;
if (!(list = str_list_create(mem))) {
log_error("str_to_str_list: string list allocation failed.");
return NULL;
}
p1 = p2 = str;
while (*p1) {
if (!(p2 = strstr(p1, delim)))
next = p2 = str + strlen(str);
else
next = p2 + delim_len;
len = p2 - p1;
str_item = mem ? dm_pool_alloc(mem, len+1) : dm_malloc(len+1);
if (!str_item) {
log_error("str_to_str_list: string list item allocation failed.");
goto bad;
}
memcpy(str_item, p1, len);
str_item[len] = '\0';
if (!str_list_add_no_dup_check(mem, list, str_item))
goto_bad;
if (ignore_multiple_delim) {
while (!strncmp(next, delim, delim_len))
next += delim_len;
}
p1 = next;
}
return list;
bad:
if (mem)
dm_pool_free(mem, list);
return NULL;
}
void str_list_destroy(struct dm_list *list, int deallocate_strings)
{
struct dm_str_list *sl, *tmp_sl;
dm_list_iterate_items_safe(sl, tmp_sl, list) {
dm_list_del(&sl->list);
if (deallocate_strings)
dm_free((char *)sl->str);
dm_free(sl);
}
dm_free(list);
}


@@ -30,9 +30,5 @@ int str_list_match_list(const struct dm_list *sll, const struct dm_list *sll2, c
int str_list_lists_equal(const struct dm_list *sll, const struct dm_list *sll2);
int str_list_dup(struct dm_pool *mem, struct dm_list *sllnew,
const struct dm_list *sllold);
char *str_list_to_str(struct dm_pool *mem, const struct dm_list *list, const char *delim);
struct dm_list *str_to_str_list(struct dm_pool *mem, const char *str, const char *delim, int ignore_multiple_delim);
/* Only for lists which were *not* allocated from the mem pool! */
void str_list_destroy(struct dm_list *list, int deallocate_strings);
#endif


@@ -71,16 +71,6 @@ static void _dev_init(struct device *dev, int max_error_count)
dm_list_init(&dev->open_list);
}
void dev_destroy_file(struct device *dev)
{
if (!(dev->flags & DEV_ALLOCED))
return;
dm_free((void *) dm_list_item(dev->aliases.n, struct dm_str_list)->str);
dm_free(dev->aliases.n);
dm_free(dev);
}
struct device *dev_create_file(const char *filename, struct device *dev,
struct dm_str_list *alias, int use_malloc)
{
@@ -341,13 +331,11 @@ static int _add_alias(struct device *dev, const char *path)
if (!dm_list_empty(&dev->aliases)) {
oldpath = dm_list_item(dev->aliases.n, struct dm_str_list)->str;
prefer_old = _compare_paths(path, oldpath);
log_debug_devs("%s: Aliased to %s in device cache%s (%d:%d)",
path, oldpath, prefer_old ? "" : " (preferred name)",
(int) MAJOR(dev->dev), (int) MINOR(dev->dev));
log_debug_devs("%s: Aliased to %s in device cache%s",
path, oldpath, prefer_old ? "" : " (preferred name)");
} else
log_debug_devs("%s: Added to device cache (%d:%d)", path,
(int) MAJOR(dev->dev), (int) MINOR(dev->dev));
log_debug_devs("%s: Added to device cache", path);
if (prefer_old)
dm_list_add(&dev->aliases, &sl->list);
@@ -948,7 +936,6 @@ struct device *dev_cache_get(const char *name, struct dev_filter *f)
{
struct stat buf;
struct device *d = (struct device *) dm_hash_lookup(_cache.names, name);
int info_available = 0;
if (d && (d->flags & DEV_REGULAR))
return d;
@@ -959,8 +946,7 @@ struct device *dev_cache_get(const char *name, struct dev_filter *f)
dm_hash_remove(_cache.names, name);
log_sys_very_verbose("stat", name);
d = NULL;
} else
info_available = 1;
}
if (d && (buf.st_rdev != d->dev)) {
dm_hash_remove(_cache.names, name);
@@ -968,7 +954,7 @@ struct device *dev_cache_get(const char *name, struct dev_filter *f)
}
if (!d) {
_insert(name, info_available ? &buf : NULL, 0, obtain_device_list_from_udev());
_insert(name, &buf, 0, obtain_device_list_from_udev());
d = (struct device *) dm_hash_lookup(_cache.names, name);
if (!d) {
_full_scan(0);


@@ -1,111 +0,0 @@
/*
* Copyright (C) 2015 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "lib.h"
#include "metadata.h"
#include "dev-type.h"
#include <sys/ioctl.h>
#ifdef __linux__
/*
* Interface taken from kernel header arch/s390/include/uapi/asm/dasd.h
*/
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Copyright IBM Corp. 1999, 2000
* EMC Symmetrix ioctl Copyright EMC Corporation, 2008
* Author.........: Nigel Hislop <hislop_nigel@emc.com>
*/
#define DASD_IOCTL_LETTER 'D'
#define DASD_API_VERSION 6
/*
* struct dasd_information2_t
* represents any data about the device, which is visible to userspace,
* including format and features.
*/
typedef struct dasd_information2_t {
unsigned int devno; /* S/390 devno */
unsigned int real_devno; /* for aliases */
unsigned int schid; /* S/390 subchannel identifier */
unsigned int cu_type : 16; /* from SenseID */
unsigned int cu_model : 8; /* from SenseID */
unsigned int dev_type : 16; /* from SenseID */
unsigned int dev_model : 8; /* from SenseID */
unsigned int open_count;
unsigned int req_queue_len;
unsigned int chanq_len; /* length of chanq */
char type[4]; /* from discipline.name, 'none' for unknown */
unsigned int status; /* current device level */
unsigned int label_block; /* where to find the VOLSER */
unsigned int FBA_layout; /* fixed block size (like AIXVOL) */
unsigned int characteristics_size;
unsigned int confdata_size;
char characteristics[64]; /* from read_device_characteristics */
char configuration_data[256]; /* from read_configuration_data */
unsigned int format; /* format info like formatted/cdl/ldl/... */
unsigned int features; /* dasd features like 'ro',... */
unsigned int reserved0; /* reserved for further use ,... */
unsigned int reserved1; /* reserved for further use ,... */
unsigned int reserved2; /* reserved for further use ,... */
unsigned int reserved3; /* reserved for further use ,... */
unsigned int reserved4; /* reserved for further use ,... */
unsigned int reserved5; /* reserved for further use ,... */
unsigned int reserved6; /* reserved for further use ,... */
unsigned int reserved7; /* reserved for further use ,... */
} dasd_information2_t;
#define DASD_FORMAT_CDL 2
/* Get information on a dasd device (enhanced) */
#define BIODASDINFO2 _IOR(DASD_IOCTL_LETTER,3,dasd_information2_t)
/*
* End of included interface.
*/
int dasd_is_cdl_formatted(struct device *dev)
{
int ret = 0;
dasd_information2_t dasd_info2;
if (!dev_open_readonly(dev))
return_0;
if (ioctl(dev->fd, BIODASDINFO2, &dasd_info2)) {
log_sys_error("ioctl BIODASDINFO2", dev_name(dev));
goto out;
}
if (dasd_info2.format == DASD_FORMAT_CDL)
ret = 1;
out:
if (!dev_close(dev))
stack;
return ret;
}
#else
int dasd_is_cdl_formatted(struct device *dev)
{
return 0;
}
#endif


@@ -586,8 +586,12 @@ static void _close(struct device *dev)
log_debug_devs("Closed %s", dev_name(dev));
if (dev->flags & DEV_ALLOCED)
dev_destroy_file(dev);
if (dev->flags & DEV_ALLOCED) {
dm_free((void *) dm_list_item(dev->aliases.n, struct dm_str_list)->
str);
dm_free(dev->aliases.n);
dm_free(dev);
}
}
static int _dev_close(struct device *dev, int immediate)


@@ -363,7 +363,7 @@ static int _native_dev_is_partitioned(struct dev_types *dt, struct device *dev)
return 0;
/* Unpartitioned DASD devices are not supported. */
if ((MAJOR(dev->dev) == dt->dasd_major) && dasd_is_cdl_formatted(dev))
if (MAJOR(dev->dev) == dt->dasd_major)
return 1;
if (!dev_open_readonly_quiet(dev)) {
@@ -651,13 +651,8 @@ static int _wipe_known_signatures_with_blkid(struct device *dev, const char *nam
BLKID_SUBLKS_BADCSUM);
while (!blkid_do_probe(probe)) {
if ((r_wipe = _blkid_wipe(probe, dev, name, types_to_exclude, types_no_prompt, yes, force)) == 1) {
if ((r_wipe = _blkid_wipe(probe, dev, name, types_to_exclude, types_no_prompt, yes, force)) == 1)
(*wiped)++;
if (blkid_probe_step_back(probe)) {
log_error("Failed to step back blkid probe to check just wiped signature.");
goto out;
}
}
/* do not count excluded types */
if (r_wipe != 2)
found++;
@@ -739,20 +734,13 @@ int wipe_known_signatures(struct cmd_context *cmd, struct device *dev,
uint32_t types_no_prompt, int yes, force_t force,
int *wiped)
{
int blkid_wiping_enabled = find_config_tree_bool(cmd, allocation_use_blkid_wiping_CFG, NULL);
#ifdef BLKID_WIPING_SUPPORT
if (blkid_wiping_enabled)
if (find_config_tree_bool(cmd, allocation_use_blkid_wiping_CFG, NULL))
return _wipe_known_signatures_with_blkid(dev, name,
types_to_exclude,
types_no_prompt,
yes, force, wiped);
#endif
if (blkid_wiping_enabled) {
log_warn("allocation/use_blkid_wiping=1 configuration setting is set "
"while LVM is not compiled with blkid wiping support.");
log_warn("Falling back to native LVM signature detection.");
}
return _wipe_known_signatures_with_lvm(dev, name,
types_to_exclude,
types_no_prompt,


@@ -59,7 +59,6 @@ int major_is_scsi_device(struct dev_types *dt, int major);
int dev_is_md(struct device *dev, uint64_t *sb);
int dev_is_swap(struct device *dev, uint64_t *signature);
int dev_is_luks(struct device *dev, uint64_t *signature);
int dasd_is_cdl_formatted(struct device *dev);
/* Signature wiping. */
#define TYPE_LVM1_MEMBER 0x001


@@ -123,7 +123,6 @@ void dev_flush(struct device *dev);
struct device *dev_create_file(const char *filename, struct device *dev,
struct dm_str_list *alias, int use_malloc);
void dev_destroy_file(struct device *dev);
/* Return a valid device name from the alias list; NULL otherwise */
const char *dev_name_confirmed(struct device *dev, int quiet);


@@ -95,7 +95,7 @@ struct segment_type *init_error_segtype(struct cmd_context *cmd)
return_NULL;
segtype->ops = &_error_ops;
segtype->name = SEG_TYPE_NAME_ERROR;
segtype->name = "error";
segtype->flags = SEG_CAN_SPLIT | SEG_VIRTUAL | SEG_CANNOT_BE_ZEROED;
log_very_verbose("Initialised segtype: %s", segtype->name);


@@ -61,8 +61,6 @@ static int _dev_is_fwraid(struct device *dev)
return 0;
}
#define MSG_SKIPPING "%s: Skipping firmware RAID component device"
static int _ignore_fwraid(struct dev_filter *f __attribute__((unused)),
struct device *dev)
{
@@ -74,11 +72,8 @@ static int _ignore_fwraid(struct dev_filter *f __attribute__((unused)),
ret = _dev_is_fwraid(dev);
if (ret == 1) {
if (dev->ext.src == DEV_EXT_NONE)
log_debug_devs(MSG_SKIPPING, dev_name(dev));
else
log_debug_devs(MSG_SKIPPING " [%s:%p]", dev_name(dev),
dev_ext_name(dev), dev->ext.handle);
log_debug_devs("%s: Skipping firmware RAID component device [%s:%p]",
dev_name(dev), dev_ext_name(dev), dev->ext.handle);
return 0;
}


@@ -18,8 +18,6 @@
#ifdef __linux__
#define MSG_SKIPPING "%s: Skipping md component device"
static int _ignore_md(struct dev_filter *f __attribute__((unused)),
struct device *dev)
{
@@ -31,11 +29,8 @@ static int _ignore_md(struct dev_filter *f __attribute__((unused)),
ret = dev_is_md(dev, NULL);
if (ret == 1) {
if (dev->ext.src == DEV_EXT_NONE)
log_debug_devs(MSG_SKIPPING, dev_name(dev));
else
log_debug_devs(MSG_SKIPPING " [%s:%p]", dev_name(dev),
dev_ext_name(dev), dev->ext.handle);
log_debug_devs("%s: Skipping md component device [%s:%p]",
dev_name(dev), dev_ext_name(dev), dev->ext.handle);
return 0;
}


@@ -244,16 +244,11 @@ static int _dev_is_mpath(struct dev_filter *f, struct device *dev)
return 0;
}
#define MSG_SKIPPING "%s: Skipping mpath component device"
static int _ignore_mpath(struct dev_filter *f, struct device *dev)
{
if (_dev_is_mpath(f, dev) == 1) {
if (dev->ext.src == DEV_EXT_NONE)
log_debug_devs(MSG_SKIPPING, dev_name(dev));
else
log_debug_devs(MSG_SKIPPING " [%s:%p]", dev_name(dev),
dev_ext_name(dev), dev->ext.handle);
log_debug_devs("%s: Skipping mpath component device [%s:%p]",
dev_name(dev), dev_ext_name(dev), dev->ext.handle);
return 0;
}


@@ -16,18 +16,13 @@
#include "lib.h"
#include "filter.h"
#define MSG_SKIPPING "%s: Skipping: Partition table signature found"
static int _passes_partitioned_filter(struct dev_filter *f, struct device *dev)
{
struct dev_types *dt = (struct dev_types *) f->private;
if (dev_is_partitioned(dt, dev)) {
if (dev->ext.src == DEV_EXT_NONE)
log_debug_devs(MSG_SKIPPING, dev_name(dev));
else
log_debug_devs(MSG_SKIPPING " [%s:%p]", dev_name(dev),
dev_ext_name(dev), dev->ext.handle);
log_debug_devs("%s: Skipping: Partition table signature found [%s:%p]",
dev_name(dev), dev_ext_name(dev), dev->ext.handle);
return 0;
}


@@ -29,19 +29,22 @@ static int _native_check_pv_min_size(struct device *dev)
/* Check it's accessible */
if (!dev_open_readonly_quiet(dev)) {
log_debug_devs("%s: Skipping: open failed", dev_name(dev));
log_debug_devs("%s: Skipping: open failed [%s:%p]",
dev_name(dev), dev_ext_name(dev), dev->ext.handle);
return 0;
}
/* Check it's not too small */
if (!dev_get_size(dev, &size)) {
log_debug_devs("%s: Skipping: dev_get_size failed", dev_name(dev));
log_debug_devs("%s: Skipping: dev_get_size failed [%s:%p]",
dev_name(dev), dev_ext_name(dev), dev->ext.handle);
goto out;
}
if (size < pv_min_size()) {
log_debug_devs("%s: Skipping: %s", dev_name(dev),
_too_small_to_hold_pv_msg);
log_debug_devs("%s: Skipping: %s [%s:%p]", dev_name(dev),
_too_small_to_hold_pv_msg,
dev_ext_name(dev), dev->ext.handle);
goto out;
}
@@ -112,11 +115,26 @@ static int _passes_usable_filter(struct dev_filter *f, struct device *dev)
{
filter_mode_t mode = *((filter_mode_t *) f->private);
struct dev_usable_check_params ucp = {0};
int r = 1;
int r;
/* check if the device is not too small to hold a PV */
switch (mode) {
case FILTER_MODE_NO_LVMETAD:
/* fall through */
case FILTER_MODE_PRE_LVMETAD:
if (!_check_pv_min_size(dev))
return 0;
break;
case FILTER_MODE_POST_LVMETAD:
/* nothing to do here */
break;
}
/* further checks are done on dm devices only */
if (dm_is_dm_major(MAJOR(dev->dev))) {
switch (mode) {
if (!dm_is_dm_major(MAJOR(dev->dev)))
return 1;
switch (mode) {
case FILTER_MODE_NO_LVMETAD:
ucp.check_empty = 1;
ucp.check_blocked = 1;
@@ -145,25 +163,10 @@ static int _passes_usable_filter(struct dev_filter *f, struct device *dev)
ucp.check_error_target = 0;
ucp.check_reserved = 0;
break;
}
if (!(r = device_is_usable(dev, ucp)))
log_debug_devs("%s: Skipping unusable device.", dev_name(dev));
}
if (r) {
/* check if the device is not too small to hold a PV */
switch (mode) {
case FILTER_MODE_NO_LVMETAD:
/* fall through */
case FILTER_MODE_PRE_LVMETAD:
r = _check_pv_min_size(dev);
break;
case FILTER_MODE_POST_LVMETAD:
/* nothing to do here */
break;
}
}
if (!(r = device_is_usable(dev, ucp)))
log_debug_devs("%s: Skipping unusable device", dev_name(dev));
return r;
}


@@ -164,7 +164,7 @@ int export_pv(struct cmd_context *cmd, struct dm_pool *mem __attribute__((unused
/* Is VG already exported or being exported? */
if (vg && vg_is_exported(vg)) {
/* Does system_id need setting? */
if (!vg->lvm1_system_id || !*vg->lvm1_system_id ||
if ((vg->lvm1_system_id && !*vg->lvm1_system_id) ||
strncmp(vg->lvm1_system_id, EXPORTED_TAG,
sizeof(EXPORTED_TAG) - 1)) {
if (!generate_lvm1_system_id(cmd, (char *)pvd->system_id, EXPORTED_TAG))


@@ -219,7 +219,7 @@ static int _read_linear(struct cmd_context *cmd, struct lv_map *lvm)
struct lv_segment *seg;
struct segment_type *segtype;
if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED)))
if (!(segtype = get_segtype_from_string(cmd, "striped")))
return_0;
while (le < lvm->lv->le_count) {
@@ -281,7 +281,7 @@ static int _read_stripes(struct cmd_context *cmd, struct lv_map *lvm)
total_area_len = lvm->lv->le_count / lvm->stripes;
if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED)))
if (!(segtype = get_segtype_from_string(cmd, "striped")))
return_0;
while (first_area_le < total_area_len) {


@@ -188,7 +188,8 @@ static int _add_stripe_seg(struct dm_pool *mem,
area_len = (usp->devs[0].blocks) / POOL_PE_SIZE;
if (!(segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_STRIPED)))
if (!(segtype = get_segtype_from_string(lv->vg->cmd,
"striped")))
return_0;
if (!(seg = alloc_lv_segment(segtype, lv, *le_cur,
@@ -225,7 +226,7 @@ static int _add_linear_seg(struct dm_pool *mem,
unsigned j;
uint32_t area_len;
if (!(segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_STRIPED)))
if (!(segtype = get_segtype_from_string(lv->vg->cmd, "striped")))
return_0;
for (j = 0; j < usp->num_devs; j++) {


@@ -453,9 +453,8 @@ static struct raw_locn *_find_vg_rlocn(struct device_area *dev_area,
"not match expected name %s.", vgname);
bad:
if ((info = lvmcache_info_from_pvid(dev_area->dev->pvid, 0)) &&
!lvmcache_update_vgname_and_id(info, &vgsummary_orphan))
stack;
if ((info = lvmcache_info_from_pvid(dev_area->dev->pvid, 0)))
lvmcache_update_vgname_and_id(info, &vgsummary_orphan);
return NULL;
}
@@ -654,7 +653,7 @@ static int _vg_write_raw(struct format_instance *fid, struct volume_group *vg,
if ((new_wrap && old_wrap) ||
(rlocn && (new_wrap || old_wrap) && (new_end > rlocn->offset)) ||
(MDA_HEADER_SIZE + (rlocn ? rlocn->size : 0) + mdac->rlocn.size >= mdah->size)) {
(mdac->rlocn.size >= mdah->size)) {
log_error("VG %s metadata too large for circular buffer",
vg->name);
goto out;
@@ -1339,7 +1338,6 @@ static int _text_pv_write(const struct format_type *fmt, struct physical_volume
label = lvmcache_get_label(info);
label->sector = pv->label_sector;
label->dev = pv->dev;
lvmcache_update_pv(info, pv, fmt);
@@ -2146,6 +2144,7 @@ static int _text_pv_add_metadata_area(const struct format_type *fmt,
goto bad;
}
/* Otherwise, give up and take any usable space. */
/* FIXME: We should probably check for some minimum MDA size here. */
else
mda_size = limit - mda_start;
@@ -2242,12 +2241,6 @@ static int _text_pv_add_metadata_area(const struct format_type *fmt,
mda_size, limit_name, limit);
if (mda_size) {
if (mda_size < MDA_SIZE_MIN) {
log_error("Metadata area size too small: %" PRIu64" bytes. "
"It must be at least %u bytes.", mda_size, MDA_SIZE_MIN);
goto bad;
}
/* Wipe metadata area with zeroes. */
if (!dev_set((struct device *) pv->dev, mda_start,
(size_t) ((mda_size > wipe_size) ?


@@ -375,7 +375,7 @@ static int _read_segment(struct logical_volume *lv, const struct dm_config_node
return 0;
}
segtype_str = SEG_TYPE_NAME_STRIPED;
segtype_str = "striped";
if (!dm_config_get_str(sn_child, "type", &segtype_str)) {
log_error("Segment type must be a string.");


@@ -33,7 +33,7 @@ struct segment_type *init_free_segtype(struct cmd_context *cmd)
return_NULL;
segtype->ops = &_freeseg_ops;
segtype->name = SEG_TYPE_NAME_FREE;
segtype->name = "free";
segtype->flags = SEG_VIRTUAL | SEG_CANNOT_BE_ZEROED;
log_very_verbose("Initialised segtype: %s", segtype->name);


@@ -105,8 +105,7 @@ static void _update_lvmcache_orphan(struct lvmcache_info *info)
memcpy(&vgsummary_orphan.vgid, lvmcache_fmt(info)->orphan_vg_name, strlen(lvmcache_fmt(info)->orphan_vg_name));
if (!lvmcache_update_vgname_and_id(info, &vgsummary_orphan))
stack;
lvmcache_update_vgname_and_id(info, &vgsummary_orphan);
}
static struct labeller *_find_labeller(struct device *dev, char *buf,


@@ -118,6 +118,12 @@ static void _flags_str_to_lockd_flags(const char *flags_str, uint32_t *lockd_fla
if (strstr(flags_str, "DUP_GL_LS"))
*lockd_flags |= LD_RF_DUP_GL_LS;
if (strstr(flags_str, "INACTIVE_LS"))
*lockd_flags |= LD_RF_INACTIVE_LS;
if (strstr(flags_str, "ADD_LS_ERROR"))
*lockd_flags |= LD_RF_ADD_LS_ERROR;
if (strstr(flags_str, "WARN_GL_REMOVED"))
*lockd_flags |= LD_RF_WARN_GL_REMOVED;
}
@@ -337,7 +343,7 @@ static int _create_sanlock_lv(struct cmd_context *cmd, struct volume_group *vg,
dm_list_init(&lp.tags);
if (!(lp.segtype = get_segtype_from_string(vg->cmd, SEG_TYPE_NAME_STRIPED)))
if (!(lp.segtype = get_segtype_from_string(vg->cmd, "striped")))
return_0;
lv = lv_create_single(vg, &lp);
@@ -688,43 +694,7 @@ out:
static int _free_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
{
daemon_reply reply;
uint32_t lockd_flags = 0;
int result;
int ret;
if (!_use_lvmlockd)
return 0;
if (!_lvmlockd_connected)
return 0;
reply = _lockd_send("free_vg",
"pid = %d", getpid(),
"vg_name = %s", vg->name,
"vg_lock_type = %s", vg->lock_type,
"vg_lock_args = %s", vg->lock_args,
NULL);
if (!_lockd_result(reply, &result, &lockd_flags)) {
ret = 0;
} else {
ret = (result < 0) ? 0 : 1;
}
if (!ret)
log_error("_free_vg_dlm lvmlockd result %d", result);
daemon_reply_destroy(reply);
return 1;
}
/* called before vg_remove on disk */
static int _busy_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
{
daemon_reply reply;
uint32_t lockd_flags = 0;
uint32_t lockd_flags;
int result;
int ret;
@@ -734,33 +704,25 @@ static int _busy_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
return 0;
/*
* Check that other hosts do not have the VG lockspace started.
* Unlocking the vg lock here preempts the lvmlockd unlock in
* toollib.c which happens too late since the lockspace is
* left here.
*/
reply = _lockd_send("busy_vg",
"pid = %d", getpid(),
"vg_name = %s", vg->name,
"vg_lock_type = %s", vg->lock_type,
"vg_lock_args = %s", vg->lock_args,
NULL);
/* Equivalent to a standard unlock. */
ret = _lockd_request(cmd, "lock_vg",
vg->name, NULL, NULL, NULL, NULL, NULL, "un", NULL,
&result, &lockd_flags);
if (!_lockd_result(reply, &result, &lockd_flags)) {
ret = 0;
} else {
ret = (result < 0) ? 0 : 1;
if (!ret || result < 0) {
log_error("_free_vg_dlm lvmlockd result %d", result);
return 0;
}
if (result == -EBUSY) {
log_error("Lockspace for \"%s\" not stopped on other hosts", vg->name);
goto out;
}
/* Leave the dlm lockspace. */
lockd_stop_vg(cmd, vg);
if (!ret)
log_error("_busy_vg_dlm lvmlockd result %d", result);
out:
daemon_reply_destroy(reply);
return ret;
return 1;
}
/* called before vg_remove on disk */
@@ -777,19 +739,6 @@ static int _free_vg_sanlock(struct cmd_context *cmd, struct volume_group *vg)
if (!_lvmlockd_connected)
return 0;
/*
* vgremove originally held the global lock, but lost it because the
* vgremove command is removing multiple VGs, and removed the VG
* holding the global lock before attempting to remove this VG.
* To avoid this situation, the user should remove the VG holding
* the global lock in a command by itself, or as the last arg in a
* vgremove command that removes multiple VGs.
*/
if (cmd->lockd_gl_removed) {
log_error("Global lock failed: global lock was lost by removing a previous VG.");
return 0;
}
if (!vg->lock_args || !strlen(vg->lock_args)) {
/* Shouldn't happen in general, but maybe in some error cases? */
log_debug("_free_vg_sanlock %s no lock_args", vg->name);
@@ -824,21 +773,8 @@ static int _free_vg_sanlock(struct cmd_context *cmd, struct volume_group *vg)
goto out;
}
/*
* If the global lock has been removed by removing this VG, then:
*
* Print a warning indicating that the global lock should be enabled
* in another remaining sanlock VG.
*
* Do not allow any more VGs to be removed by this command, e.g.
* if a command removes two sanlock VGs, like vgremove foo bar,
* and the global lock existed in foo, do not continue to remove
* VG bar without the global lock. See the corresponding check above.
*/
if (lockd_flags & LD_RF_WARN_GL_REMOVED) {
if (lockd_flags & LD_RF_WARN_GL_REMOVED)
log_warn("VG %s held the sanlock global lock, enable global lock in another VG.", vg->name);
cmd->lockd_gl_removed = 1;
}
/*
* The usleep delay gives sanlock time to close the lock lv,
@@ -854,6 +790,37 @@ static int _free_vg_sanlock(struct cmd_context *cmd, struct volume_group *vg)
return ret;
}
/*
* Tell lvmlockd to forget about an old VG name.
* lvmlockd remembers previous lockd VGs so that it can provide more
* informative error messages (see INACTIVE_LS, ADD_LS_ERROR).
*
* If a new local VG is created with the same name as a previous lockd VG,
* lvmlockd's memory of the previous lockd VG interferes (causes incorrect
* lockd_vg failures).
*
* We could also remove the list of inactive (old) VG names from lvmlockd,
* and then this function would not be needed, but this would also reduce
* the ability to have helpful error messages.
*/
static void _forget_vg_name(struct cmd_context *cmd, struct volume_group *vg)
{
daemon_reply reply;
if (!_use_lvmlockd)
return;
if (!_lvmlockd_connected)
return;
reply = _lockd_send("forget_vg_name",
"pid = %d", getpid(),
"vg_name = %s", vg->name,
NULL);
daemon_reply_destroy(reply);
}
/* vgcreate */
int lockd_init_vg(struct cmd_context *cmd, struct volume_group *vg,
@@ -861,6 +828,7 @@ int lockd_init_vg(struct cmd_context *cmd, struct volume_group *vg,
{
switch (get_lock_type_from_string(lock_type)) {
case LOCK_TYPE_NONE:
_forget_vg_name(cmd, vg);
return 1;
case LOCK_TYPE_CLVM:
return 1;
@@ -874,55 +842,15 @@ int lockd_init_vg(struct cmd_context *cmd, struct volume_group *vg,
}
}
static int _lockd_all_lvs(struct cmd_context *cmd, struct volume_group *vg)
{
struct lv_list *lvl;
dm_list_iterate_items(lvl, &vg->lvs) {
if (!lockd_lv_uses_lock(lvl->lv))
continue;
if (!lockd_lv(cmd, lvl->lv, "ex", 0)) {
log_error("LV %s/%s must be inactive on all hosts.",
vg->name, lvl->lv->name);
return 0;
}
if (!lockd_lv(cmd, lvl->lv, "un", 0)) {
log_error("Failed to unlock LV %s/%s.", vg->name, lvl->lv->name);
return 0;
}
}
return 1;
}
/* vgremove before the vg is removed */
int lockd_free_vg_before(struct cmd_context *cmd, struct volume_group *vg,
int changing)
int lockd_free_vg_before(struct cmd_context *cmd, struct volume_group *vg)
{
int lock_type_num = get_lock_type_from_string(vg->lock_type);
/*
* Check that no LVs are active on other hosts.
* When removing (not changing), each LV is locked
* when it is removed, so they do not need checking here.
*/
if (lock_type_num == LOCK_TYPE_DLM || lock_type_num == LOCK_TYPE_SANLOCK) {
if (changing && !_lockd_all_lvs(cmd, vg)) {
log_error("Cannot change VG %s with active LVs", vg->name);
return 0;
}
}
switch (lock_type_num) {
switch (get_lock_type_from_string(vg->lock_type)) {
case LOCK_TYPE_NONE:
case LOCK_TYPE_CLVM:
return 1;
case LOCK_TYPE_DLM:
/* returning an error will prevent vg_remove() */
return _busy_vg_dlm(cmd, vg);
return 1;
case LOCK_TYPE_SANLOCK:
/* returning an error will prevent vg_remove() */
return _free_vg_sanlock(cmd, vg);
@@ -961,13 +889,9 @@ void lockd_free_vg_final(struct cmd_context *cmd, struct volume_group *vg)
* for starting the lockspace. To use the vg after starting
* the lockspace, follow the standard method which is:
* lock the vg, read/use/write the vg, unlock the vg.
*
* start_init is 1 when the VG is being started after the
* command has done lockd_init_vg(). This tells lvmlockd
* that the VG lockspace being started is new.
*/
int lockd_start_vg(struct cmd_context *cmd, struct volume_group *vg, int start_init)
int lockd_start_vg(struct cmd_context *cmd, struct volume_group *vg)
{
char uuid[64] __attribute__((aligned(8)));
daemon_reply reply;
@@ -989,8 +913,8 @@ int lockd_start_vg(struct cmd_context *cmd, struct volume_group *vg, int start_i
return 0;
}
log_debug("lockd start VG %s lock_type %s init %d",
vg->name, vg->lock_type ? vg->lock_type : "empty", start_init);
log_debug("lockd start VG %s lock_type %s",
vg->name, vg->lock_type ? vg->lock_type : "empty");
if (!id_write_format(&vg->id, uuid, sizeof(uuid)))
return_0;
@@ -1017,7 +941,6 @@ int lockd_start_vg(struct cmd_context *cmd, struct volume_group *vg, int start_i
"vg_uuid = %s", uuid[0] ? uuid : "none",
"version = %d", (int64_t)vg->seqno,
"host_id = %d", host_id,
"opts = %s", start_init ? "start_init" : "none",
NULL);
if (!_lockd_result(reply, &result, NULL)) {
@@ -1334,9 +1257,6 @@ int lockd_gl_create(struct cmd_context *cmd, const char *def_mode, const char *v
return 0;
}
/* --shared with vgcreate does not mean include_shared_vgs */
cmd->include_shared_vgs = 0;
lvmetad_validate_global_cache(cmd, 1);
return 1;
@@ -1437,7 +1357,6 @@ int lockd_gl(struct cmd_context *cmd, const char *def_mode, uint32_t flags)
const char *mode = NULL;
const char *opts = NULL;
uint32_t lockd_flags;
int force_cache_update = 0;
int retries = 0;
int result;
@@ -1482,8 +1401,8 @@ int lockd_gl(struct cmd_context *cmd, const char *def_mode, uint32_t flags)
/* We can continue reading if a shared lock fails. */
if (!strcmp(mode, "sh")) {
log_warn("Reading without shared global lock.");
force_cache_update = 1;
goto allow;
lvmetad_validate_global_cache(cmd, 1);
return 1;
}
log_error("Global lock failed: check that lvmlockd is running.");
@@ -1499,10 +1418,6 @@ int lockd_gl(struct cmd_context *cmd, const char *def_mode, uint32_t flags)
}
}
if (!strcmp(mode, "un"))
return 1;
/*
* ENOLS: no lockspace was found with a global lock.
* The VG with the global lock may not be visible or started yet,
@@ -1510,30 +1425,20 @@ int lockd_gl(struct cmd_context *cmd, const char *def_mode, uint32_t flags)
*
* ESTARTING: the lockspace with the gl is starting.
* The VG with the global lock is starting and should finish shortly.
*
* ELOCKIO: sanlock gets i/o errors when trying to read/write leases
* (This can progress to EVGKILLED.)
*
* EVGKILLED: the sanlock lockspace is being killed after losing
* access to lease storage.
*/
if (result == -ENOLS ||
result == -ESTARTING ||
result == -EVGKILLED ||
result == -ELOCKIO) {
if (result == -ENOLS || result == -ESTARTING) {
if (!strcmp(mode, "un"))
return 1;
/*
* If an ex global lock fails, then the command fails.
*/
if (strcmp(mode, "sh")) {
if (result == -ESTARTING)
log_error("Global lock failed: lockspace is starting");
log_error("Global lock failed: lockspace is starting.");
else if (result == -ENOLS)
log_error("Global lock failed: check that global lockspace is started");
else if (result == -ELOCKIO)
log_error("Global lock failed: storage errors for sanlock leases");
else if (result == -EVGKILLED)
log_error("Global lock failed: storage failed for sanlock leases");
log_error("Global lock failed: check that global lockspace is started.");
else
log_error("Global lock failed: error %d", result);
return 0;
@@ -1547,21 +1452,14 @@ int lockd_gl(struct cmd_context *cmd, const char *def_mode, uint32_t flags)
if (result == -ESTARTING) {
log_warn("Skipping global lock: lockspace is starting");
force_cache_update = 1;
goto allow;
}
if (result == -ELOCKIO || result == -EVGKILLED) {
log_warn("Skipping global lock: storage %s for sanlock leases",
result == -ELOCKIO ? "errors" : "failed");
force_cache_update = 1;
goto allow;
lvmetad_validate_global_cache(cmd, 1);
return 1;
}
if ((lockd_flags & LD_RF_NO_GL_LS) || (lockd_flags & LD_RF_NO_LOCKSPACES)) {
log_warn("Skipping global lock: lockspace not found or started");
force_cache_update = 1;
goto allow;
lvmetad_validate_global_cache(cmd, 1);
return 1;
}
/*
@@ -1577,7 +1475,11 @@ int lockd_gl(struct cmd_context *cmd, const char *def_mode, uint32_t flags)
log_warn("Duplicate sanlock global locks should be corrected");
if (result < 0) {
if (result == -EAGAIN) {
if (ignorelockingfailure()) {
log_debug("Ignore failed locking for global lock");
lvmetad_validate_global_cache(cmd, 1);
return 1;
} else if (result == -EAGAIN) {
/*
* Most of the time, retries should avoid this case.
*/
@@ -1594,8 +1496,9 @@ int lockd_gl(struct cmd_context *cmd, const char *def_mode, uint32_t flags)
}
}
allow:
lvmetad_validate_global_cache(cmd, force_cache_update);
if (!(flags & LDGL_SKIP_CACHE_VALIDATE))
lvmetad_validate_global_cache(cmd, 0);
return 1;
}
@@ -1611,7 +1514,7 @@ int lockd_gl(struct cmd_context *cmd, const char *def_mode, uint32_t flags)
*
* The result of the VG lock operation needs to be saved in lockd_state
* because the result needs to be passed into vg_read so it can be
* assessed in combination with vg->lock_type.
* assessed in combination with vg->lock_state.
*
* The VG lock protects the VG metadata on disk from concurrent access
* among hosts. The VG lock also ensures that the local lvmetad cache
@@ -1645,31 +1548,8 @@ int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
int result;
int ret;
/*
* The result of the VG lock request is saved in lockd_state to be
* passed into vg_read where the lock result is needed once we
* know if this is a local VG or lockd VG.
*/
*lockd_state = 0;
/*
* Use of lockd_vg_rescan.
*
* This is the VG equivalent of using lvmetad_validate_global_cache()
* for the global lock (after failing to acquire the global lock). If
* we fail to acquire the VG lock from lvmlockd, then the lvmlockd
* mechanism has been missed that would have updated the cached lvmetad
* copy of the VG. So, set lockd_vg_rescan to tell the VG reading code
* to treat the lvmetad copy as if the invalid flag had been returned.
* i.e. If a lockd VG is read without a lock, ignore the lvmetad copy
* and read it from disk since we don't know if the cache is stale.
*
* Because lvmlockd requests return an error for local VGs, this will
* be set for local VGs, but it ends up being ignored once the VG is
* read and found to be a local VG.
*/
cmd->lockd_vg_rescan = 0;
if (!is_real_vg(vg_name))
return 1;
@@ -1740,7 +1620,6 @@ int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
*/
if (!_use_lvmlockd) {
*lockd_state |= LDST_FAIL_REQUEST;
cmd->lockd_vg_rescan = 1;
return 1;
}
@@ -1757,7 +1636,6 @@ int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
* this error for local VGs, but we do care for lockd VGs.
*/
*lockd_state |= LDST_FAIL_REQUEST;
cmd->lockd_vg_rescan = 1;
return 1;
}
@@ -1776,15 +1654,12 @@ int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
break;
case -ENOLS:
*lockd_state |= LDST_FAIL_NOLS;
cmd->lockd_vg_rescan = 1;
break;
case -ESTARTING:
*lockd_state |= LDST_FAIL_STARTING;
cmd->lockd_vg_rescan = 1;
break;
default:
*lockd_state |= LDST_FAIL_OTHER;
cmd->lockd_vg_rescan = 1;
}
/*
@@ -1795,16 +1670,6 @@ int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
goto out;
}
/*
* The VG has been removed. This will only happen with a dlm VG
* since a sanlock VG must be stopped everywhere before it's removed.
*/
if (result == -EREMOVED) {
log_error("VG %s lock failed: removed", vg_name);
ret = 1;
goto out;
}
/*
* The lockspace for the VG is starting (the VG must not
* be local), and is not yet ready to do locking. Allow
@@ -1826,22 +1691,40 @@ int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
}
/*
* sanlock is getting i/o errors while reading/writing leases, or the
* lockspace/VG is being killed after failing to renew its lease for
* too long.
* An unused/previous lockspace for the VG was found.
* This means it must be a lockd VG, not local. The
* lockspace needs to be started to be used.
*/
if (result == -EVGKILLED || result == -ELOCKIO) {
const char *problem = (result == -ELOCKIO ? "errors" : "failed");
if ((result == -ENOLS) && (lockd_flags & LD_RF_INACTIVE_LS)) {
if (!strcmp(mode, "un")) {
ret = 1;
goto out;
} else if (!strcmp(mode, "sh")) {
log_warn("VG %s lock skipped: storage %s for sanlock leases", vg_name, problem);
log_warn("VG %s lock skipped: lockspace is inactive", vg_name);
ret = 1;
goto out;
} else {
log_error("VG %s lock failed: storage %s for sanlock leases", vg_name, problem);
log_error("VG %s lock failed: lockspace is inactive", vg_name);
ret = 0;
goto out;
}
}
/*
* An unused lockspace for the VG was found. The previous
* start of the lockspace failed, so we can print a more useful
* error message.
*/
if ((result == -ENOLS) && (lockd_flags & LD_RF_ADD_LS_ERROR)) {
if (!strcmp(mode, "un")) {
ret = 1;
goto out;
} else if (!strcmp(mode, "sh")) {
log_warn("VG %s lock skipped: lockspace start error", vg_name);
ret = 1;
goto out;
} else {
log_error("VG %s lock failed: lockspace start error", vg_name);
ret = 0;
goto out;
}
@@ -1887,6 +1770,11 @@ out:
if ((lockd_flags & LD_RF_DUP_GL_LS) && strcmp(mode, "un"))
log_warn("Duplicate sanlock global lock in VG %s", vg_name);
if (!ret && ignorelockingfailure()) {
log_debug("Ignore failed locking for VG %s", vg_name);
return 1;
}
return ret;
}
@@ -2010,15 +1898,6 @@ int lockd_lv_name(struct cmd_context *cmd, struct volume_group *vg,
return 0;
}
if (result == -EEXIST) {
/*
* This happens if lvchange tries to modify the LV with an ex
* LV lock when the LV is already active with a sh LV lock.
*/
log_error("LV is already locked with incompatible mode: %s/%s", vg->name, lv_name);
return 0;
}
if (result == -EMSGSIZE) {
/* Another host probably extended lvmlock. */
if (!refreshed++) {
@@ -2033,12 +1912,6 @@ int lockd_lv_name(struct cmd_context *cmd, struct volume_group *vg,
return 0;
}
if (result == -EVGKILLED || result == -ELOCKIO) {
const char *problem = (result == -ELOCKIO ? "errors" : "failed");
log_error("LV %s/%s lock failed: storage %s for sanlock leases", vg->name, lv_name, problem);
return 0;
}
if (result < 0) {
log_error("LV %s/%s lock failed: error %d", vg->name, lv_name, result);
return 0;
@@ -2435,6 +2308,7 @@ int lockd_free_lv(struct cmd_context *cmd, struct volume_group *vg,
int lockd_rename_vg_before(struct cmd_context *cmd, struct volume_group *vg)
{
struct lv_list *lvl;
daemon_reply reply;
int result;
int ret;
@@ -2452,9 +2326,18 @@ int lockd_rename_vg_before(struct cmd_context *cmd, struct volume_group *vg)
}
/* Check that no LVs are active on other hosts. */
if (!_lockd_all_lvs(cmd, vg)) {
log_error("Cannot rename VG %s with active LVs", vg->name);
return 0;
dm_list_iterate_items(lvl, &vg->lvs) {
if (!lockd_lv(cmd, lvl->lv, "ex", 0)) {
log_error("LV %s/%s must be inactive on all hosts before vgrename.",
vg->name, lvl->lv->name);
return 0;
}
if (!lockd_lv(cmd, lvl->lv, "un", 0)) {
log_error("Failed to unlock LV %s/%s.", vg->name, lvl->lv->name);
return 0;
}
}
/*
@@ -2477,12 +2360,6 @@ int lockd_rename_vg_before(struct cmd_context *cmd, struct volume_group *vg)
}
daemon_reply_destroy(reply);
/* Other hosts have not stopped the lockspace. */
if (result == -EBUSY) {
log_error("Lockspace for \"%s\" not stopped on other hosts", vg->name);
return 0;
}
if (!ret) {
log_error("lockd_rename_vg_before lvmlockd result %d", result);
@@ -2515,7 +2392,7 @@ int lockd_rename_vg_final(struct cmd_context *cmd, struct volume_group *vg, int
* Depending on the problem that caused the rename to
* fail, it may make sense to not restart the VG here.
*/
if (!lockd_start_vg(cmd, vg, 0))
if (!lockd_start_vg(cmd, vg))
log_error("Failed to restart VG %s lockspace.", vg->name);
return 1;
}
@@ -2555,13 +2432,13 @@ int lockd_rename_vg_final(struct cmd_context *cmd, struct volume_group *vg, int
}
}
if (!lockd_start_vg(cmd, vg, 1))
if (!lockd_start_vg(cmd, vg))
log_error("Failed to start VG %s lockspace.", vg->name);
return 1;
}
const char *lockd_running_lock_type(struct cmd_context *cmd, int *found_multiple)
const char *lockd_running_lock_type(struct cmd_context *cmd)
{
daemon_reply reply;
const char *lock_type = NULL;
@@ -2583,9 +2460,10 @@ const char *lockd_running_lock_type(struct cmd_context *cmd, int *found_multiple
switch (result) {
case -EXFULL:
*found_multiple = 1;
log_error("lvmlockd found multiple lock managers, use --lock-type to select one.");
break;
case -ENOLCK:
log_error("lvmlockd found no lock manager running.");
break;
case LOCK_TYPE_SANLOCK:
log_debug("lvmlockd found sanlock");


@@ -17,7 +17,8 @@
#define LOCKD_SANLOCK_LV_NAME "lvmlock"
/* lockd_gl flags */
#define LDGL_UPDATE_NAMES 0x00000001
#define LDGL_SKIP_CACHE_VALIDATE 0x00000001
#define LDGL_UPDATE_NAMES 0x00000002
/* lockd_lv flags */
#define LDLV_MODE_NO_SH 0x00000001
@@ -28,6 +29,8 @@
#define LD_RF_NO_GL_LS 0x00000002
#define LD_RF_WARN_GL_REMOVED 0x00000004
#define LD_RF_DUP_GL_LS 0x00000008
#define LD_RF_INACTIVE_LS 0x00000010
#define LD_RF_ADD_LS_ERROR 0x00000020
/* lockd_state flags */
#define LDST_EX 0x00000001
@@ -52,7 +55,7 @@ void lvmlockd_disconnect(void);
/* vgcreate/vgremove use init/free */
int lockd_init_vg(struct cmd_context *cmd, struct volume_group *vg, const char *lock_type, int lv_lock_count);
int lockd_free_vg_before(struct cmd_context *cmd, struct volume_group *vg, int changing);
int lockd_free_vg_before(struct cmd_context *cmd, struct volume_group *vg);
void lockd_free_vg_final(struct cmd_context *cmd, struct volume_group *vg);
/* vgrename */
@@ -62,7 +65,7 @@ int lockd_rename_vg_final(struct cmd_context *cmd, struct volume_group *vg, int
/* start and stop the lockspace for a vg */
int lockd_start_vg(struct cmd_context *cmd, struct volume_group *vg, int start_init);
int lockd_start_vg(struct cmd_context *cmd, struct volume_group *vg);
int lockd_stop_vg(struct cmd_context *cmd, struct volume_group *vg);
int lockd_start_wait(struct cmd_context *cmd);
@@ -89,7 +92,7 @@ int lockd_init_lv_args(struct cmd_context *cmd, struct volume_group *vg,
int lockd_free_lv(struct cmd_context *cmd, struct volume_group *vg,
const char *lv_name, struct id *lv_id, const char *lock_args);
const char *lockd_running_lock_type(struct cmd_context *cmd, int *found_multiple);
const char *lockd_running_lock_type(struct cmd_context *cmd);
int handle_sanlock_lv(struct cmd_context *cmd, struct volume_group *vg);
@@ -127,7 +130,7 @@ static inline int lockd_init_vg(struct cmd_context *cmd, struct volume_group *vg
return 1;
}
static inline int lockd_free_vg_before(struct cmd_context *cmd, struct volume_group *vg, int changing)
static inline int lockd_free_vg_before(struct cmd_context *cmd, struct volume_group *vg)
{
return 1;
}
@@ -147,7 +150,7 @@ static inline int lockd_rename_vg_final(struct cmd_context *cmd, struct volume_g
return 1;
}
static inline int lockd_start_vg(struct cmd_context *cmd, struct volume_group *vg, int start_init)
static inline int lockd_start_vg(struct cmd_context *cmd, struct volume_group *vg)
{
return 0;
}
@@ -223,7 +226,7 @@ static inline int lockd_free_lv(struct cmd_context *cmd, struct volume_group *vg
return 1;
}
static inline const char *lockd_running_lock_type(struct cmd_context *cmd, int *found_multiple)
static inline const char *lockd_running_lock_type(struct cmd_context *cmd)
{
log_error("Using a shared lock type requires lvmlockd.");
return NULL;


@@ -19,6 +19,8 @@
#include "lvm-string.h"
#include "activate.h"
#include <signal.h>
/*
* No locking
*/


@@ -49,7 +49,10 @@ static size_t _lvm_errmsg_len = 0;
void init_log_fn(lvm2_log_fn_t log_fn)
{
_lvm2_log_fn = log_fn;
if (log_fn)
_lvm2_log_fn = log_fn;
else
_lvm2_log_fn = NULL;
}
/*
@@ -66,7 +69,7 @@ void init_log_file(const char *log_file, int append)
static const char statfile[] = "/proc/self/stat";
const char *env;
int pid;
unsigned long long starttime;
long long starttime;
FILE *st;
int i = 0;


@@ -16,9 +16,9 @@
#ifndef _LVM_LOGGING_H
#define _LVM_LOGGING_H
__attribute__ ((format(printf, 5, 6)))
void print_log(int level, const char *file, int line, int dm_errno_or_class,
const char *format, ...);
const char *format, ...)
__attribute__ ((format(printf, 5, 6)));
#define LOG_LINE(l, x...) \
print_log(l, __FILE__, __LINE__ , 0, ## x)


@@ -1,5 +1,5 @@
/*
* Copyright (C) 2014-2015 Red Hat, Inc. All rights reserved.
* Copyright (C) 2014 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -29,17 +29,7 @@
#define DM_HINT_OVERHEAD_PER_BLOCK 8 /* bytes */
#define DM_MAX_HINT_WIDTH (4+16) /* bytes. FIXME Configurable? */
int cache_mode_is_set(const struct lv_segment *seg)
{
if (seg_is_cache(seg))
seg = first_seg(seg->pool_lv);
return (seg->feature_flags & (DM_CACHE_FEATURE_WRITEBACK |
DM_CACHE_FEATURE_WRITETHROUGH |
DM_CACHE_FEATURE_PASSTHROUGH)) ? 1 : 0;
}
const char *get_cache_mode_name(const struct lv_segment *seg)
const char *get_cache_pool_cachemode_name(const struct lv_segment *seg)
{
if (seg->feature_flags & DM_CACHE_FEATURE_WRITEBACK)
return "writeback";
@@ -56,68 +46,22 @@ const char *get_cache_mode_name(const struct lv_segment *seg)
return NULL;
}
int cache_set_mode(struct lv_segment *seg, const char *str)
int set_cache_pool_feature(uint64_t *feature_flags, const char *str)
{
struct cmd_context *cmd = seg->lv->vg->cmd;
int id;
uint64_t mode;
if (!str && !seg_is_cache(seg))
return 1; /* Defaults only for cache */
if (seg_is_cache(seg))
seg = first_seg(seg->pool_lv);
if (!str) {
if (cache_mode_is_set(seg))
return 1; /* Default already set in cache pool */
id = allocation_cache_mode_CFG;
/* If present, check backward compatible settings */
if (!find_config_node(cmd, cmd->cft, id) &&
find_config_node(cmd, cmd->cft, allocation_cache_pool_cachemode_CFG))
id = allocation_cache_pool_cachemode_CFG;
str = find_config_tree_str(cmd, id, NULL);
}
if (!strcmp(str, "writeback"))
mode = DM_CACHE_FEATURE_WRITEBACK;
*feature_flags |= DM_CACHE_FEATURE_WRITEBACK;
else if (!strcmp(str, "writethrough"))
mode = DM_CACHE_FEATURE_WRITETHROUGH;
else if (!strcmp(str, "passthrough"))
mode = DM_CACHE_FEATURE_PASSTHROUGH;
*feature_flags |= DM_CACHE_FEATURE_WRITETHROUGH;
else if (!strcmp(str, "passthrough"))
*feature_flags |= DM_CACHE_FEATURE_PASSTHROUGH;
else {
log_error("Cannot set unknown cache mode \"%s\".", str);
log_error("Cache pool feature \"%s\" is unknown.", str);
return 0;
}
seg->feature_flags &= ~(DM_CACHE_FEATURE_WRITEBACK |
DM_CACHE_FEATURE_WRITETHROUGH |
DM_CACHE_FEATURE_PASSTHROUGH);
seg->feature_flags |= mode;
return 1;
}
/*
* At least warn a user if certain cache stacks may present some problems
*/
void cache_check_for_warns(const struct lv_segment *seg)
{
struct logical_volume *origin_lv = seg_lv(seg, 0);
if (lv_is_raid(origin_lv) &&
first_seg(seg->pool_lv)->feature_flags & DM_CACHE_FEATURE_WRITEBACK)
log_warn("WARNING: Data redundancy is lost with writeback "
"caching of raid logical volume!");
if (lv_is_thin_pool_data(seg->lv))
log_warn("WARNING: Cached thin pool's data cannot be currently "
"resized and require manual uncache before resize!");
}
int update_cache_pool_params(const struct segment_type *segtype,
struct volume_group *vg, unsigned attr,
int passed_args, uint32_t pool_data_extents,
@@ -261,7 +205,7 @@ struct logical_volume *lv_cache_create(struct logical_volume *pool_lv,
if (lv_is_thin_pool(cache_lv))
cache_lv = seg_lv(first_seg(cache_lv), 0); /* cache _tdata */
if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_CACHE)))
if (!(segtype = get_segtype_from_string(cmd, "cache")))
return_NULL;
if (!insert_layer_for_lv(cmd, cache_lv, CACHE, "_corig"))
@@ -407,7 +351,7 @@ int lv_cache_remove(struct logical_volume *cache_lv)
/* Replace 'error' with 'cache' segtype */
cache_seg = first_seg(corigin_lv);
if (!(cache_seg->segtype = get_segtype_from_string(corigin_lv->vg->cmd, SEG_TYPE_NAME_CACHE)))
if (!(cache_seg->segtype = get_segtype_from_string(corigin_lv->vg->cmd, "cache")))
return_0;
if (!(cache_seg->areas = dm_pool_zalloc(cache_lv->vg->vgmem, sizeof(*cache_seg->areas))))
@@ -451,103 +395,36 @@ int lv_is_cache_origin(const struct logical_volume *lv)
return seg && lv_is_cache(seg->lv) && !lv_is_pending_delete(seg->lv) && (seg_lv(seg, 0) == lv);
}
static const char *_get_default_cache_policy(struct cmd_context *cmd)
{
const struct segment_type *segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_CACHE);
unsigned attr = ~0;
const char *def = NULL;
if (!segtype ||
!segtype->ops->target_present ||
!segtype->ops->target_present(cmd, NULL, &attr)) {
log_warn("WARNING: Cannot detect default cache policy, using \""
DEFAULT_CACHE_POLICY "\".");
return DEFAULT_CACHE_POLICY;
}
if (attr & CACHE_FEATURE_POLICY_SMQ)
def = "smq";
else if (attr & CACHE_FEATURE_POLICY_MQ)
def = "mq";
else {
log_error("Default cache policy is not available.");
return NULL;
}
log_debug_metadata("Detected default cache_policy \"%s\".", def);
return def;
}
int cache_set_policy(struct lv_segment *seg, const char *name,
const struct dm_config_tree *settings)
int lv_cache_set_policy(struct logical_volume *lv, const char *name,
const struct dm_config_tree *settings)
{
struct dm_config_node *cn;
const struct dm_config_node *cns;
struct dm_config_tree *old = NULL, *new = NULL, *tmp = NULL;
int r = 0;
const int passed_seg_is_cache = seg_is_cache(seg);
struct lv_segment *seg = first_seg(lv);
if (passed_seg_is_cache)
if (lv_is_cache(lv))
seg = first_seg(seg->pool_lv);
if (name) {
if (!(seg->policy_name = dm_pool_strdup(seg->lv->vg->vgmem, name))) {
log_error("Failed to duplicate policy name.");
return 0;
}
} else if (!seg->policy_name && passed_seg_is_cache) {
if (!(seg->policy_name = find_config_tree_str(seg->lv->vg->cmd, allocation_cache_policy_CFG, NULL)) &&
!(seg->policy_name = _get_default_cache_policy(seg->lv->vg->cmd)))
return_0;
if (seg->policy_settings) {
if (!(old = dm_config_create()))
goto_out;
if (!(new = dm_config_create()))
goto_out;
new->root = settings->root;
old->root = seg->policy_settings;
new->cascade = old;
if (!(tmp = dm_config_flatten(new)))
goto_out;
}
if (settings) {
if (!seg->policy_name) {
log_error(INTERNAL_ERROR "Can't set policy settings without policy name.");
return 0;
}
if ((cn = dm_config_find_node((tmp) ? tmp->root : settings->root, "policy_settings")) &&
!(seg->policy_settings = dm_config_clone_node_with_mem(lv->vg->vgmem, cn, 0)))
goto_out;
if (seg->policy_settings) {
if (!(old = dm_config_create()))
goto_out;
if (!(new = dm_config_create()))
goto_out;
new->root = settings->root;
old->root = seg->policy_settings;
new->cascade = old;
if (!(tmp = dm_config_flatten(new)))
goto_out;
}
if ((cn = dm_config_find_node((tmp) ? tmp->root : settings->root, "policy_settings")) &&
!(seg->policy_settings = dm_config_clone_node_with_mem(seg->lv->vg->vgmem, cn, 0)))
goto_out;
} else if (passed_seg_is_cache && /* Look for command's profile cache_policies */
(cns = find_config_tree_node(seg->lv->vg->cmd, allocation_cache_settings_CFG_SECTION, NULL))) {
/* Try to find our section for given policy */
for (cn = cns->child; cn; cn = cn->sib) {
/* Only matching section names */
if (cn->v || strcmp(cn->key, seg->policy_name) != 0)
continue;
if (!cn->child)
break;
if (!(new = dm_config_create()))
goto_out;
if (!(new->root = dm_config_clone_node_with_mem(new->mem,
cn->child, 1)))
goto_out;
if (!(seg->policy_settings = dm_config_create_node(new, "policy_settings")))
goto_out;
seg->policy_settings->child = new->root;
break; /* Only first match counts */
}
if (name && !(seg->policy_name = dm_pool_strdup(lv->vg->vgmem, name))) {
log_error("Failed to duplicate policy name.");
goto out;
}
restart: /* remove any 'default' nodes */


@@ -29,7 +29,7 @@ static struct utsname _utsname;
static int _utsinit = 0;
static char *_format_pvsegs(struct dm_pool *mem, const struct lv_segment *seg,
int range_format, int metadata_areas_only)
int range_format)
{
unsigned int s;
const char *name = NULL;
@@ -41,19 +41,13 @@ static char *_format_pvsegs(struct dm_pool *mem, const struct lv_segment *seg,
return NULL;
}
if (metadata_areas_only && (!seg_is_raid(seg) || lv_is_raid_metadata(seg->lv) || lv_is_raid_image(seg->lv)))
goto out;
for (s = 0; s < seg->area_count; s++) {
switch (metadata_areas_only ? seg_metatype(seg, s) : seg_type(seg, s)) {
switch (seg_type(seg, s)) {
case AREA_LV:
name = metadata_areas_only ? seg_metalv(seg, s)->name : seg_lv(seg, s)->name;
extent = metadata_areas_only ? seg_le(seg, s) : 0;
name = seg_lv(seg, s)->name;
extent = seg_le(seg, s);
break;
case AREA_PV:
/* Raid metadata never uses PVs directly */
if (metadata_areas_only)
continue;
name = dev_name(seg_dev(seg, s));
extent = seg_pe(seg, s);
break;
@@ -85,7 +79,7 @@ static char *_format_pvsegs(struct dm_pool *mem, const struct lv_segment *seg,
if (range_format) {
if (dm_snprintf(extent_str, sizeof(extent_str),
FMTu32, metadata_areas_only ? extent + seg_metalv(seg, s)->le_count - 1 : extent + seg->area_len - 1) < 0) {
FMTu32, extent + seg->area_len - 1) < 0) {
log_error("Extent number dm_snprintf failed");
return NULL;
}
@@ -102,7 +96,6 @@ static char *_format_pvsegs(struct dm_pool *mem, const struct lv_segment *seg,
}
}
out:
if (!dm_pool_grow_object(mem, "\0", 1)) {
log_error("dm_pool_grow_object failed");
return NULL;
@@ -113,22 +106,12 @@ out:
char *lvseg_devices(struct dm_pool *mem, const struct lv_segment *seg)
{
return _format_pvsegs(mem, seg, 0, 0);
}
char *lvseg_metadata_devices(struct dm_pool *mem, const struct lv_segment *seg)
{
return _format_pvsegs(mem, seg, 0, 1);
return _format_pvsegs(mem, seg, 0);
}
char *lvseg_seg_pe_ranges(struct dm_pool *mem, const struct lv_segment *seg)
{
return _format_pvsegs(mem, seg, 1, 0);
}
char *lvseg_seg_metadata_le_ranges(struct dm_pool *mem, const struct lv_segment *seg)
{
return _format_pvsegs(mem, seg, 1, 1);
return _format_pvsegs(mem, seg, 1);
}
char *lvseg_tags_dup(const struct lv_segment *seg)
@@ -148,7 +131,7 @@ char *lvseg_discards_dup(struct dm_pool *mem, const struct lv_segment *seg)
char *lvseg_cachemode_dup(struct dm_pool *mem, const struct lv_segment *seg)
{
const char *name = get_cache_mode_name(seg);
const char *name = get_cache_pool_cachemode_name(seg);
if (!name)
return_NULL;
@@ -236,37 +219,21 @@ uint32_t lv_kernel_read_ahead(const struct logical_volume *lv)
return info.read_ahead;
}
static char *_do_lv_origin_dup(struct dm_pool *mem, const struct logical_volume *lv,
int uuid)
{
struct logical_volume *origin;
if (lv_is_cow(lv))
origin = origin_from_cow(lv);
else if (lv_is_cache(lv) && first_seg(lv)->origin)
origin = first_seg(lv)->origin;
else if (lv_is_thin_volume(lv) && first_seg(lv)->origin)
origin = first_seg(lv)->origin;
else if (lv_is_thin_volume(lv) && first_seg(lv)->external_lv)
origin = first_seg(lv)->external_lv;
else
return NULL;
if (uuid)
return lv_uuid_dup(mem, origin);
else
return lv_name_dup(mem, origin);
}
char *lv_origin_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
return _do_lv_origin_dup(mem, lv, 0);
}
if (lv_is_cow(lv))
return lv_name_dup(mem, origin_from_cow(lv));
char *lv_origin_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
return _do_lv_origin_dup(mem, lv, 1);
if (lv_is_cache(lv) && first_seg(lv)->origin)
return lv_name_dup(mem, first_seg(lv)->origin);
if (lv_is_thin_volume(lv) && first_seg(lv)->origin)
return lv_name_dup(mem, first_seg(lv)->origin);
if (lv_is_thin_volume(lv) && first_seg(lv)->external_lv)
return lv_name_dup(mem, first_seg(lv)->external_lv);
return NULL;
}
char *lv_name_dup(struct dm_pool *mem, const struct logical_volume *lv)
@@ -325,112 +292,43 @@ char *lv_modules_dup(struct dm_pool *mem, const struct logical_volume *lv)
return tags_format_and_copy(mem, modules);
}
static char *_do_lv_mirror_log_dup(struct dm_pool *mem, const struct logical_volume *lv,
int uuid)
{
struct lv_segment *seg;
dm_list_iterate_items(seg, &lv->segments) {
if (seg_is_mirrored(seg) && seg->log_lv) {
if (uuid)
return lv_uuid_dup(mem, seg->log_lv);
else
return lv_name_dup(mem, seg->log_lv);
}
}
return NULL;
}
char *lv_mirror_log_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
return _do_lv_mirror_log_dup(mem, lv, 0);
}
char *lv_mirror_log_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
return _do_lv_mirror_log_dup(mem, lv, 1);
}
static char *_do_lv_pool_lv_dup(struct dm_pool *mem, const struct logical_volume *lv,
int uuid)
{
struct lv_segment *seg;
dm_list_iterate_items(seg, &lv->segments) {
if (seg->pool_lv &&
(seg_is_thin_volume(seg) || seg_is_cache(seg))) {
if (uuid)
return lv_uuid_dup(mem, seg->pool_lv);
else
return lv_name_dup(mem, seg->pool_lv);
}
}
dm_list_iterate_items(seg, &lv->segments)
if (seg_is_mirrored(seg) && seg->log_lv)
return dm_pool_strdup(mem, seg->log_lv->name);
return NULL;
}
char *lv_pool_lv_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
return _do_lv_pool_lv_dup(mem, lv, 0);
}
struct lv_segment *seg;
char *lv_pool_lv_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
return _do_lv_pool_lv_dup(mem, lv, 1);
}
static char *_do_lv_data_lv_dup(struct dm_pool *mem, const struct logical_volume *lv,
int uuid)
{
struct lv_segment *seg = (lv_is_thin_pool(lv) || lv_is_cache_pool(lv)) ?
first_seg(lv) : NULL;
if (seg) {
if (uuid)
return lv_uuid_dup(mem, seg_lv(seg, 0));
else
return lv_name_dup(mem, seg_lv(seg, 0));
}
dm_list_iterate_items(seg, &lv->segments)
if (seg->pool_lv &&
(seg_is_thin_volume(seg) || seg_is_cache(seg)))
return dm_pool_strdup(mem, seg->pool_lv->name);
return NULL;
}
char *lv_data_lv_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
return _do_lv_data_lv_dup(mem, lv, 0);
}
char *lv_data_lv_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
return _do_lv_data_lv_dup(mem, lv, 1);
}
static char *_do_lv_metadata_lv_dup(struct dm_pool *mem, const struct logical_volume *lv,
int uuid)
{
struct lv_segment *seg = (lv_is_thin_pool(lv) || lv_is_cache_pool(lv)) ?
first_seg(lv) : NULL;
if (seg) {
if (uuid)
return lv_uuid_dup(mem, seg->metadata_lv);
else
return lv_name_dup(mem, seg->metadata_lv);
}
return NULL;
return seg ? dm_pool_strdup(mem, seg_lv(seg, 0)->name) : NULL;
}
char *lv_metadata_lv_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
return _do_lv_metadata_lv_dup(mem, lv, 0);
}
struct lv_segment *seg = (lv_is_thin_pool(lv) || lv_is_cache_pool(lv)) ?
first_seg(lv) : NULL;
char *lv_metadata_lv_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
return _do_lv_metadata_lv_dup(mem, lv, 1);
return seg ? dm_pool_strdup(mem, seg->metadata_lv->name) : NULL;
}
const char *lv_layer(const struct logical_volume *lv)
@@ -460,8 +358,7 @@ int lv_kernel_major(const struct logical_volume *lv)
return -1;
}
static char *_do_lv_convert_lv_dup(struct dm_pool *mem, const struct logical_volume *lv,
int uuid)
char *lv_convert_lv_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
struct lv_segment *seg;
@@ -470,32 +367,17 @@ static char *_do_lv_convert_lv_dup(struct dm_pool *mem, const struct logical_vol
/* Temporary mirror is always area_num == 0 */
if (seg_type(seg, 0) == AREA_LV &&
is_temporary_mirror_layer(seg_lv(seg, 0))) {
if (uuid)
return lv_uuid_dup(mem, seg_lv(seg, 0));
else
return lv_name_dup(mem, seg_lv(seg, 0));
}
is_temporary_mirror_layer(seg_lv(seg, 0)))
return dm_pool_strdup(mem, seg_lv(seg, 0)->name);
}
return NULL;
}
char *lv_convert_lv_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
return _do_lv_convert_lv_dup(mem, lv, 0);
}
char *lv_convert_lv_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
return _do_lv_convert_lv_dup(mem, lv, 1);
}
static char *_do_lv_move_pv_dup(struct dm_pool *mem, const struct logical_volume *lv,
int uuid)
char *lv_move_pv_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
struct logical_volume *mimage0_lv;
struct lv_segment *seg;
struct pv_segment *pvseg;
const struct device *dev;
dm_list_iterate_items(seg, &lv->segments) {
if (seg->status & PVMOVE) {
@@ -506,30 +388,17 @@ static char *_do_lv_move_pv_dup(struct dm_pool *mem, const struct logical_volume
"Bad pvmove structure");
return NULL;
}
pvseg = seg_pvseg(first_seg(mimage0_lv), 0);
dev = seg_dev(first_seg(mimage0_lv), 0);
} else /* Segment pvmove */
pvseg = seg_pvseg(seg, 0);
dev = seg_dev(seg, 0);
if (uuid)
return pv_uuid_dup(mem, pvseg->pv);
else
return pv_name_dup(mem, pvseg->pv);
return dm_pool_strdup(mem, dev_name(dev));
}
}
return NULL;
}
char *lv_move_pv_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
return _do_lv_move_pv_dup(mem, lv, 0);
}
char *lv_move_pv_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
return _do_lv_move_pv_dup(mem, lv, 1);
}
uint64_t lv_origin_size(const struct logical_volume *lv)
{
struct lv_segment *seg;
@@ -610,9 +479,9 @@ char *lv_dmpath_dup(struct dm_pool *mem, const struct logical_volume *lv)
return repstr;
}
char *lv_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv)
char *lv_uuid_dup(const struct logical_volume *lv)
{
return id_format_and_copy(mem ? mem : lv->vg->vgmem, &lv->lvid.id[1]);
return id_format_and_copy(lv->vg->vgmem, &lv->lvid.id[1]);
}
char *lv_tags_dup(const struct logical_volume *lv)
@@ -665,7 +534,6 @@ int lv_raid_image_in_sync(const struct logical_volume *lv)
if ((seg = first_seg(lv)))
raid_seg = get_only_segment_using_this_lv(seg->lv);
if (!raid_seg) {
log_error("Failed to find RAID segment for %s", lv->name);
return 0;


@@ -35,8 +35,8 @@ struct logical_volume {
int32_t major;
int32_t minor;
uint64_t size; /* Sectors visible */
uint32_t le_count; /* Logical extents visible */
uint64_t size; /* Sectors */
uint32_t le_count;
uint32_t origin_count;
uint32_t external_count;
@@ -62,34 +62,25 @@ uint64_t lv_size(const struct logical_volume *lv);
uint64_t lv_metadata_size(const struct logical_volume *lv);
char *lv_attr_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_with_info_and_seg_status *lvdm);
char *lv_attr_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_uuid_dup(const struct logical_volume *lv);
char *lv_tags_dup(const struct logical_volume *lv);
char *lv_path_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_dmpath_dup(struct dm_pool *mem, const struct logical_volume *lv);
uint64_t lv_origin_size(const struct logical_volume *lv);
char *lv_move_pv_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_move_pv_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_convert_lv_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_convert_lv_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv);
int lv_kernel_major(const struct logical_volume *lv);
int lv_kernel_minor(const struct logical_volume *lv);
char *lv_mirror_log_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_mirror_log_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_data_lv_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_data_lv_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_metadata_lv_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_metadata_lv_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_pool_lv_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_pool_lv_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_modules_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_name_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_fullname_dup(struct dm_pool *mem, const struct logical_volume *lv);
struct logical_volume *lv_parent(const struct logical_volume *lv);
char *lv_parent_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_origin_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_origin_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv);
uint32_t lv_kernel_read_ahead(const struct logical_volume *lv);
const char *lvseg_name(const struct lv_segment *seg);
uint64_t lvseg_start(const struct lv_segment *seg);
@@ -101,9 +92,7 @@ char *lvseg_cachemode_dup(struct dm_pool *mem, const struct lv_segment *seg);
char *lvseg_monitor_dup(struct dm_pool *mem, const struct lv_segment *seg);
char *lvseg_tags_dup(const struct lv_segment *seg);
char *lvseg_devices(struct dm_pool *mem, const struct lv_segment *seg);
char *lvseg_metadata_devices(struct dm_pool *mem, const struct lv_segment *seg);
char *lvseg_seg_pe_ranges(struct dm_pool *mem, const struct lv_segment *seg);
char *lvseg_seg_metadata_le_ranges(struct dm_pool *mem, const struct lv_segment *seg);
char *lv_time_dup(struct dm_pool *mem, const struct logical_volume *lv, int iso_mode);
char *lv_host_dup(struct dm_pool *mem, const struct logical_volume *lv);
int lv_set_creation(struct logical_volume *lv,


@@ -129,8 +129,6 @@ enum {
LV_TYPE_RAID6_ZR,
LV_TYPE_RAID6_NR,
LV_TYPE_RAID6_NC,
LV_TYPE_LOCKD,
LV_TYPE_SANLOCK
};
static const char *_lv_type_names[] = {
@@ -175,8 +173,6 @@ static const char *_lv_type_names[] = {
[LV_TYPE_RAID6_ZR] = SEG_TYPE_NAME_RAID6_ZR,
[LV_TYPE_RAID6_NR] = SEG_TYPE_NAME_RAID6_NR,
[LV_TYPE_RAID6_NC] = SEG_TYPE_NAME_RAID6_NC,
[LV_TYPE_LOCKD] = "lockd",
[LV_TYPE_SANLOCK] = "sanlock",
};
static int _lv_layout_and_role_mirror(struct dm_pool *mem,
@@ -228,7 +224,7 @@ static int _lv_layout_and_role_raid(struct dm_pool *mem,
int *public_lv)
{
int top_level = 0;
const struct segment_type *segtype;
const char *seg_name;
/* non-top-level LVs */
if (lv_is_raid_image(lv)) {
@@ -255,45 +251,43 @@ static int _lv_layout_and_role_raid(struct dm_pool *mem,
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID]))
goto_bad;
segtype = first_seg(lv)->segtype;
if (segtype_is_raid1(segtype)) {
if (!strcmp(first_seg(lv)->segtype->name, SEG_TYPE_NAME_RAID1)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID1]))
goto_bad;
} else if (segtype_is_raid10(segtype)) {
} else if (!strcmp(first_seg(lv)->segtype->name, SEG_TYPE_NAME_RAID10)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID10]))
goto_bad;
} else if (segtype_is_raid4(segtype)) {
} else if (!strcmp(first_seg(lv)->segtype->name, SEG_TYPE_NAME_RAID4)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID4]))
goto_bad;
} else if (segtype_is_any_raid5(segtype)) {
} else if (!strncmp(seg_name = first_seg(lv)->segtype->name, SEG_TYPE_NAME_RAID5, strlen(SEG_TYPE_NAME_RAID5))) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID5]))
goto_bad;
if (segtype_is_raid5_la(segtype)) {
if (!strcmp(seg_name, SEG_TYPE_NAME_RAID5_LA)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID5_LA]))
goto_bad;
} else if (segtype_is_raid5_ra(segtype)) {
} else if (!strcmp(seg_name, SEG_TYPE_NAME_RAID5_RA)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID5_RA]))
goto_bad;
} else if (segtype_is_raid5_ls(segtype)) {
} else if (!strcmp(seg_name, SEG_TYPE_NAME_RAID5_LS)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID5_LS]))
goto_bad;
} else if (segtype_is_raid5_rs(segtype)) {
} else if (!strcmp(seg_name, SEG_TYPE_NAME_RAID5_RS)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID5_RS]))
goto_bad;
}
} else if (segtype_is_any_raid6(segtype)) {
} else if (!strncmp(seg_name = first_seg(lv)->segtype->name, SEG_TYPE_NAME_RAID6, strlen(SEG_TYPE_NAME_RAID6))) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID6]))
goto_bad;
if (segtype_is_raid6_zr(segtype)) {
if (!strcmp(seg_name, SEG_TYPE_NAME_RAID6_ZR)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID6_ZR]))
goto_bad;
} else if (segtype_is_raid6_nr(segtype)) {
} else if (!strcmp(seg_name, SEG_TYPE_NAME_RAID6_NR)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID6_NR]))
goto_bad;
} else if (segtype_is_raid6_nc(segtype)) {
} else if (!strcmp(seg_name, SEG_TYPE_NAME_RAID6_NC)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID6_NC]))
goto_bad;
}
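To make the branches above concrete, the layout string list built for a few top-level RAID LVs would be (values follow directly from the code; the reporter renders the list comma-separated):

/* top-level segtype -> lv_layout value:
 *   raid1     -> "raid,raid1"
 *   raid10    -> "raid,raid10"
 *   raid5_ls  -> "raid,raid5,raid5_ls"
 *   raid6_zr  -> "raid,raid6,raid6_zr"
 */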
@@ -509,13 +503,6 @@ int lv_layout_and_role(struct dm_pool *mem, const struct logical_volume *lv,
if (!_lv_layout_and_role_thick_origin_snapshot(mem, lv, *layout, *role, &public_lv))
goto_bad;
if (lv_is_lockd_sanlock_lv(lv)) {
if (!str_list_add_no_dup_check(mem, *role, _lv_type_names[LV_TYPE_LOCKD]) ||
!str_list_add_no_dup_check(mem, *role, _lv_type_names[LV_TYPE_SANLOCK]))
goto_bad;
public_lv = 0;
}
/*
* If layout not yet determined, it must be either
* linear or striped or mixture of these two.
@@ -791,7 +778,7 @@ int get_default_region_size(struct cmd_context *cmd)
if (region_size & (region_size - 1)) {
region_size = _round_down_pow2(region_size);
log_verbose("Reducing region size to %u kiB (power of 2).",
log_verbose("Reducing mirror region size to %u kiB (power of 2).",
region_size / 2);
}
@@ -941,7 +928,7 @@ dm_percent_t copy_percent(const struct logical_volume *lv)
dm_list_iterate_items(seg, &lv->segments) {
denominator += seg->area_len;
/* FIXME Generalise name of 'extents_copied' field */
/* FIXME Generalise name of 'extents_copied' field */
if ((seg_is_raid(seg) || seg_is_mirrored(seg)) &&
(seg->area_count > 1))
numerator += seg->extents_copied;
@@ -949,27 +936,7 @@ dm_percent_t copy_percent(const struct logical_volume *lv)
numerator += seg->area_len;
}
return denominator ? dm_make_percent(numerator, denominator) : 100.0;
}
/* Round up extents to next stripe boundary for number of stripes */
static uint32_t _round_to_stripe_boundary(struct volume_group *vg, uint32_t extents,
uint32_t stripes, int extend)
{
uint32_t size_rest, new_extents = extents;
if (!stripes)
return extents;
/* Round up extents to stripe divisible amount */
if ((size_rest = extents % stripes)) {
new_extents += extend ? stripes - size_rest : -size_rest;
log_print_unless_silent("Rounding size %s (%d extents) up to stripe boundary size %s (%d extents).",
display_size(vg->cmd, extents * vg->extent_size), extents,
display_size(vg->cmd, new_extents * vg->extent_size), new_extents);
}
return new_extents;
return denominator ? dm_make_percent( numerator, denominator ) : 100.0;
}
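A quick worked example of the stripe-boundary rounding above, as a standalone sketch with plain integers (illustrative values):

/* 100 extents on 3 stripes: 100 % 3 == 1, so an extend rounds up to 102
 * and a reduce rounds down to 99, matching _round_to_stripe_boundary(). */
static unsigned round_to_stripe_boundary(unsigned extents, unsigned stripes,
					 int extend)
{
	unsigned rest;

	if (!stripes || !(rest = extents % stripes))
		return extents;		/* already stripe aligned */

	return extend ? extents + (stripes - rest) : extents - rest;
}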
/*
@@ -1044,7 +1011,7 @@ struct lv_segment *alloc_snapshot_seg(struct logical_volume *lv,
struct lv_segment *seg;
const struct segment_type *segtype;
segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_SNAPSHOT);
segtype = get_segtype_from_string(lv->vg->cmd, "snapshot");
if (!segtype) {
log_error("Failed to find snapshot segtype");
return NULL;
@@ -1068,7 +1035,6 @@ static int _release_and_discard_lv_segment_area(struct lv_segment *seg, uint32_t
uint32_t area_reduction, int with_discard)
{
struct lv_segment *cache_seg;
struct logical_volume *lv = seg_lv(seg, s);
if (seg_type(seg, s) == AREA_UNASSIGNED)
return 1;
@@ -1086,10 +1052,10 @@ static int _release_and_discard_lv_segment_area(struct lv_segment *seg, uint32_t
return 1;
}
if (lv_is_mirror_image(lv) ||
lv_is_thin_pool_data(lv) ||
lv_is_cache_pool_data(lv)) {
if (!lv_reduce(lv, area_reduction))
if (lv_is_mirror_image(seg_lv(seg, s)) ||
lv_is_thin_pool_data(seg_lv(seg, s)) ||
lv_is_cache_pool_data(seg_lv(seg, s))) {
if (!lv_reduce(seg_lv(seg, s), area_reduction))
return_0; /* FIXME: any upper level reporting */
return 1;
}
@@ -1103,20 +1069,20 @@ static int _release_and_discard_lv_segment_area(struct lv_segment *seg, uint32_t
return_0;
}
if (lv_is_raid_image(lv)) {
if (lv_is_raid_image(seg_lv(seg, s))) {
/*
* FIXME: Use lv_reduce not lv_remove
* We use lv_remove for now, because I haven't figured out
* why lv_reduce won't remove the LV.
lv_reduce(lv, area_reduction);
lv_reduce(seg_lv(seg, s), area_reduction);
*/
if (area_reduction != seg->area_len) {
log_error("Unable to reduce RAID LV - operation not implemented.");
return_0;
} else {
if (!lv_remove(lv)) {
if (!lv_remove(seg_lv(seg, s))) {
log_error("Failed to remove RAID image %s",
lv->name);
seg_lv(seg, s)->name);
return 0;
}
}
@@ -1137,9 +1103,9 @@ static int _release_and_discard_lv_segment_area(struct lv_segment *seg, uint32_t
log_very_verbose("Remove %s:%" PRIu32 "[%" PRIu32 "] from "
"the top of LV %s:%" PRIu32,
seg->lv->name, seg->le, s,
lv->name, seg_le(seg, s));
seg_lv(seg, s)->name, seg_le(seg, s));
if (!remove_seg_from_segs_using_this_lv(lv, seg))
if (!remove_seg_from_segs_using_this_lv(seg_lv(seg, s), seg))
return_0;
seg_lv(seg, s) = NULL;
seg_le(seg, s) = 0;
@@ -1275,51 +1241,6 @@ static int _lv_segment_add_areas(struct logical_volume *lv,
return 1;
}
static uint32_t _calc_area_multiple(const struct segment_type *segtype,
const uint32_t area_count,
const uint32_t stripes)
{
if (!area_count)
return 1;
/* Striped */
if (segtype_is_striped(segtype))
return area_count;
/* Parity RAID (e.g. RAID 4/5/6) */
if (segtype_is_raid(segtype) && segtype->parity_devs) {
/*
* As articulated in _alloc_init, we can tell by
* the area_count whether a replacement drive is
* being allocated; and if this is the case, then
* there is no area_multiple that should be used.
*/
if (area_count <= segtype->parity_devs)
return 1;
return area_count - segtype->parity_devs;
}
/*
* RAID10 - only has 2-way mirror right now.
* If we are to move beyond 2-way RAID10, then
* the 'stripes' argument will always need to
* be given.
*/
if (!strcmp(segtype->name, _lv_type_names[LV_TYPE_RAID10])) {
if (!stripes)
return area_count / 2;
return stripes;
}
/* Mirrored stripes */
if (stripes)
return stripes;
/* Mirrored */
return 1;
}
/*
* Reduce the size of an lv_segment. New size can be zero.
*/
@@ -1470,7 +1391,7 @@ int replace_lv_with_error_segment(struct logical_volume *lv)
/* FIXME Check for any attached LVs that will become orphans e.g. mirror logs */
if (!lv_add_virtual_segment(lv, 0, len, get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_ERROR)))
if (!lv_add_virtual_segment(lv, 0, len, get_segtype_from_string(lv->vg->cmd, "error")))
return_0;
return 1;
@@ -1537,10 +1458,10 @@ struct alloc_handle {
struct dm_pool *mem;
alloc_policy_t alloc; /* Overall policy */
int approx_alloc; /* get as much as possible up to new_extents */
int approx_alloc; /* get as much as possible up to new_extents */
uint32_t new_extents; /* Number of new extents required */
uint32_t area_count; /* Number of parallel areas */
uint32_t parity_count; /* Adds to area_count, but not area_multiple */
uint32_t parity_count; /* Adds to area_count, but not area_multiple */
uint32_t area_multiple; /* seg->len = area_len * area_multiple */
uint32_t log_area_count; /* Number of parallel logs */
uint32_t metadata_area_count; /* Number of parallel metadata areas */
@@ -1575,6 +1496,50 @@ struct alloc_handle {
struct dm_list alloced_areas[0];
};
static uint32_t _calc_area_multiple(const struct segment_type *segtype,
const uint32_t area_count,
const uint32_t stripes)
{
if (!area_count)
return 1;
/* Striped */
if (segtype_is_striped(segtype))
return area_count;
/* Parity RAID (e.g. RAID 4/5/6) */
if (segtype_is_raid(segtype) && segtype->parity_devs) {
/*
* As articulated in _alloc_init, we can tell by
* the area_count whether a replacement drive is
* being allocated; and if this is the case, then
* there is no area_multiple that should be used.
*/
if (area_count <= segtype->parity_devs)
return 1;
return area_count - segtype->parity_devs;
}
/*
* RAID10 - only has 2-way mirror right now.
* If we are to move beyond 2-way RAID10, then
* the 'stripes' argument will always need to
* be given.
*/
if (!strcmp(segtype->name, _lv_type_names[LV_TYPE_RAID10])) {
if (!stripes)
return area_count / 2;
return stripes;
}
/* Mirrored stripes */
if (stripes)
return stripes;
/* Mirrored */
return 1;
}
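A few concrete cases for the rules above (inputs are illustrative; parity_devs of 1 and 2 are the usual values for the raid5 and raid6 segment types):

/*
 *  segtype   area_count  stripes  parity_devs   area_multiple
 *  striped        3         -          0             3
 *  raid5          4         0          1             3   (area_count - parity_devs)
 *  raid6          6         0          2             4
 *  raid10         4         0          -             2   (area_count / 2)
 *  mirror         2         0          -             1
 */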
/*
* Returns log device size in extents, algorithm from kernel code
*/
@@ -1616,7 +1581,7 @@ static int _sufficient_pes_free(struct alloc_handle *ah, struct dm_list *pvms,
{
uint32_t area_extents_needed = (extents_still_needed - allocated) * ah->area_count / ah->area_multiple;
uint32_t parity_extents_needed = (extents_still_needed - allocated) * ah->parity_count / ah->area_multiple;
uint32_t metadata_extents_needed = ah->alloc_and_split_meta ? 0 : ah->metadata_area_count * RAID_METADATA_AREA_LEN; /* One each */
uint32_t metadata_extents_needed = (ah->alloc_and_split_meta) ? 0 : ah->metadata_area_count * RAID_METADATA_AREA_LEN; /* One each */
uint32_t total_extents_needed = area_extents_needed + parity_extents_needed + metadata_extents_needed;
uint32_t free_pes = pv_maps_size(pvms);
@@ -1759,9 +1724,9 @@ static int _setup_alloced_segment(struct logical_volume *lv, uint64_t status,
struct lv_segment *seg;
area_multiple = _calc_area_multiple(segtype, area_count, 0);
extents = aa[0].len * area_multiple;
if (!(seg = alloc_lv_segment(segtype, lv, lv->le_count, extents,
if (!(seg = alloc_lv_segment(segtype, lv, lv->le_count,
aa[0].len * area_multiple,
status, stripe_size, NULL,
area_count,
aa[0].len, 0u, region_size, 0u, NULL))) {
@@ -1777,7 +1742,7 @@ static int _setup_alloced_segment(struct logical_volume *lv, uint64_t status,
extents = aa[0].len * area_multiple;
lv->le_count += extents;
lv->size += (uint64_t) extents * lv->vg->extent_size;
lv->size += (uint64_t) extents *lv->vg->extent_size;
return 1;
}
@@ -1943,7 +1908,7 @@ static int _for_each_pv(struct cmd_context *cmd, struct logical_volume *lv,
*max_seg_len = remaining_seg_len;
area_multiple = _calc_area_multiple(seg->segtype, seg->area_count, 0);
area_len = (remaining_seg_len / area_multiple) ? : 1;
area_len = remaining_seg_len / area_multiple ? : 1;
/* For striped mirrors, all the areas are counted, through the mirror layer */
if (top_level_area_index == -1)
@@ -2989,7 +2954,7 @@ static int _allocate(struct alloc_handle *ah,
if (ah->area_multiple > 1 &&
(ah->new_extents - alloc_state.allocated) % ah->area_multiple) {
log_error("Number of extents requested (" FMTu32 ") needs to be divisible by " FMTu32 ".",
log_error("Number of extents requested (%d) needs to be divisible by %d.",
ah->new_extents - alloc_state.allocated,
ah->area_multiple);
return 0;
@@ -3434,7 +3399,7 @@ static struct lv_segment *_convert_seg_to_mirror(struct lv_segment *seg,
return NULL;
}
if (!(newseg = alloc_lv_segment(get_segtype_from_string(seg->lv->vg->cmd, SEG_TYPE_NAME_MIRROR),
if (!(newseg = alloc_lv_segment(get_segtype_from_string(seg->lv->vg->cmd, "mirror"),
seg->lv, seg->le, seg->len,
seg->status, seg->stripe_size,
log_lv,
@@ -3527,7 +3492,7 @@ int lv_add_segmented_mirror_image(struct alloc_handle *ah,
if (!lv_add_mirror_lvs(lv, &copy_lv, 1, MIRROR_IMAGE, region_size))
return_0;
if (!(segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_STRIPED)))
if (!(segtype = get_segtype_from_string(lv->vg->cmd, "striped")))
return_0;
dm_list_iterate_items(aa, &ah->alloced_areas[0]) {
@@ -3641,7 +3606,7 @@ int lv_add_mirror_lvs(struct logical_volume *lv,
return 0;
}
mirror_segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_MIRROR);
mirror_segtype = get_segtype_from_string(lv->vg->cmd, "mirror");
if (seg->segtype != mirror_segtype)
if (!(seg = _convert_seg_to_mirror(seg, region_size, NULL)))
return_0;
@@ -3700,7 +3665,8 @@ int lv_add_log_segment(struct alloc_handle *ah, uint32_t first_area,
{
return lv_add_segment(ah, ah->area_count + first_area, 1, log_lv,
get_segtype_from_string(log_lv->vg->cmd, SEG_TYPE_NAME_STRIPED),
get_segtype_from_string(log_lv->vg->cmd,
"striped"),
0, status, 0);
}
@@ -3797,23 +3763,23 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
{
const struct segment_type *segtype;
struct logical_volume *sub_lv, *meta_lv;
struct lv_segment *seg = first_seg(lv);
struct lv_segment *seg;
uint32_t fa, s;
int clear_metadata = 0;
if (!(segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_STRIPED)))
return_0;
segtype = get_segtype_from_string(lv->vg->cmd, "striped");
/*
* The component devices of a "striped" LV all go in the same
* LV. However, RAID has an LV for each device - making the
* 'stripes' and 'stripe_size' parameters meaningless.
*/
if (seg_is_raid(seg)) {
if (seg_is_raid(first_seg(lv))) {
stripes = 1;
stripe_size = 0;
}
seg = first_seg(lv);
for (fa = first_area, s = 0; s < seg->area_count; s++) {
if (is_temporary_mirror_layer(seg_lv(seg, s))) {
if (!_lv_extend_layered_lv(ah, seg_lv(seg, s), extents,
@@ -4239,18 +4205,6 @@ int lv_rename_update(struct cmd_context *cmd, struct logical_volume *lv,
return 0;
}
/*
* The lvmlockd LV lock is only acquired here to ensure the LV is not
* active on another host. This requests a transient LV lock.
* If the LV is active, a persistent LV lock already exists in
* lvmlockd, and the transient lock request does nothing.
* If the LV is not active, then no LV lock exists and the transient
* lock request acquires the LV lock (or fails). The transient lock
* is automatically released when the command exits.
*/
if (!lockd_lv(cmd, lv, "ex", 0))
return_0;
if (update_mda && !archive(vg))
return_0;
@@ -4424,25 +4378,6 @@ static int _fsadm_cmd(struct cmd_context *cmd,
return exec_cmd(cmd, argv, status, 1);
}
static int _adjust_amount(dm_percent_t percent, int policy_threshold, int *policy_amount)
{
if (!(DM_PERCENT_0 < percent && percent <= DM_PERCENT_100) ||
percent <= (policy_threshold * DM_PERCENT_1))
return 0;
/*
* Evaluate the minimal amount needed to get bellow threshold.
* Keep using DM_PERCENT_1 units for better precision.
* Round-up to needed percentage value
*/
percent = (percent/policy_threshold + (DM_PERCENT_1 - 1) / 100) / (DM_PERCENT_1 / 100) - 100;
/* Use it if current policy amount is smaller */
if (*policy_amount < percent)
*policy_amount = percent;
return 1;
}
static int _adjust_policy_params(struct cmd_context *cmd,
struct logical_volume *lv, struct lvresize_params *lp)
{
@@ -4452,64 +4387,48 @@ static int _adjust_policy_params(struct cmd_context *cmd,
if (lv_is_thin_pool(lv)) {
policy_threshold =
find_config_tree_int(cmd, activation_thin_pool_autoextend_threshold_CFG,
lv_config_profile(lv));
lv_config_profile(lv)) * DM_PERCENT_1;
policy_amount =
find_config_tree_int(cmd, activation_thin_pool_autoextend_percent_CFG,
lv_config_profile(lv));
if (policy_threshold < 50) {
log_warn("WARNING: Thin pool autoextend threshold %d%% is set below "
"minimum supported 50%%.", policy_threshold);
policy_threshold = 50;
}
if (!policy_amount && policy_threshold < DM_PERCENT_100)
return 0;
} else {
policy_threshold =
find_config_tree_int(cmd, activation_snapshot_autoextend_threshold_CFG, NULL);
find_config_tree_int(cmd, activation_snapshot_autoextend_threshold_CFG, NULL) * DM_PERCENT_1;
policy_amount =
find_config_tree_int(cmd, activation_snapshot_autoextend_percent_CFG, NULL);
if (policy_threshold < 50) {
log_warn("WARNING: Snapshot autoextend threshold %d%% is set bellow "
"minimal supported value 50%%.", policy_threshold);
policy_threshold = 50;
}
}
if (!policy_amount && policy_threshold < 100) {
log_error("Can't extend %s with %s autoextend percent set to 0%%.",
display_lvname(lv), first_seg(lv)->segtype->name);
return 0;
}
if (policy_threshold >= 100)
if (policy_threshold >= DM_PERCENT_100)
return 1; /* nothing to do */
if (!lv_is_active_locally(lv)) {
log_error("Can't read state of locally inactive LV %s.",
display_lvname(lv));
return 0;
}
if (lv_is_thin_pool(lv)) {
if (!lv_thin_pool_percent(lv, 1, &percent))
return_0;
if (_adjust_amount(percent, policy_threshold, &policy_amount)) {
if ((DM_PERCENT_0 < percent && percent <= DM_PERCENT_100) &&
(percent > policy_threshold)) {
if (!thin_pool_feature_supported(lv, THIN_FEATURE_METADATA_RESIZE)) {
log_error_once("Online metadata resize for %s is not supported.",
display_lvname(lv));
log_error_once("Online metadata resize for %s/%s is not supported.",
lv->vg->name, lv->name);
return 0;
}
lp->poolmetadatasize = (first_seg(lv)->metadata_lv->size *
policy_amount + 99) / 100;
lp->poolmetadatasign = SIGN_PLUS;
}
if (!lv_thin_pool_percent(lv, 0, &percent))
return_0;
if (!(DM_PERCENT_0 < percent && percent <= DM_PERCENT_100) ||
percent <= policy_threshold)
return 1;
} else {
if (!lv_snapshot_percent(lv, &percent))
return_0;
}
if (!_adjust_amount(percent, policy_threshold, &policy_amount))
if (!(DM_PERCENT_0 < percent && percent <= DM_PERCENT_100) || percent <= policy_threshold)
return 1; /* nothing to do */
}
lp->extents = policy_amount;
lp->sizeargs = (lp->extents) ? 1 : 0;
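Worked example for the policy math above (illustrative numbers): with thin_pool_autoextend_threshold=70 and thin_pool_autoextend_percent=20, a pool crossing 70% data usage is extended by 20% of its current size, while a threshold of 100 disables autoextension (the function returns before resizing). The _adjust_amount() helper additionally raises the configured amount when it would not bring usage back under the threshold:

/* usage 85 %, threshold 70 %, configured amount 20 %:
 * minimal growth ~= ceil(85 / 70 * 100) - 100 = 22 %,
 * so policy_amount is bumped from 20 to 22 before lp->extents is set
 * (85 / 1.22 ~= 69.7 %, i.e. back below the 70 % threshold). */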
@@ -4605,7 +4524,7 @@ static int _lvresize_poolmetadata(struct cmd_context *cmd, struct volume_group *
struct dm_list *pvh)
{
struct logical_volume *lv = first_seg(pool_lv)->metadata_lv;
alloc_policy_t alloc = lp->ac_alloc ? : lv->alloc;
alloc_policy_t alloc = lp->ac_alloc ?: lv->alloc;
struct lv_segment *mseg = last_seg(lv);
uint32_t seg_mirrors = lv_mirror_count(lv);
@@ -5174,11 +5093,6 @@ static int _lvresize_check_type(struct cmd_context *cmd, const struct logical_vo
}
}
if ((lp->resize == LV_REDUCE) && lv_is_thin_pool_metadata(lv)) {
log_error("Thin pool metadata volumes cannot be reduced.");
return 0;
}
if (lv_is_thin_volume(lv) && first_seg(lv)->external_lv &&
(lp->resize == LV_EXTEND)) {
/* Validate thin target supports bigger size of thin volume then external origin */
@@ -5214,7 +5128,7 @@ static struct logical_volume *_lvresize_volume(struct cmd_context *cmd,
/* Switch to layered LV resizing */
lv = seg_lv(seg, 0);
}
alloc = lp->ac_alloc ? : lv->alloc;
alloc = lp->ac_alloc ?: lv->alloc;
if ((lp->resize == LV_REDUCE) && lp->argc)
log_print_unless_silent("Ignoring PVs on command line when reducing.");
@@ -5231,7 +5145,7 @@ static struct logical_volume *_lvresize_volume(struct cmd_context *cmd,
log_error("Filesystem check failed.");
return NULL;
}
/* some filesystems support online resize */
/* some filesystems supports online resize */
}
/* FIXME forks here */
@@ -5370,7 +5284,7 @@ int lv_resize(struct cmd_context *cmd, struct logical_volume *lv,
*/
inactive = 1;
if (!activate_lv_excl(cmd, lock_lv)) {
log_error("Failed to activate %s.", display_lvname(lock_lv));
log_error("Failed to activate %s.", lock_lv->name);
return 0;
}
}
@@ -5391,12 +5305,12 @@ int lv_resize(struct cmd_context *cmd, struct logical_volume *lv,
backup(vg);
if (inactive && !deactivate_lv(cmd, lock_lv)) {
log_error("Problem deactivating %s.", display_lvname(lock_lv));
log_error("Problem deactivating %s.", lock_lv->name);
return 0;
}
}
log_print_unless_silent("Logical volume %s successfully resized.", lp->lv_name);
log_print_unless_silent("Logical volume %s successfully resized", lp->lv_name);
if (lp->resizefs && (lp->resize == LV_EXTEND) &&
!_fsadm_cmd(cmd, vg, lp, FSADM_CMD_RESIZE, NULL))
@@ -6267,7 +6181,7 @@ int remove_layers_for_segments(struct cmd_context *cmd,
/* Replace mirror with error segment */
if (!(lseg->segtype =
get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_ERROR))) {
get_segtype_from_string(lv->vg->cmd, "error"))) {
log_error("Missing error segtype");
return 0;
}
@@ -6321,7 +6235,6 @@ int move_lv_segments(struct logical_volume *lv_to,
struct logical_volume *lv_from,
uint64_t set_status, uint64_t reset_status)
{
const uint64_t MOVE_BITS = (RAID | MIRROR | THIN_VOLUME);
struct lv_segment *seg;
dm_list_iterate_items(seg, &lv_to->segments)
@@ -6339,16 +6252,6 @@ int move_lv_segments(struct logical_volume *lv_to,
seg->status |= set_status;
}
/*
* Move LV status bits for selected types with their segments
* i.e. when inserting layer to cache LV, we move raid segments
* to a new place, thus 'raid' LV property now belongs to this LV.
*
* Bits should match to those which appears after read from disk.
*/
lv_to->status |= lv_from->status & MOVE_BITS;
lv_from->status &= ~MOVE_BITS;
lv_to->le_count = lv_from->le_count;
lv_to->size = lv_from->size;
@@ -6401,7 +6304,7 @@ int remove_layer_from_lv(struct logical_volume *lv,
return_0;
/* Replace the empty layer with error segment */
if (!(segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_ERROR)))
if (!(segtype = get_segtype_from_string(lv->vg->cmd, "error")))
return_0;
if (!lv_add_virtual_segment(layer_lv, 0, parent_lv->le_count, segtype))
return_0;
@@ -6465,7 +6368,7 @@ struct logical_volume *insert_layer_for_lv(struct cmd_context *cmd,
if (lv_is_active(lv_where) && strstr(name, "_mimagetmp")) {
log_very_verbose("Creating transient LV %s for mirror conversion in VG %s.", name, lv_where->vg->name);
segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_ERROR);
segtype = get_segtype_from_string(cmd, "error");
if (!lv_add_virtual_segment(layer_lv, 0, lv_where->le_count, segtype)) {
log_error("Creation of transient LV %s for mirror conversion in VG %s failed.", name, lv_where->vg->name);
@@ -6513,7 +6416,7 @@ struct logical_volume *insert_layer_for_lv(struct cmd_context *cmd,
if (!move_lv_segments(layer_lv, lv_where, 0, 0))
return_NULL;
if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED)))
if (!(segtype = get_segtype_from_string(cmd, "striped")))
return_NULL;
/* allocate a new linear segment */
@@ -6563,7 +6466,7 @@ static int _extend_layer_lv_for_segment(struct logical_volume *layer_lv,
if (seg_type(seg, s) != AREA_PV && seg_type(seg, s) != AREA_LV)
return_0;
if (!(segtype = get_segtype_from_string(layer_lv->vg->cmd, SEG_TYPE_NAME_STRIPED)))
if (!(segtype = get_segtype_from_string(layer_lv->vg->cmd, "striped")))
return_0;
/* FIXME Incomplete message? Needs more context */
@@ -6836,7 +6739,7 @@ static struct logical_volume *_create_virtual_origin(struct cmd_context *cmd,
char vorigin_name[NAME_LEN];
struct logical_volume *lv;
if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_ZERO))) {
if (!(segtype = get_segtype_from_string(cmd, "zero"))) {
log_error("Zero segment type for virtual origin not found");
return NULL;
}
@@ -6990,7 +6893,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
const char *new_lv_name)
{
struct cmd_context *cmd = vg->cmd;
uint32_t size;
uint32_t size_rest, size;
uint64_t status = lp->permission | VISIBLE_LV;
const struct segment_type *create_segtype = lp->segtype;
struct logical_volume *lv, *origin_lv = NULL;
@@ -7055,7 +6958,12 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
lp->stripe_size = vg->extent_size;
}
lp->extents = _round_to_stripe_boundary(vg, lp->extents, lp->stripes, 1);
if ((size_rest = lp->extents % lp->stripes)) {
log_print_unless_silent("Rounding size (%d extents) up to stripe boundary "
"size (%d extents).", lp->extents,
lp->extents - size_rest + lp->stripes);
lp->extents = lp->extents - size_rest + lp->stripes;
}
if (!lp->extents && !seg_is_thin_volume(lp)) {
log_error(INTERNAL_ERROR "Unable to create new logical volume with no extents.");
@@ -7078,7 +6986,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
if (seg_is_pool(lp))
status |= LVM_WRITE; /* Pool is always writable */
else if (seg_is_cache(lp) || seg_is_thin_volume(lp)) {
else if (seg_is_cache(lp) || seg_is_thin_volume(lp)) {
/* Resolve pool volume */
if (!lp->pool_name) {
/* Should be already checked */
@@ -7113,25 +7021,12 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
}
}
if (seg_is_thin_volume(lp)) {
if (seg_is_thin_volume(lp) &&
lv_is_new_thin_pool(pool_lv)) {
thin_pool_was_active = lv_is_active(pool_lv);
if (lv_is_new_thin_pool(pool_lv)) {
if (!check_new_thin_pool(pool_lv))
return_NULL;
/* New pool is now inactive */
} else {
if (!activate_lv_excl_local(cmd, pool_lv)) {
log_error("Aborting. Failed to locally activate thin pool %s.",
display_lvname(pool_lv));
return 0;
}
if (!pool_below_threshold(first_seg(pool_lv))) {
log_error("Cannot create new thin volume, free space in "
"thin pool %s reached threshold.",
display_lvname(pool_lv));
return NULL;
}
}
if (!check_new_thin_pool(pool_lv))
return_NULL;
/* New pool is now inactive */
}
if (seg_is_cache(lp) &&
@@ -7167,11 +7062,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
display_lvname(origin_lv));
return NULL;
}
} else if (seg_is_cache(lp)) {
if (!pool_lv) {
log_error(INTERNAL_ERROR "Pool LV for cache is missing.");
return NULL;
}
} else if (pool_lv && seg_is_cache(lp)) {
if (!lv_is_cache_pool(pool_lv)) {
log_error("Logical volume %s is not a cache pool.",
display_lvname(pool_lv));
@@ -7179,7 +7070,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
}
/* Create cache origin for cache pool */
/* FIXME Eventually support raid/mirrors with -m */
if (!(create_segtype = get_segtype_from_string(vg->cmd, SEG_TYPE_NAME_STRIPED)))
if (!(create_segtype = get_segtype_from_string(vg->cmd, "striped")))
return_0;
} else if (seg_is_mirrored(lp) || seg_is_raid(lp)) {
if (is_change_activating(lp->activate) && (lp->activate != CHANGE_AEY) &&
@@ -7248,7 +7139,8 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
return NULL;
}
if (lv_is_cow(origin_lv)) {
log_error("Snapshots of snapshots are not supported.");
log_error("Snapshots of snapshots are not "
"supported yet.");
return NULL;
}
if (lv_is_locked(origin_lv)) {
@@ -7294,7 +7186,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
return_NULL;
/* The snapshot segment gets created later */
if (!(create_segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED)))
if (!(create_segtype = get_segtype_from_string(cmd, "striped")))
return_NULL;
/* Must zero cow */
@@ -7314,7 +7206,6 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
if (!archive(vg))
return_NULL;
if (pool_lv && seg_is_thin_volume(lp)) {
/* Ensure all stacked messages are submitted */
if ((pool_is_active(pool_lv) || is_change_activating(lp->activate)) &&
@@ -7361,25 +7252,16 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
memlock_unlock(vg->cmd);
if (seg_is_cache_pool(lp) || seg_is_cache(lp)) {
if (!cache_set_mode(first_seg(lv), lp->cache_mode)) {
stack;
goto revert_new_lv;
}
if (!cache_set_policy(first_seg(lv), lp->policy_name, lp->policy_settings)) {
stack;
goto revert_new_lv;
}
pool_lv = pool_lv ? : lv;
if (lp->chunk_size) {
first_seg(pool_lv)->chunk_size = lp->chunk_size;
/* TODO: some calc_policy solution for cache ? */
if (!recalculate_pool_chunk_size_with_dev_hints(pool_lv, lp->passed_args,
THIN_CHUNK_SIZE_CALC_METHOD_GENERIC)) {
stack;
goto revert_new_lv;
}
if (!lv_cache_set_policy(pool_lv, lp->policy_name, lp->policy_settings))
return_NULL; /* revert? */
first_seg(pool_lv)->chunk_size = lp->chunk_size;
first_seg(pool_lv)->feature_flags = lp->feature_flags;
/* TODO: some calc_policy solution for cache ? */
if (!recalculate_pool_chunk_size_with_dev_hints(pool_lv, lp->passed_args,
THIN_CHUNK_SIZE_CALC_METHOD_GENERIC)) {
stack;
goto revert_new_lv;
}
} else if (seg_is_raid(lp)) {
first_seg(lv)->min_recovery_rate = lp->min_recovery_rate;
@@ -7388,6 +7270,8 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
first_seg(lv)->chunk_size = lp->chunk_size;
first_seg(lv)->zero_new_blocks = lp->zero ? 1 : 0;
first_seg(lv)->discards = lp->discards;
/* FIXME: use lowwatermark via lvm.conf global for all thinpools ? */
first_seg(lv)->low_water_mark = 0;
if (!recalculate_pool_chunk_size_with_dev_hints(lv, lp->passed_args,
lp->thin_chunk_size_calc_policy)) {
stack;
@@ -7582,14 +7466,6 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
}
lv = tmp_lv;
if (!cache_set_mode(first_seg(lv), lp->cache_mode))
return_NULL; /* revert? */
if (!cache_set_policy(first_seg(lv), lp->policy_name, lp->policy_settings))
return_NULL; /* revert? */
cache_check_for_warns(first_seg(lv));
if (!lv_update_and_reload(lv)) {
/* FIXME Do a better revert */
log_error("Aborting. Manual intervention required.");
@@ -7689,7 +7565,7 @@ struct logical_volume *lv_create_single(struct volume_group *vg,
if (lp->create_pool && !seg_is_pool(lp)) {
segtype = lp->segtype;
if (seg_is_thin_volume(lp)) {
if (!(lp->segtype = get_segtype_from_string(vg->cmd, SEG_TYPE_NAME_THIN_POOL)))
if (!(lp->segtype = get_segtype_from_string(vg->cmd, "thin-pool")))
return_NULL;
if (!(lv = _lv_create_an_lv(vg, lp, lp->pool_name)))
@@ -7701,7 +7577,7 @@ struct logical_volume *lv_create_single(struct volume_group *vg,
return NULL;
}
/* origin_name is defined -> creates cache LV with new cache pool */
if (!(lp->segtype = get_segtype_from_string(vg->cmd, SEG_TYPE_NAME_CACHE_POOL)))
if (!(lp->segtype = get_segtype_from_string(vg->cmd, "cache-pool")))
return_NULL;
if (!(lv = _lv_create_an_lv(vg, lp, lp->pool_name)))


@@ -119,24 +119,12 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
inc_error_count;
}
if (lv_is_pool_metadata(lv)) {
if (!(seg2 = first_seg(lv)) || !(seg2 = find_pool_seg(seg2)) ||
seg2->metadata_lv != lv) {
log_error("LV %s: segment 1 pool metadata LV does not point back to same LV",
lv->name);
inc_error_count;
}
if (lv_is_thin_pool_metadata(lv) &&
!strstr(lv->name, "_tmeta")) {
log_error("LV %s: thin pool metadata LV does not use _tmeta",
lv->name);
inc_error_count;
} else if (lv_is_cache_pool_metadata(lv) &&
!strstr(lv->name, "_cmeta")) {
log_error("LV %s: cache pool metadata LV does not use _cmeta",
lv->name);
inc_error_count;
}
if (lv_is_pool_metadata(lv) &&
(!(seg2 = first_seg(lv)) || !(seg2 = find_pool_seg(seg2)) ||
seg2->metadata_lv != lv)) {
log_error("LV %s: segment 1 pool metadata LV does not point back to same LV",
lv->name);
inc_error_count;
}
}
@@ -220,21 +208,7 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
}
}
if (seg_is_cache_pool(seg) &&
!dm_list_empty(&seg->lv->segs_using_this_lv)) {
switch (seg->feature_flags &
(DM_CACHE_FEATURE_PASSTHROUGH |
DM_CACHE_FEATURE_WRITETHROUGH |
DM_CACHE_FEATURE_WRITEBACK)) {
case DM_CACHE_FEATURE_PASSTHROUGH:
case DM_CACHE_FEATURE_WRITETHROUGH:
case DM_CACHE_FEATURE_WRITEBACK:
break;
default:
log_error("LV %s has invalid cache's feature flag.",
lv->name);
inc_error_count;
}
if (seg_is_cache_pool(seg)) {
if (!seg->policy_name) {
log_error("LV %s is missing cache policy name.", lv->name);
inc_error_count;


@@ -163,9 +163,10 @@
/* vg_read and vg_read_for_update flags */
#define READ_ALLOW_INCONSISTENT 0x00010000U
#define READ_ALLOW_EXPORTED 0x00020000U
#define READ_OK_NOTFOUND 0x00040000U
#define READ_WARN_INCONSISTENT 0x00080000U
#define READ_FOR_UPDATE 0x00100000U /* A meta-flag, useful with toollib for_each_* functions. */
/* A meta-flag, useful with toollib for_each_* functions. */
#define READ_FOR_UPDATE 0x00100000U
/* vg's "read_status" field */
#define FAILED_INCONSISTENT 0x00000001U
@@ -454,6 +455,7 @@ struct lv_segment {
struct lv_segment_area *meta_areas; /* For RAID */
struct logical_volume *metadata_lv; /* For thin_pool */
uint64_t transaction_id; /* For thin_pool, thin */
uint64_t low_water_mark; /* For thin_pool */
unsigned zero_new_blocks; /* For thin_pool */
thin_discards_t discards; /* For thin_pool */
struct dm_list thin_messages; /* For thin_pool */
@@ -648,9 +650,9 @@ int lv_resize(struct cmd_context *cmd, struct logical_volume *lv,
* Return a handle to VG metadata.
*/
struct volume_group *vg_read(struct cmd_context *cmd, const char *vg_name,
const char *vgid, uint32_t read_flags, uint32_t lockd_state);
const char *vgid, uint32_t flags, uint32_t lockd_state);
struct volume_group *vg_read_for_update(struct cmd_context *cmd, const char *vg_name,
const char *vgid, uint32_t read_flags, uint32_t lockd_state);
const char *vgid, uint32_t flags, uint32_t lockd_state);
/*
* Test validity of a VG handle.
@@ -899,7 +901,7 @@ struct lvcreate_params {
uint32_t min_recovery_rate; /* RAID */
uint32_t max_recovery_rate; /* RAID */
const char *cache_mode; /* cache */
uint64_t feature_flags; /* cache */
const char *policy_name; /* cache */
struct dm_config_tree *policy_settings; /* cache */
@@ -1151,12 +1153,8 @@ struct lv_status_cache {
dm_percent_t dirty_usage;
};
const char *get_cache_mode_name(const struct lv_segment *cache_seg);
int cache_mode_is_set(const struct lv_segment *seg);
int cache_set_mode(struct lv_segment *cache_seg, const char *str);
int cache_set_policy(struct lv_segment *cache_seg, const char *name,
const struct dm_config_tree *settings);
void cache_check_for_warns(const struct lv_segment *seg);
const char *get_cache_pool_cachemode_name(const struct lv_segment *seg);
int set_cache_pool_feature(uint64_t *feature_flags, const char *str);
int update_cache_pool_params(const struct segment_type *segtype,
struct volume_group *vg, unsigned attr,
int passed_args, uint32_t pool_data_extents,
@@ -1167,6 +1165,8 @@ int validate_lv_cache_create_origin(const struct logical_volume *origin_lv);
struct logical_volume *lv_cache_create(struct logical_volume *pool,
struct logical_volume *origin);
int lv_cache_remove(struct logical_volume *cache_lv);
int lv_cache_set_policy(struct logical_volume *cache_lv, const char *name,
const struct dm_config_tree *settings);
int wipe_cache_pool(struct logical_volume *cache_pool_lv);
/* -- metadata/cache_manip.c */


@@ -319,11 +319,10 @@ static struct pv_list *_copy_pvl(struct dm_pool *pvmem, struct pv_list *pvl_from
if (!(pvl_to->pv = dm_pool_alloc(pvmem, sizeof(*pvl_to->pv))))
goto_bad;
if (!_copy_pv(pvmem, pvl_to->pv, pvl_from->pv))
if(!_copy_pv(pvmem, pvl_to->pv, pvl_from->pv))
goto_bad;
return pvl_to;
bad:
dm_pool_free(pvmem, pvl_to);
return NULL;
@@ -3011,7 +3010,7 @@ out:
int vg_write(struct volume_group *vg)
{
struct dm_list *mdah;
struct pv_to_create *pv_to_create, *pv_to_create_safe;
struct pv_to_create *pv_to_create;
struct metadata_area *mda;
struct lv_list *lvl;
int revert = 0, wrote = 0;
@@ -3067,11 +3066,10 @@ int vg_write(struct volume_group *vg)
memlock_unlock(vg->cmd);
vg->seqno++;
dm_list_iterate_items_safe(pv_to_create, pv_to_create_safe, &vg->pvs_to_create) {
dm_list_iterate_items(pv_to_create, &vg->pvs_to_create) {
if (!_pvcreate_write(vg->cmd, pv_to_create))
return 0;
dm_list_del(&pv_to_create->list);
}
}
/* Write to each copy of the metadata area */
dm_list_iterate_items(mda, &vg->fid->metadata_areas_in_use) {
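The move to the _safe iterator above is needed because entries are unlinked from vg->pvs_to_create while the list is being walked; a minimal standalone sketch of that pattern (hypothetical element type, real libdevmapper dm_list API):

#include "libdevmapper.h"

struct entry {
	struct dm_list list;	/* embedded list head required by dm_list_* */
	int done;
};

/* Drop finished entries; the _safe variant caches the next pointer, so
 * dm_list_del() on the current item cannot break the traversal. */
static void prune_done(struct dm_list *head)
{
	struct entry *e, *tmp;

	dm_list_iterate_items_safe(e, tmp, head)
		if (e->done)
			dm_list_del(&e->list);
}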
@@ -4925,7 +4923,7 @@ static int _vg_access_permitted(struct cmd_context *cmd, struct volume_group *vg
* Consolidated locking, reading, and status flag checking.
*
* If the metadata is inconsistent, setting READ_ALLOW_INCONSISTENT in
* read_flags will return it with FAILED_INCONSISTENT set instead of
* misc_flags will return it with FAILED_INCONSISTENT set instead of
* giving you nothing.
*
* Use vg_read_error(vg) to determine the result. Nonzero means there were
@@ -4933,10 +4931,8 @@ static int _vg_access_permitted(struct cmd_context *cmd, struct volume_group *vg
* Zero value means that the VG is open and appropriate locks are held.
*/
static struct volume_group *_vg_lock_and_read(struct cmd_context *cmd, const char *vg_name,
const char *vgid,
uint32_t lock_flags,
uint64_t status_flags,
uint32_t read_flags,
const char *vgid, uint32_t lock_flags,
uint64_t status_flags, uint32_t misc_flags,
uint32_t lockd_state)
{
struct volume_group *vg = NULL;
@@ -4946,7 +4942,7 @@ static struct volume_group *_vg_lock_and_read(struct cmd_context *cmd, const cha
uint32_t warn_flags = 0;
int already_locked;
if ((read_flags & READ_ALLOW_INCONSISTENT) || (lock_flags != LCK_VG_WRITE))
if (misc_flags & READ_ALLOW_INCONSISTENT || lock_flags != LCK_VG_WRITE)
consistent = 0;
if (!validate_name(vg_name) && !is_orphan_vg(vg_name)) {
@@ -4969,7 +4965,7 @@ static struct volume_group *_vg_lock_and_read(struct cmd_context *cmd, const cha
consistent_in = consistent;
warn_flags = WARN_PV_READ;
if (consistent || (read_flags & READ_WARN_INCONSISTENT))
if (consistent || (misc_flags & READ_WARN_INCONSISTENT))
warn_flags |= WARN_INCONSISTENT;
/* If consistent == 1, we get NULL here if correction fails. */
@@ -4978,8 +4974,7 @@ static struct volume_group *_vg_lock_and_read(struct cmd_context *cmd, const cha
failure |= FAILED_INCONSISTENT;
goto bad;
}
if (!(read_flags & READ_OK_NOTFOUND))
log_error("Volume group \"%s\" not found", vg_name);
log_error("Volume group \"%s\" not found", vg_name);
failure |= FAILED_NOTFOUND;
goto bad;
}
@@ -5060,20 +5055,20 @@ bad_no_unlock:
* *consistent = 1.
*/
struct volume_group *vg_read(struct cmd_context *cmd, const char *vg_name,
const char *vgid, uint32_t read_flags, uint32_t lockd_state)
const char *vgid, uint32_t flags, uint32_t lockd_state)
{
uint64_t status_flags = UINT64_C(0);
uint64_t status = UINT64_C(0);
uint32_t lock_flags = LCK_VG_READ;
if (read_flags & READ_FOR_UPDATE) {
status_flags |= EXPORTED_VG | LVM_WRITE;
if (flags & READ_FOR_UPDATE) {
status |= EXPORTED_VG | LVM_WRITE;
lock_flags = LCK_VG_WRITE;
}
if (read_flags & READ_ALLOW_EXPORTED)
status_flags &= ~EXPORTED_VG;
if (flags & READ_ALLOW_EXPORTED)
status &= ~EXPORTED_VG;
return _vg_lock_and_read(cmd, vg_name, vgid, lock_flags, status_flags, read_flags, lockd_state);
return _vg_lock_and_read(cmd, vg_name, vgid, lock_flags, status, flags, lockd_state);
}
/*
@@ -5082,9 +5077,9 @@ struct volume_group *vg_read(struct cmd_context *cmd, const char *vg_name,
* request the new metadata to be written and committed).
*/
struct volume_group *vg_read_for_update(struct cmd_context *cmd, const char *vg_name,
const char *vgid, uint32_t read_flags, uint32_t lockd_state)
const char *vgid, uint32_t flags, uint32_t lockd_state)
{
return vg_read(cmd, vg_name, vgid, read_flags | READ_FOR_UPDATE, lockd_state);
return vg_read(cmd, vg_name, vgid, flags | READ_FOR_UPDATE, lockd_state);
}
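A minimal sketch of how callers combine these flags (error handling is illustrative; ECMD_* and the lock/release helpers come from the tools layer):

static int _update_my_vg(struct cmd_context *cmd, const char *vg_name,
			 uint32_t lockd_state)
{
	struct volume_group *vg;

	/* READ_FOR_UPDATE is the meta-flag: take a write lock and refuse
	 * exported or read-only VGs.  vg_read_for_update() is simply
	 * vg_read() with this flag OR-ed in. */
	vg = vg_read(cmd, vg_name, NULL, READ_FOR_UPDATE, lockd_state);
	if (vg_read_error(vg)) {
		release_vg(vg);
		return ECMD_FAILED;
	}

	/* ... modify metadata, then vg_write() + vg_commit() ... */

	unlock_and_release_vg(cmd, vg, vg_name);
	return ECMD_PROCESSED;
}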
/*


@@ -83,7 +83,7 @@ int cluster_mirror_is_available(struct cmd_context *cmd)
unsigned attr = 0;
const struct segment_type *segtype;
if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_MIRROR)))
if (!(segtype = get_segtype_from_string(cmd, "mirror")))
return_0;
if (!segtype->ops->target_present)
@@ -112,7 +112,7 @@ uint32_t lv_mirror_count(const struct logical_volume *lv)
seg = first_seg(lv);
/* FIXME: RAID10 only supports 2 copies right now */
if (seg_is_raid10(seg))
if (!strcmp(seg->segtype->name, "raid10"))
return 2;
if (lv_is_pvmove(lv))
@@ -1493,7 +1493,8 @@ static int _create_mimage_lvs(struct alloc_handle *ah,
}
} else {
if (!lv_add_segment(ah, m * stripes, stripes, img_lvs[m],
get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_STRIPED),
get_segtype_from_string(lv->vg->cmd,
"striped"),
stripe_size, 0, 0)) {
log_error("Aborting. Failed to add mirror image segment "
"to %s. Remove new LV and retry.",
@@ -1546,7 +1547,8 @@ int remove_mirrors_from_segments(struct logical_volume *lv,
seg->area_count = new_mirrors + 1;
if (!new_mirrors)
seg->segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_STRIPED);
seg->segtype = get_segtype_from_string(lv->vg->cmd,
"striped");
}
return 1;
@@ -1718,7 +1720,7 @@ int fixup_imported_mirrors(struct volume_group *vg)
dm_list_iterate_items(lvl, &vg->lvs) {
dm_list_iterate_items(seg, &lvl->lv->segments) {
if (seg->segtype !=
get_segtype_from_string(vg->cmd, SEG_TYPE_NAME_MIRROR))
get_segtype_from_string(vg->cmd, "mirror"))
continue;
if (seg->log_lv && !add_seg_to_segs_using_this_lv(seg->log_lv, seg))
@@ -1746,7 +1748,7 @@ static int _add_mirrors_that_preserve_segments(struct logical_volume *lv,
if (!(parallel_areas = build_parallel_areas_from_lv(lv, 1, 0)))
return_0;
if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_MIRROR)))
if (!(segtype = get_segtype_from_string(cmd, "mirror")))
return_0;
adjusted_region_size = adjusted_mirror_region_size(lv->vg->extent_size,
@@ -2031,7 +2033,7 @@ int add_mirror_log(struct cmd_context *cmd, struct logical_volume *lv,
if (!(parallel_areas = build_parallel_areas_from_lv(lv, 0, 0)))
return_0;
if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_MIRROR)))
if (!(segtype = get_segtype_from_string(cmd, "mirror")))
return_0;
if (activation() && segtype->ops->target_present &&
@@ -2104,7 +2106,7 @@ int add_mirror_images(struct cmd_context *cmd, struct logical_volume *lv,
if (!(parallel_areas = build_parallel_areas_from_lv(lv, 0, 0)))
return_0;
if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_MIRROR)))
if (!(segtype = get_segtype_from_string(cmd, "mirror")))
return_0;
ah = allocate_extents(lv->vg, NULL, segtype,


@@ -438,7 +438,7 @@ int create_pool(struct logical_volume *pool_lv,
}
/* LV is not yet a pool, so it's extension from lvcreate */
if (!(striped = get_segtype_from_string(pool_lv->vg->cmd, SEG_TYPE_NAME_STRIPED)))
if (!(striped = get_segtype_from_string(pool_lv->vg->cmd, "striped")))
return_0;
if (activation() && striped->ops->target_present &&
@@ -561,7 +561,7 @@ struct logical_volume *alloc_pool_metadata(struct logical_volume *pool_lv,
.zero = 1,
};
if (!(lvc.segtype = get_segtype_from_string(pool_lv->vg->cmd, SEG_TYPE_NAME_STRIPED)))
if (!(lvc.segtype = get_segtype_from_string(pool_lv->vg->cmd, "striped")))
return_0;
/* FIXME: allocate properly space for metadata_lv */
@@ -597,7 +597,7 @@ static struct logical_volume *_alloc_pool_metadata_spare(struct volume_group *vg
.zero = 1,
};
if (!(lp.segtype = get_segtype_from_string(vg->cmd, SEG_TYPE_NAME_STRIPED)))
if (!(lp.segtype = get_segtype_from_string(vg->cmd, "striped")))
return_0;
/* FIXME: Maybe using silent mode ? */


@@ -29,9 +29,9 @@ char *pv_fmt_dup(const struct physical_volume *pv)
return dm_pool_strdup(pv->vg->vgmem, pv->fmt->name);
}
char *pv_name_dup(struct dm_pool *mem, const struct physical_volume *pv)
char *pv_name_dup(const struct physical_volume *pv)
{
return dm_pool_strdup(mem ? mem : pv->vg->vgmem, dev_name(pv->dev));
return dm_pool_strdup(pv->vg->vgmem, dev_name(pv->dev));
}
/*
@@ -42,9 +42,9 @@ struct id pv_id(const struct physical_volume *pv)
return pv_field(pv, id);
}
char *pv_uuid_dup(struct dm_pool *mem, const struct physical_volume *pv)
char *pv_uuid_dup(const struct physical_volume *pv)
{
return id_format_and_copy(mem ? mem : pv->vg->vgmem, &pv->id);
return id_format_and_copy(pv->vg->vgmem, &pv->id);
}
char *pv_tags_dup(const struct physical_volume *pv)


@@ -68,12 +68,12 @@ struct physical_volume {
};
char *pv_fmt_dup(const struct physical_volume *pv);
char *pv_name_dup(struct dm_pool *mem, const struct physical_volume *pv);
char *pv_name_dup(const struct physical_volume *pv);
struct device *pv_dev(const struct physical_volume *pv);
const char *pv_vg_name(const struct physical_volume *pv);
char *pv_attr_dup(struct dm_pool *mem, const struct physical_volume *pv);
const char *pv_dev_name(const struct physical_volume *pv);
char *pv_uuid_dup(struct dm_pool *mem, const struct physical_volume *pv);
char *pv_uuid_dup(const struct physical_volume *pv);
char *pv_tags_dup(const struct physical_volume *pv);
uint64_t pv_size(const struct physical_volume *pv);
uint64_t pv_size_field(const struct physical_volume *pv);


@@ -781,7 +781,7 @@ int pvremove_single(struct cmd_context *cmd, const char *pv_name,
goto out;
}
info = lvmcache_info_from_pvid(dev->pvid, 0);
info = lvmcache_info_from_pvid(dev->pvid, 1);
if (!dev_test_excl(dev)) {
/* FIXME Detect whether device-mapper is still using the device */


@@ -21,6 +21,7 @@
#include "activate.h"
#include "lv_alloc.h"
#include "lvm-string.h"
#include "lvmlockd.h"
static int _lv_is_raid_with_tracking(const struct logical_volume *lv,
struct logical_volume **tracking)
@@ -397,10 +398,8 @@ static struct logical_volume *_alloc_image_component(struct logical_volume *lv,
}
if (dm_snprintf(img_name, sizeof(img_name), "%s_%s_%%d",
(alt_base_name) ? : lv->name, type_suffix) < 0) {
log_error("Component name for raid %s is too long.", lv->name);
return 0;
}
(alt_base_name) ? : lv->name, type_suffix) < 0)
return_0;
status = LVM_READ | LVM_WRITE | LV_REBUILD | type;
if (!(tmp_lv = lv_create_empty(img_name, NULL, status, ALLOC_INHERIT, lv->vg))) {
@@ -408,7 +407,7 @@ static struct logical_volume *_alloc_image_component(struct logical_volume *lv,
return 0;
}
if (!(segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_STRIPED)))
if (!(segtype = get_segtype_from_string(lv->vg->cmd, "striped")))
return_0;
if (!lv_add_segment(ah, first_area, 1, tmp_lv, segtype, 0, status, 0)) {
@@ -460,12 +459,9 @@ static int _alloc_image_components(struct logical_volume *lv,
* individual devies, we must specify how large the individual device
* is along with the number we want ('count').
*/
if (segtype_is_raid10(segtype))
extents = lv->le_count / (seg->area_count / 2); /* we enforce 2 mirrors right now */
else
extents = (segtype->parity_devs) ?
(lv->le_count / (seg->area_count - segtype->parity_devs)) :
lv->le_count;
extents = (segtype->parity_devs) ?
(lv->le_count / (seg->area_count - segtype->parity_devs)) :
lv->le_count;
if (!(ah = allocate_extents(lv->vg, NULL, segtype, 0, count, count,
region_size, extents, pvs,
@@ -870,7 +866,7 @@ static int _raid_extract_images(struct logical_volume *lv, uint32_t new_count,
sizeof(*lvl_array) * extract * 2)))
return_0;
if (!(error_segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_ERROR)))
if (!(error_segtype = get_segtype_from_string(lv->vg->cmd, "error")))
return_0;
/*
@@ -1105,7 +1101,7 @@ int lv_raid_split(struct logical_volume *lv, const char *split_name,
}
if (!seg_is_mirrored(first_seg(lv)) ||
seg_is_raid10(first_seg(lv))) {
!strcmp(first_seg(lv)->segtype->name, SEG_TYPE_NAME_RAID10)) {
log_error("Unable to split logical volume of segment type, %s",
lvseg_name(first_seg(lv)));
return 0;
@@ -1478,11 +1474,13 @@ int lv_raid_reshape(struct logical_volume *lv,
return 0;
}
if (seg_is_mirror(seg) && segtype_is_raid1(new_segtype))
return _convert_mirror_to_raid1(lv, new_segtype);
if (!strcmp(seg->segtype->name, "mirror") &&
(!strcmp(new_segtype->name, SEG_TYPE_NAME_RAID1)))
return _convert_mirror_to_raid1(lv, new_segtype);
log_error("Converting the segment type for %s/%s from %s to %s is not yet supported.",
lv->vg->name, lv->name, lvseg_name(seg), new_segtype->name);
log_error("Converting the segment type for %s/%s from %s to %s"
" is not yet supported.", lv->vg->name, lv->name,
lvseg_name(seg), new_segtype->name);
return 0;
}
@@ -1664,7 +1662,7 @@ int lv_raid_replace(struct logical_volume *lv,
lvseg_name(raid_seg),
lv->vg->name, lv->name);
return 0;
} else if (seg_is_raid10(raid_seg)) {
} else if (!strcmp(raid_seg->segtype->name, SEG_TYPE_NAME_RAID10)) {
uint32_t i, rebuilds_per_group = 0;
/* FIXME: We only support 2-way mirrors in RAID10 currently */
uint32_t copies = 2;
@@ -1896,7 +1894,7 @@ static int _partial_raid_lv_is_redundant(const struct logical_volume *lv)
uint32_t i, s, rebuilds_per_group = 0;
uint32_t failed_components = 0;
if (seg_is_raid10(raid_seg)) {
if (!strcmp(raid_seg->segtype->name, SEG_TYPE_NAME_RAID10)) {
/* FIXME: We only support 2-way mirrors in RAID10 currently */
copies = 2;
for (i = 0; i < raid_seg->area_count * copies; i++) {
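The _alloc_image_components() hunk above carries two per-image extent formulas: a raid10 branch dividing le_count by the number of two-way mirror groups, and a parity branch dividing by the number of data stripes. A short worked example with illustrative figures (not taken from the diff):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t le_count = 1000;     /* logical extents in the LV (illustrative) */

    /* raid10, 4 areas, 2-way mirroring: 4 / 2 = 2 mirror groups. */
    uint32_t area_count = 4, copies = 2;
    printf("raid10: %u extents per image\n", le_count / (area_count / copies));

    /* raid5, 3 areas, 1 parity device: 3 - 1 = 2 data stripes. */
    uint32_t r5_areas = 3, parity_devs = 1;
    printf("raid5:  %u extents per image\n", le_count / (r5_areas - parity_devs));

    /* No parity devices (e.g. raid1): each image holds the full le_count. */
    printf("raid1:  %u extents per image\n", le_count);
    return 0;
}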


@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -28,95 +28,32 @@ struct dm_config_node;
struct dev_manager;
/* Feature flags */
#define SEG_CAN_SPLIT 0x0000000000000001ULL
#define SEG_AREAS_STRIPED 0x0000000000000002ULL
#define SEG_AREAS_MIRRORED 0x0000000000000004ULL
#define SEG_SNAPSHOT 0x0000000000000008ULL
#define SEG_FORMAT1_SUPPORT 0x0000000000000010ULL
#define SEG_VIRTUAL 0x0000000000000020ULL
#define SEG_CANNOT_BE_ZEROED 0x0000000000000040ULL
#define SEG_MONITORED 0x0000000000000080ULL
#define SEG_REPLICATOR 0x0000000000000100ULL
#define SEG_REPLICATOR_DEV 0x0000000000000200ULL
#define SEG_RAID 0x0000000000000400ULL
#define SEG_THIN_POOL 0x0000000000000800ULL
#define SEG_THIN_VOLUME 0x0000000000001000ULL
#define SEG_CACHE 0x0000000000002000ULL
#define SEG_CACHE_POOL 0x0000000000004000ULL
#define SEG_MIRROR 0x0000000000008000ULL
#define SEG_ONLY_EXCLUSIVE 0x0000000000010000ULL /* In cluster only exclusive activation */
#define SEG_CAN_ERROR_WHEN_FULL 0x0000000000020000ULL
#define SEG_CAN_SPLIT 0x00000001U
#define SEG_AREAS_STRIPED 0x00000002U
#define SEG_AREAS_MIRRORED 0x00000004U
#define SEG_SNAPSHOT 0x00000008U
#define SEG_FORMAT1_SUPPORT 0x00000010U
#define SEG_VIRTUAL 0x00000020U
#define SEG_CANNOT_BE_ZEROED 0x00000040U
#define SEG_MONITORED 0x00000080U
#define SEG_REPLICATOR 0x00000100U
#define SEG_REPLICATOR_DEV 0x00000200U
#define SEG_RAID 0x00000400U
#define SEG_THIN_POOL 0x00000800U
#define SEG_THIN_VOLUME 0x00001000U
#define SEG_CACHE 0x00002000U
#define SEG_CACHE_POOL 0x00004000U
#define SEG_MIRROR 0x00008000U
#define SEG_ONLY_EXCLUSIVE 0x00010000U /* In cluster only exclusive activation */
#define SEG_CAN_ERROR_WHEN_FULL 0x00020000U
#define SEG_UNKNOWN 0x80000000U
#define SEG_RAID1 0x0000000000100000ULL
#define SEG_RAID10 0x0000000000200000ULL
#define SEG_RAID4 0x0000000000400000ULL
#define SEG_RAID5_N 0x0000000000800000ULL
#define SEG_RAID5_LA 0x0000000001000000ULL
#define SEG_RAID5_LS 0x0000000002000000ULL
#define SEG_RAID5_RA 0x0000000004000000ULL
#define SEG_RAID5_RS 0x0000000008000000ULL
#define SEG_RAID5 SEG_RAID5_LS
#define SEG_RAID6_NC 0x0000000010000000ULL
#define SEG_RAID6_NR 0x0000000020000000ULL
#define SEG_RAID6_ZR 0x0000000040000000ULL
#define SEG_RAID6_LA_6 0x0000000080000000ULL
#define SEG_RAID6_LS_6 0x0000000100000000ULL
#define SEG_RAID6_RA_6 0x0000000200000000ULL
#define SEG_RAID6_RS_6 0x0000000400000000ULL
#define SEG_RAID6_N_6 0x0000000800000000ULL
#define SEG_RAID6 SEG_RAID6_ZR
#define SEG_UNKNOWN 0x8000000000000000ULL
#define SEG_TYPE_NAME_LINEAR "linear"
#define SEG_TYPE_NAME_STRIPED "striped"
#define SEG_TYPE_NAME_MIRROR "mirror"
#define SEG_TYPE_NAME_SNAPSHOT "snapshot"
#define SEG_TYPE_NAME_THIN "thin"
#define SEG_TYPE_NAME_THIN_POOL "thin-pool"
#define SEG_TYPE_NAME_CACHE "cache"
#define SEG_TYPE_NAME_CACHE_POOL "cache-pool"
#define SEG_TYPE_NAME_ERROR "error"
#define SEG_TYPE_NAME_FREE "free"
#define SEG_TYPE_NAME_ZERO "zero"
#define SEG_TYPE_NAME_RAID "raid"
#define SEG_TYPE_NAME_RAID0 "raid0"
#define SEG_TYPE_NAME_RAID1 "raid1"
#define SEG_TYPE_NAME_RAID10 "raid10"
#define SEG_TYPE_NAME_RAID4 "raid4"
#define SEG_TYPE_NAME_RAID5 "raid5"
#define SEG_TYPE_NAME_RAID5_LA "raid5_la"
#define SEG_TYPE_NAME_RAID5_LS "raid5_ls"
#define SEG_TYPE_NAME_RAID5_RA "raid5_ra"
#define SEG_TYPE_NAME_RAID5_RS "raid5_rs"
#define SEG_TYPE_NAME_RAID6 "raid6"
#define SEG_TYPE_NAME_RAID6_NC "raid6_nc"
#define SEG_TYPE_NAME_RAID6_NR "raid6_nr"
#define SEG_TYPE_NAME_RAID6_ZR "raid6_zr"
#define segtype_is_linear(segtype) (!strcmp(segtype->name, SEG_TYPE_NAME_LINEAR))
#define segtype_is_cache(segtype) ((segtype)->flags & SEG_CACHE ? 1 : 0)
#define segtype_is_cache_pool(segtype) ((segtype)->flags & SEG_CACHE_POOL ? 1 : 0)
#define segtype_is_mirrored(segtype) ((segtype)->flags & SEG_AREAS_MIRRORED ? 1 : 0)
#define segtype_is_mirror(segtype) ((segtype)->flags & SEG_MIRROR ? 1 : 0)
#define segtype_is_pool(segtype) ((segtype)->flags & (SEG_CACHE_POOL | SEG_THIN_POOL) ? 1 : 0)
#define segtype_is_raid(segtype) ((segtype)->flags & SEG_RAID ? 1 : 0)
#define segtype_is_raid1(segtype) ((segtype)->flags & SEG_RAID1 ? 1 : 0)
#define segtype_is_raid4(segtype) ((segtype)->flags & SEG_RAID4 ? 1 : 0)
#define segtype_is_any_raid5(segtype) ((segtype)->flags & \
(SEG_RAID5_LS|SEG_RAID5_LA|SEG_RAID5_RS|SEG_RAID5_RA|SEG_RAID5_N) ? 1 : 0)
#define segtype_is_raid5_la(segtype) ((segtype)->flags & SEG_RAID5_LA ? 1 : 0)
#define segtype_is_raid5_ra(segtype) ((segtype)->flags & SEG_RAID5_RA ? 1 : 0)
#define segtype_is_raid5_ls(segtype) ((segtype)->flags & SEG_RAID5_LS ? 1 : 0)
#define segtype_is_raid5_rs(segtype) ((segtype)->flags & SEG_RAID5_RS ? 1 : 0)
#define segtype_is_any_raid6(segtype) ((segtype)->flags & \
(SEG_RAID6_ZR|SEG_RAID6_NC|SEG_RAID6_NR| \
SEG_RAID6_LA_6|SEG_RAID6_LS_6|SEG_RAID6_RA_6|SEG_RAID6_RS_6|SEG_RAID6_N_6) ? 1 : 0)
#define segtype_is_raid6_nc(segtype) ((segtype)->flags & SEG_RAID6_NC ? 1 : 0)
#define segtype_is_raid6_nr(segtype) ((segtype)->flags & SEG_RAID6_NR ? 1 : 0)
#define segtype_is_raid6_zr(segtype) ((segtype)->flags & SEG_RAID6_ZR ? 1 : 0)
#define segtype_is_raid10(segtype) ((segtype)->flags & SEG_RAID10 ? 1 : 0)
#define segtype_is_snapshot(segtype) ((segtype)->flags & SEG_SNAPSHOT ? 1 : 0)
#define segtype_is_striped(segtype) ((segtype)->flags & SEG_AREAS_STRIPED ? 1 : 0)
#define segtype_is_thin(segtype) ((segtype)->flags & (SEG_THIN_POOL|SEG_THIN_VOLUME) ? 1 : 0)
#define segtype_is_thin_pool(segtype) ((segtype)->flags & SEG_THIN_POOL ? 1 : 0)
@@ -131,21 +68,9 @@ struct dev_manager;
#define seg_is_mirrored(seg) segtype_is_mirrored((seg)->segtype)
#define seg_is_pool(seg) segtype_is_pool((seg)->segtype)
#define seg_is_raid(seg) segtype_is_raid((seg)->segtype)
#define seg_is_raid1(seg) segtype_is_raid1((seg)->segtype)
#define seg_is_raid4(seg) segtype_is_raid4((seg)->segtype)
#define seg_is_any_raid5(seg) segtype_is_any_raid5((seg)->segtype)
#define seg_is_raid5_la(seg) segtype_is_raid5_la((seg)->segtype)
#define seg_is_raid5_ra(seg) segtype_is_raid5_ra((seg)->segtype)
#define seg_is_raid5_ls(seg) segtype_is_raid5_ls((seg)->segtype)
#define seg_is_raid5_rs(seg) segtype_is_raid5_rs((seg)->segtype)
#define seg_is_any_raid6(seg) segtype_is_any_raid6((seg)->segtype)
#define seg_is_raid6_zr(seg) segtype_is_raid6_zr((seg)->segtype)
#define seg_is_raid6_nr(seg) segtype_is_raid6_nr((seg)->segtype)
#define seg_is_raid6_nc(seg) segtype_is_raid6_nc((seg)->segtype)
#define seg_is_raid10(seg) segtype_is_raid10((seg)->segtype)
#define seg_is_replicator(seg) ((seg)->segtype->flags & SEG_REPLICATOR ? 1 : 0)
#define seg_is_replicator_dev(seg) ((seg)->segtype->flags & SEG_REPLICATOR_DEV ? 1 : 0)
#define seg_is_snapshot(seg) segtype_is_snapshot((seg)->segtype)
#define seg_is_snapshot(seg) ((seg)->segtype->flags & SEG_SNAPSHOT ? 1 : 0)
#define seg_is_striped(seg) segtype_is_striped((seg)->segtype)
#define seg_is_thin(seg) segtype_is_thin((seg)->segtype)
#define seg_is_thin_pool(seg) segtype_is_thin_pool((seg)->segtype)
@@ -161,8 +86,8 @@ struct dev_manager;
struct segment_type {
struct dm_list list; /* Internal */
uint64_t flags;
uint32_t parity_devs; /* Parity drives required by segtype */
uint32_t flags;
uint32_t parity_devs; /* Parity drives required by segtype */
struct segtype_handler *ops;
const char *name;
@@ -227,13 +152,24 @@ struct segment_type *init_unknown_segtype(struct cmd_context *cmd,
const char *name);
#define RAID_FEATURE_RAID10 (1U << 0) /* version 1.3 */
#define RAID_FEATURE_RAID0 (1U << 1) /* version 1.7 */
#define RAID_FEATURE_RESHAPING (1U << 2) /* version 1.8 */
#ifdef RAID_INTERNAL
int init_raid_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);
#endif
#define SEG_TYPE_NAME_RAID1 "raid1"
#define SEG_TYPE_NAME_RAID10 "raid10"
#define SEG_TYPE_NAME_RAID4 "raid4"
#define SEG_TYPE_NAME_RAID5 "raid5"
#define SEG_TYPE_NAME_RAID5_LA "raid5_la"
#define SEG_TYPE_NAME_RAID5_LS "raid5_ls"
#define SEG_TYPE_NAME_RAID5_RA "raid5_ra"
#define SEG_TYPE_NAME_RAID5_RS "raid5_rs"
#define SEG_TYPE_NAME_RAID6 "raid6"
#define SEG_TYPE_NAME_RAID6_NC "raid6_nc"
#define SEG_TYPE_NAME_RAID6_NR "raid6_nr"
#define SEG_TYPE_NAME_RAID6_ZR "raid6_zr"
#ifdef REPLICATOR_INTERNAL
int init_replicator_segtype(struct cmd_context *cmd, struct segtype_library *seglib);
#endif
@@ -255,9 +191,6 @@ int init_thin_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);
int init_cache_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);
#endif
#define CACHE_FEATURE_POLICY_MQ (1U << 0)
#define CACHE_FEATURE_POLICY_SMQ (1U << 1)
#define SNAPSHOT_FEATURE_FIXED_LEAK (1U << 0) /* version 1.12 */
#ifdef SNAPSHOT_INTERNAL
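Several hunks in this header and in the raid code above switch between testing a segment type through a flag bit (segtype_is_raid10(), seg_is_mirror()) and comparing its name against a SEG_TYPE_NAME_* string. A minimal sketch of the two tests over a simplified structure (not LVM2's struct segment_type); the flag values are the ones listed in the hunk:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define SEG_RAID              0x0000000000000400ULL
#define SEG_RAID10            0x0000000000200000ULL
#define SEG_TYPE_NAME_RAID10  "raid10"

struct segtype_sim { const char *name; uint64_t flags; };

#define segtype_is_raid10(s)  ((s)->flags & SEG_RAID10 ? 1 : 0)

int main(void)
{
    struct segtype_sim raid10 = { SEG_TYPE_NAME_RAID10, SEG_RAID | SEG_RAID10 };

    /* Flag test: a single bitwise AND, needs a dedicated SEG_RAID10 bit. */
    printf("flag test:   %d\n", segtype_is_raid10(&raid10));

    /* Name test: no per-type flag required, at the cost of a strcmp(). */
    printf("string test: %d\n", !strcmp(raid10.name, SEG_TYPE_NAME_RAID10));
    return 0;
}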


@@ -44,7 +44,7 @@ static uint64_t _cow_extra_chunks(struct cmd_context *cmd, uint64_t n_chunks)
unsigned attrs = 0;
if (activation() &&
(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_SNAPSHOT)) &&
(segtype = get_segtype_from_string(cmd, "snapshot")) &&
segtype->ops->target_present &&
segtype->ops->target_present(cmd, NULL, &attrs) &&
(attrs & SNAPSHOT_FEATURE_FIXED_LEAK))
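The condition above asks the snapshot segment type whether its kernel target is present and whether that target reports the SNAPSHOT_FEATURE_FIXED_LEAK attribute. A minimal sketch of the capability probe with a simplified ops table (the callback below is a stand-in, not the LVM2 implementation):

#include <stdio.h>

#define SNAPSHOT_FEATURE_FIXED_LEAK (1U << 0)   /* version 1.12, per the header hunk */

struct segtype_ops_sim {
    int (*target_present)(void *cmd, void *seg, unsigned *attrs);
};
struct segtype_sim { const struct segtype_ops_sim *ops; };

/* Stand-in callback: pretend the target is loaded and fixed-leak aware. */
static int _snap_target_present(void *cmd, void *seg, unsigned *attrs)
{
    (void) cmd;
    (void) seg;
    if (attrs)
        *attrs = SNAPSHOT_FEATURE_FIXED_LEAK;
    return 1;
}

int main(void)
{
    static const struct segtype_ops_sim ops = { _snap_target_present };
    struct segtype_sim snapshot = { &ops };
    unsigned attrs = 0;

    if (snapshot.ops->target_present &&
        snapshot.ops->target_present(NULL, NULL, &attrs) &&
        (attrs & SNAPSHOT_FEATURE_FIXED_LEAK))
        printf("snapshot target present with fixed-leak support\n");
    return 0;
}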


@@ -63,10 +63,6 @@ int attach_pool_message(struct lv_segment *pool_seg, dm_thin_message_t type,
tmsg->type = type;
/* If the 1st message is added in non-read-only mode, modify transaction_id */
if (!no_update && dm_list_empty(&pool_seg->thin_messages))
pool_seg->transaction_id++;
dm_list_add(&pool_seg->thin_messages, &tmsg->list);
log_debug_metadata("Added %s message.",
@@ -216,7 +212,7 @@ int thin_pool_feature_supported(const struct logical_volume *lv, int feature)
int pool_below_threshold(const struct lv_segment *pool_seg)
{
dm_percent_t percent;
dm_percent_t threshold = DM_PERCENT_1 *
int threshold = DM_PERCENT_1 *
find_config_tree_int(pool_seg->lv->vg->cmd, activation_thin_pool_autoextend_threshold_CFG,
lv_config_profile(pool_seg->lv));
@@ -224,27 +220,15 @@ int pool_below_threshold(const struct lv_segment *pool_seg)
if (!lv_thin_pool_percent(pool_seg->lv, 0, &percent))
return_0;
if (percent > threshold) {
log_debug("Threshold configured for free data space in "
"thin pool %s has been reached (%.2f%% >= %.2f%%).",
display_lvname(pool_seg->lv),
dm_percent_to_float(percent),
dm_percent_to_float(threshold));
if (percent >= threshold)
return 0;
}
/* Metadata */
if (!lv_thin_pool_percent(pool_seg->lv, 1, &percent))
return_0;
if (percent > threshold) {
log_debug("Threshold configured for free metadata space in "
"thin pool %s has been reached (%.2f%% > %.2f%%).",
display_lvname(pool_seg->lv),
dm_percent_to_float(percent),
dm_percent_to_float(threshold));
if (percent >= threshold)
return 0;
}
return 1;
}
@@ -492,6 +476,9 @@ int update_pool_lv(struct logical_volume *lv, int activate)
dm_list_init(&(first_seg(lv)->thin_messages));
/* thin-pool target transaction is finished, increase lvm2 TID */
first_seg(lv)->transaction_id++;
if (!vg_write(lv->vg) || !vg_commit(lv->vg))
return_0;
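In the pool_below_threshold() hunk above, usage is a fixed-point dm_percent_t and the configured autoextend percentage is scaled by DM_PERCENT_1 before the comparison; once data or metadata usage crosses the threshold the function returns 0. A minimal sketch of that comparison; the scale factor and the sample figures are illustrative stand-ins, not libdevmapper's definitions:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t percent_sim_t;   /* stand-in for dm_percent_t */
#define PERCENT_1 10000u          /* stand-in scale for DM_PERCENT_1 */

/* 1 while usage is still below the configured percentage, 0 otherwise. */
static int below_threshold(percent_sim_t used, int configured_percent)
{
    percent_sim_t threshold = PERCENT_1 * (percent_sim_t) configured_percent;

    return used < threshold;   /* used >= threshold means "reached" */
}

int main(void)
{
    /* autoextend_threshold = 70%, pool data usage at 65.5%. */
    printf("65.5%% of 70%%: %d\n", below_threshold((percent_sim_t)(65.5 * PERCENT_1), 70));
    /* Exactly at the threshold counts as reached. */
    printf("70.0%% of 70%%: %d\n", below_threshold(70u * PERCENT_1, 70));
    return 0;
}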


@@ -20,6 +20,7 @@
#include "toolcontext.h"
#include "lvmcache.h"
#include "archiver.h"
#include "lvmlockd.h"
struct volume_group *alloc_vg(const char *pool_name, struct cmd_context *cmd,
const char *vg_name)


@@ -620,7 +620,7 @@ struct segment_type *init_segtype(struct cmd_context *cmd)
return_NULL;
segtype->ops = &_mirrored_ops;
segtype->name = SEG_TYPE_NAME_MIRROR;
segtype->name = "mirror";
segtype->flags = SEG_MIRROR | SEG_AREAS_MIRRORED;
#ifdef DEVMAPPER_SUPPORT

lib/misc/.gitignore (new file)

@@ -0,0 +1,2 @@
configure.h
lvm-version.h


@@ -1,4 +1,4 @@
/* include/configure.h.in. Generated from configure.in by autoheader. */
/* lib/misc/configure.h.in. Generated from configure.in by autoheader. */
/* Define to 1 to use libblkid detection of signatures when wiping. */
#undef BLKID_WIPING_SUPPORT
@@ -460,7 +460,7 @@
/* Define to 1 if you have the `strtoull' function. */
#undef HAVE_STRTOULL
/* Define to 1 if `st_rdev' is a member of `struct stat'. */
/* Define to 1 if `st_rdev' is member of `struct stat'. */
#undef HAVE_STRUCT_STAT_ST_RDEV
/* Define to 1 if you have the <syslog.h> header file. */
@@ -513,9 +513,6 @@
/* Define to 1 if you have the <sys/stat.h> header file. */
#undef HAVE_SYS_STAT_H
/* Define to 1 if you have the <sys/timerfd.h> header file. */
#undef HAVE_SYS_TIMERFD_H
/* Define to 1 if you have the <sys/time.h> header file. */
#undef HAVE_SYS_TIME_H
@@ -643,9 +640,6 @@
/* Define to the one symbol short name of this package. */
#undef PACKAGE_TARNAME
/* Define to the home page for this package. */
#undef PACKAGE_URL
/* Define to the version of this package. */
#undef PACKAGE_VERSION


@@ -25,57 +25,14 @@
#define _GNU_SOURCE
#define _FILE_OFFSET_BITS 64
/*
* Symbol export control macros
*
* DM_EXPORT_SYMBOL(func,ver)
* DM_EXPORT_SYMBOL_BASE(func,ver)
*
* For functions that have multiple implementations these macros control
* symbol export and versioning.
*
* Function definitions that exist in only one version never need to use
* these macros.
*
* Backwards compatible implementations must include a version tag of
* the form "_v1_02_104" as a suffix to the function name and use the
* macro DM_EXPORT_SYMBOL to export the function and bind it to the
* specified version string.
*
* Since versioning is only available when compiling with GCC the entire
* compatibility version should be enclosed in '#if defined(__GNUC__)',
* for example:
*
* int dm_foo(int bar)
* {
* return bar;
* }
*
* #if defined(__GNUC__)
* // Backward compatible dm_foo() version 1.02.104
* int dm_foo_v1_02_104(void);
* int dm_foo_v1_02_104(void)
* {
* return 0;
* }
* DM_EXPORT_SYMBOL(dm_foo,1_02_104)
* #endif
*
* A prototype for the compatibility version is required as these
* functions must not be declared static.
*
* The DM_EXPORT_SYMBOL_BASE macro is only used to export the base
* versions of library symbols prior to the introduction of symbol
* versioning: it must never be used for new symbols.
*/
#if defined(__GNUC__)
#define DM_EXPORT_SYMBOL(func, ver) \
__asm__(".symver " #func "_v" #ver ", " #func "@DM_" #ver )
#define DM_EXPORT_SYMBOL_BASE(func) \
#define DM_EXPORTED_SYMBOL(func, ver) \
__asm__(".symver " #func "_v" #ver ", " #func "@@DM_" #ver )
#define DM_EXPORTED_SYMBOL_BASE(func) \
__asm__(".symver " #func "_base, " #func "@Base" )
#else
#define DM_EXPORT_SYMBOL(func, ver)
#define DM_EXPORT_SYMBOL_BASE(func)
#define DM_EXPORTED_SYMBOL(func, ver)
#define DM_EXPORTED_SYMBOL_BASE(func)
#endif
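For a concrete function, the token pasting in DM_EXPORT_SYMBOL above resolves to a .symver directive; dm_foo and version 1_02_104 below are the hypothetical names already used in the comment block, and building a real versioned library would additionally need a linker version script defining the DM_1_02_104 node:

/* GCC/ELF only: compatibility implementation plus its version binding. */
int dm_foo_v1_02_104(void);
int dm_foo_v1_02_104(void)
{
    return 0;
}

/* DM_EXPORT_SYMBOL(dm_foo, 1_02_104) expands to: */
__asm__(".symver dm_foo_v1_02_104, dm_foo@DM_1_02_104");

/* The companion macro that uses "@@" rather than "@" marks the default
 * version, i.e. the one newly linked programs bind to. */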


@@ -40,8 +40,6 @@ static int _security_level = SECURITY_LEVEL;
static char _cmd_name[30] = "";
static int _mirror_in_sync = 0;
static int _dmeventd_monitor = DEFAULT_DMEVENTD_MONITOR;
/* When set, disables update of _dmeventd_monitor & _ignore_suspended_devices */
static int _disable_dmeventd_monitoring = 0;
static int _background_polling = DEFAULT_BACKGROUND_POLLING;
static int _ignore_suspended_devices = 0;
static int _ignore_lvm_mirrors = DEFAULT_IGNORE_LVM_MIRRORS;
@@ -125,13 +123,7 @@ void init_mirror_in_sync(int in_sync)
void init_dmeventd_monitor(int reg)
{
if (!_disable_dmeventd_monitoring)
_dmeventd_monitor = reg;
}
void init_disable_dmeventd_monitoring(int reg)
{
_disable_dmeventd_monitoring = reg;
_dmeventd_monitor = reg;
}
void init_background_polling(int polling)
@@ -141,8 +133,7 @@ void init_background_polling(int polling)
void init_ignore_suspended_devices(int ignore)
{
if (!_disable_dmeventd_monitoring)
_ignore_suspended_devices = ignore;
_ignore_suspended_devices = ignore;
}
void init_ignore_lvm_mirrors(int scan)
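One side of the lvm-globals hunks above introduces a _disable_dmeventd_monitoring switch that, once set, turns later init_dmeventd_monitor() calls into no-ops. A minimal sketch of the guard (the default value and function bodies are simplified, not the LVM2 originals):

#include <stdio.h>

static int _dmeventd_monitor = 1;            /* stand-in for DEFAULT_DMEVENTD_MONITOR */
static int _disable_dmeventd_monitoring = 0;

void init_dmeventd_monitor(int reg)
{
    if (!_disable_dmeventd_monitoring)
        _dmeventd_monitor = reg;
}

void init_disable_dmeventd_monitoring(int disable)
{
    _disable_dmeventd_monitoring = disable;
}

int main(void)
{
    init_disable_dmeventd_monitoring(1);
    init_dmeventd_monitor(0);                             /* ignored once disabled */
    printf("dmeventd_monitor = %d\n", _dmeventd_monitor); /* still 1 */
    return 0;
}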


@@ -38,7 +38,6 @@ void init_lockingfailed(int level);
void init_security_level(int level);
void init_mirror_in_sync(int in_sync);
void init_dmeventd_monitor(int reg);
void init_disable_dmeventd_monitoring(int disable);
void init_background_polling(int polling);
void init_ignore_suspended_devices(int ignore);
void init_ignore_lvm_mirrors(int scan);

Some files were not shown because too many files have changed in this diff.