1
0
mirror of git://sourceware.org/git/lvm2.git synced 2024-12-21 13:34:40 +03:00
lvm2/lib/locking/locking.c
2018-06-01 13:04:12 +01:00

473 lines
12 KiB
C

/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib/misc/lib.h"
#include "lib/locking/locking.h"
#include "locking_types.h"
#include "lib/misc/lvm-string.h"
#include "lib/activate/activate.h"
#include "lib/commands/toolcontext.h"
#include "lib/mm/memlock.h"
#include "lib/config/defaults.h"
#include "lib/cache/lvmcache.h"
#include "lib/misc/lvm-signal.h"
#include <assert.h>
#include <sys/stat.h>
#include <limits.h>
#include <unistd.h>
static struct locking_type _locking;
static int _vg_lock_count = 0; /* Number of locks held */
static int _vg_write_lock_held = 0; /* VG write lock held? */
static int _blocking_supported = 0;
typedef enum {
LV_NOOP,
LV_SUSPEND,
LV_RESUME
} lv_operation_t;
/* Enter a critical section ahead of a suspend, but only for locking
 * backends that require memory to be locked before taking locks. */
static void _lock_memory(struct cmd_context *cmd, lv_operation_t lv_op)
{
	if ((_locking.flags & LCK_PRE_MEMLOCK) && (lv_op == LV_SUSPEND))
		critical_section_inc(cmd, "locking for suspend");
}
/* Leave the critical section on resume - the counterpart of
 * _lock_memory(), again only for LCK_PRE_MEMLOCK backends. */
static void _unlock_memory(struct cmd_context *cmd, lv_operation_t lv_op)
{
	if ((_locking.flags & LCK_PRE_MEMLOCK) && (lv_op == LV_RESUME))
		critical_section_dec(cmd, "unlocking on resume");
}
/* Re-enable signal delivery, unless VG locks are still held. */
static void _unblock_signals(void)
{
	/* Don't unblock signals while any locks are held */
	if (_vg_lock_count)
		return;

	unblock_signals();
}
/* Forget all lock state, e.g. in a child after fork(). */
void reset_locking(void)
{
	const int had_locks = (_vg_lock_count != 0);

	_vg_lock_count = 0;
	_vg_write_lock_held = 0;

	if (_locking.reset_locking)
		_locking.reset_locking();

	/* The counter was just zeroed, so this really unblocks. */
	if (had_locks)
		_unblock_signals();

	memlock_reset();
}
/* Track how many VG metadata locks are held and whether any of
 * them is a write lock. */
static void _update_vg_lock_count(const char *resource, uint32_t flags)
{
	uint32_t lck_type = flags & LCK_TYPE_MASK;

	/* Ignore locks not associated with updating VG metadata */
	if (((flags & LCK_SCOPE_MASK) != LCK_VG) ||
	    (flags & LCK_CACHE) ||
	    !strcmp(resource, VG_GLOBAL))
		return;

	if (lck_type == LCK_UNLOCK)
		_vg_lock_count--;
	else
		_vg_lock_count++;

	/* We don't bother to reset this until all VG locks are dropped */
	if (lck_type == LCK_WRITE)
		_vg_write_lock_held = 1;
	else if (!_vg_lock_count)
		_vg_write_lock_held = 0;
}
/*
 * Select and initialise a locking backend.
 *
 * type: locking type; if < 0, then read config tree value
 *       (global/locking_type).  0 = none, 1 = file-based, 2 = external
 *       via dlopen, 3 = internal clustered, 4 = read-only, 5 = dummy.
 *
 * Returns 1 on success, 0 on failure (unless locking failures are
 * being ignored, in which case read-only locking is installed).
 */
int init_locking(int type, struct cmd_context *cmd, int suppress_messages)
{
	/* Environment override: never print locking-failure messages. */
	if (getenv("LVM_SUPPRESS_LOCKING_FAILURE_MESSAGES"))
		suppress_messages = 1;

	if (type < 0)
		type = find_config_tree_int(cmd, global_locking_type_CFG, NULL);

	/* global/wait_for_locks: may lock requests block? */
	_blocking_supported = find_config_tree_bool(cmd, global_wait_for_locks_CFG, NULL);

	switch (type) {
	case 0:
		init_no_locking(&_locking, cmd, suppress_messages);
		log_warn_suppress(suppress_messages,
				  "WARNING: Locking disabled. Be careful! "
				  "This could corrupt your metadata.");
		return 1;

	case 1:
		log_very_verbose("%sFile-based locking selected.",
				 _blocking_supported ? "" : "Non-blocking ");

		if (!init_file_locking(&_locking, cmd, suppress_messages)) {
			log_error_suppress(suppress_messages,
					   "File-based locking initialisation failed.");
			break;
		}
		return 1;

#ifdef HAVE_LIBDL
	case 2:
		/* External (dlopen) locking is unavailable in static builds. */
		if (!is_static()) {
			log_very_verbose("External locking selected.");
			if (init_external_locking(&_locking, cmd, suppress_messages))
				return 1;
		}
		if (!find_config_tree_bool(cmd, global_fallback_to_clustered_locking_CFG, NULL)) {
			log_error_suppress(suppress_messages, "External locking initialisation failed.");
			break;
		}
#endif
		log_very_verbose("Falling back to internal clustered locking.");
		/* Fall through */

	case 3:
#ifdef CLUSTER_LOCKING_INTERNAL
		log_very_verbose("Cluster locking selected.");
		if (!init_cluster_locking(&_locking, cmd, suppress_messages)) {
			log_error_suppress(suppress_messages,
					   "Internal cluster locking initialisation failed.");
			break;
		}
		return 1;
#else
		/* Built without cluster locking: degrade to file-based. */
		log_warn("WARNING: Using locking_type=1, ignoring locking_type=3.");
		log_warn("WARNING: See lvmlockd(8) for information on using cluster/clvm VGs.");
		type = 1;
		log_very_verbose("%sFile-based locking selected.",
				 _blocking_supported ? "" : "Non-blocking ");
		if (!init_file_locking(&_locking, cmd, suppress_messages)) {
			log_error_suppress(suppress_messages,
					   "File-based locking initialisation failed.");
			break;
		}
		return 1;
#endif

	case 4:
		log_verbose("Read-only locking selected. "
			    "Only read operations permitted.");
		if (!init_readonly_locking(&_locking, cmd, suppress_messages))
			break;
		return 1;

	case 5:
		init_dummy_locking(&_locking, cmd, suppress_messages);
		log_verbose("Locking disabled for read-only access.");
		return 1;

	default:
		log_error("Unknown locking type requested.");
		return 0;
	}

	/* A clustered type failed; optionally fall back to local file locking. */
	if ((type == 2 || type == 3) &&
	    find_config_tree_bool(cmd, global_fallback_to_local_locking_CFG, NULL)) {
		log_warn_suppress(suppress_messages, "WARNING: Falling back to local file-based locking.");
		log_warn_suppress(suppress_messages,
				  "Volume Groups with the clustered attribute will "
				  "be inaccessible.");
		if (init_file_locking(&_locking, cmd, suppress_messages))
			return 1;
		log_error_suppress(suppress_messages,
				   "File-based locking initialisation failed.");
	}

	/* Last resort: read-only locking, but only when failures are ignored. */
	if (!ignorelockingfailure())
		return 0;

	log_verbose("Locking disabled - only read operations permitted.");
	init_readonly_locking(&_locking, cmd, suppress_messages);

	return 1;
}
/*
 * Shut down the active locking backend.
 *
 * Guard against the function pointer being unset: if init_locking()
 * never succeeded (or reset_locking() cleared state), calling through
 * a NULL pointer is undefined behaviour.
 */
void fin_locking(void)
{
	if (_locking.fin_locking)
		_locking.fin_locking();
}
/*
 * Take or release one lock via the active backend.
 *
 * VG locking is by VG name.
 * FIXME This should become VG uuid.
 *
 * Signals are blocked for the duration, and memory is locked around a
 * suspend for LCK_PRE_MEMLOCK backends.  On a successful VG-scope lock
 * the lvmcache lock-order bookkeeping and the VG lock counters are
 * updated; on unlock the lvmcache entry is removed even if the backend
 * call failed.
 *
 * Returns the backend result: non-zero on success, 0 on failure.
 */
static int _lock_vol(struct cmd_context *cmd, const char *resource,
		     uint32_t flags, lv_operation_t lv_op, const struct logical_volume *lv)
{
	uint32_t lck_type = flags & LCK_TYPE_MASK;
	uint32_t lck_scope = flags & LCK_SCOPE_MASK;
	int ret = 0;
	const struct logical_volume *active_lv;

	block_signals(flags);
	_lock_memory(cmd, lv_op);

	assert(resource);

	if (!*resource) {
		log_error(INTERNAL_ERROR "Use of P_orphans is deprecated.");
		goto out;
	}

	/* Cache (P_-prefixed) locks make no sense on orphan/global names. */
	if ((is_orphan_vg(resource) || is_global_vg(resource)) && (flags & LCK_CACHE)) {
		log_error(INTERNAL_ERROR "P_%s referenced.", resource);
		goto out;
	}

	/* When trying activating component LV, make sure none of
	 * sub component LV or LVs that are using it are active */
	if (lv && ((lck_type == LCK_READ) || (lck_type == LCK_EXCL)) &&
	    ((!lv_is_visible(lv) && (active_lv = lv_holder_is_active(lv))) ||
	     (active_lv = lv_component_is_active(lv)))) {
		log_error("Activation of logical volume %s is prohibited while logical volume %s is active.",
			  display_lvname(lv), display_lvname(active_lv));
		goto out;
	}

	/* Refuse write locks (except VG_GLOBAL) in metadata read-only mode. */
	if (cmd->metadata_read_only && lck_type == LCK_WRITE &&
	    strcmp(resource, VG_GLOBAL)) {
		log_error("Operation prohibited while global/metadata_read_only is set.");
		goto out;
	}

	if ((ret = _locking.lock_resource(cmd, resource, flags, lv))) {
		/* Record the lock in lvmcache and reset device error counts. */
		if (lck_scope == LCK_VG && !(flags & LCK_CACHE)) {
			if (lck_type != LCK_UNLOCK)
				lvmcache_lock_vgname(resource, lck_type == LCK_READ);
			dev_reset_error_count(cmd);
		}
		_update_vg_lock_count(resource, flags);
	} else
		stack;

	/* If unlocking, always remove lock from lvmcache even if operation failed. */
	if (lck_scope == LCK_VG && !(flags & LCK_CACHE) && lck_type == LCK_UNLOCK) {
		lvmcache_unlock_vgname(resource);
		if (!ret)
			_update_vg_lock_count(resource, flags);
	}
out:
	_unlock_memory(cmd, lv_op);
	_unblock_signals();

	return ret;
}
/*
 * Public locking entry point: lock a VG or LV resource by name.
 *
 * Applies scope-specific policy (non-blocking flags, orphan VG
 * aliasing, lock-ordering checks, metadata cache dropping) before
 * delegating to _lock_vol().  Unless LCK_HOLD or LCK_CACHE was
 * requested, a successfully acquired lock is released immediately.
 *
 * Returns 1 on success, 0 on failure.
 */
int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags, const struct logical_volume *lv)
{
	/* NOTE(review): 258 presumably covers the longest resource name
	 * plus prefix/terminator - confirm against NAME_LEN. */
	char resource[258] __attribute__((aligned(8)));
	lv_operation_t lv_op;
	int lck_type = flags & LCK_TYPE_MASK;

	/* Derive the memlock operation from the combined scope+type bits. */
	switch (flags & (LCK_SCOPE_MASK | LCK_TYPE_MASK)) {
	case LCK_LV_SUSPEND:
		lv_op = LV_SUSPEND;
		break;
	case LCK_LV_RESUME:
		lv_op = LV_RESUME;
		break;
	default: lv_op = LV_NOOP;
	}

	if (flags == LCK_NONE) {
		log_debug_locking(INTERNAL_ERROR "%s: LCK_NONE lock requested", vol);
		return 1;
	}

	switch (flags & LCK_SCOPE_MASK) {
	case LCK_ACTIVATION:
		break;
	case LCK_VG:
		if (!_blocking_supported)
			flags |= LCK_NONBLOCK;

		/* Global VG_ORPHANS lock covers all orphan formats. */
		if (is_orphan_vg(vol))
			vol = VG_ORPHANS;

		/* VG locks alphabetical, ORPHAN lock last */
		if ((lck_type != LCK_UNLOCK) &&
		    !(flags & LCK_CACHE) &&
		    !lvmcache_verify_lock_order(vol))
			return_0;

		/* LCK_VG_DROP_CACHE forces the drop even for internal names. */
		if ((flags == LCK_VG_DROP_CACHE) ||
		    (strcmp(vol, VG_GLOBAL) && strcmp(vol, VG_SYNC_NAMES))) {
			/* Skip dropping cache for internal VG names #global, #sync_names */
			log_debug_locking("Dropping cache for %s.", vol);
			lvmcache_drop_metadata(vol, 0);
		}
		break;
	case LCK_LV:
		/* All LV locks are non-blocking. */
		flags |= LCK_NONBLOCK;
		break;
	default:
		log_error("Unrecognised lock scope: %d",
			  flags & LCK_SCOPE_MASK);
		return 0;
	}

	/* Copy into the fixed aligned buffer; fail rather than truncate. */
	if (!dm_strncpy(resource, vol, sizeof(resource))) {
		log_error(INTERNAL_ERROR "Resource name %s is too long.", vol);
		return 0;
	}

	if (!_lock_vol(cmd, resource, flags, lv_op, lv))
		return_0;

	/*
	 * If a real lock was acquired (i.e. not LCK_CACHE),
	 * perform an immediate unlock unless LCK_HOLD was requested.
	 */
	if ((lck_type == LCK_NULL) || (lck_type == LCK_UNLOCK) ||
	    (flags & (LCK_CACHE | LCK_HOLD)))
		return 1;

	if (!_lock_vol(cmd, resource, (flags & ~LCK_TYPE_MASK) | LCK_UNLOCK, lv_op, lv))
		return_0;

	return 1;
}
/*
 * Activate an LV exclusively.
 *
 * First try to activate exclusively locally.
 * Then if the VG is clustered and the LV is not yet active (e.g. due to
 * an activation filter) try activating on remote nodes.
 *
 * Returns 1 on success, 0 on failure.
 */
int activate_lv_excl(struct cmd_context *cmd, const struct logical_volume *lv)
{
	/* Non-clustered VGs are only activated locally. */
	if (!vg_is_clustered(lv->vg))
		return activate_lv_excl_local(cmd, lv);

	/* Already exclusively active on this node: nothing to do. */
	if (lv_is_active_exclusive_locally(lv))
		return 1;

	if (!activate_lv_excl_local(cmd, lv))
		return_0;

	/* Local activation may have been filtered out; check cluster-wide. */
	if (lv_is_active_exclusive(lv))
		return 1;

	/* FIXME Deal with error return codes. */
	if (!activate_lv_excl_remote(cmd, lv))
		return_0;

	return 1;
}
/*
 * Lock a list of LVs.
 *
 * Activates every LV on the list; exclusively when 'exclusive' is set
 * or when the LV is already exclusively active.  If an exclusive
 * activation fails, the LVs handled earlier in this call are
 * deactivated again (best effort, via dm_list_uniterate - presumably a
 * reverse walk back to the list head; verify macro semantics) before
 * returning.  A plain (shared) activation failure returns without any
 * rollback.
 *
 * Returns 1 on success, 0 on failure.
 */
int activate_lvs(struct cmd_context *cmd, struct dm_list *lvs, unsigned exclusive)
{
	struct dm_list *lvh;
	struct lv_list *lvl;

	dm_list_iterate_items(lvl, lvs) {
		if (!exclusive && !lv_is_active_exclusive(lvl->lv)) {
			if (!activate_lv(cmd, lvl->lv)) {
				log_error("Failed to activate %s", display_lvname(lvl->lv));
				return 0;
			}
		} else if (!activate_lv_excl(cmd, lvl->lv)) {
			log_error("Failed to activate %s", display_lvname(lvl->lv));

			/* Undo: deactivate the LVs already processed. */
			dm_list_uniterate(lvh, lvs, &lvl->list) {
				lvl = dm_list_item(lvh, struct lv_list);
				if (!deactivate_lv(cmd, lvl->lv))
					stack;
			}
			return 0;
		}
	}

	return 1;
}
/* Report whether any VG write lock is currently held. */
int vg_write_lock_held(void)
{
	return _vg_write_lock_held ? 1 : 0;
}
/* Report whether the active locking backend is clustered. */
int locking_is_clustered(void)
{
	return !!(_locking.flags & LCK_CLUSTERED);
}
/* Report whether the backend can query lock state on remote nodes. */
int locking_supports_remote_queries(void)
{
	return !!(_locking.flags & LCK_SUPPORTS_REMOTE_QUERIES);
}
/*
 * Query whether 'vol' is locked on cluster node 'node'.
 * Returns 1 if held (or on query failure), 0 if not held or
 * non-clustered, -1 when the backend cannot answer queries.
 * '*exclusive' (if supplied) is set when the lock is exclusive.
 */
int cluster_lock_held(const char *vol, const char *node, int *exclusive)
{
	int mode = LCK_NULL;

	if (!locking_is_clustered())
		return 0;

	if (!_locking.query_resource)
		return -1;

	/* If an error occurred, assume the volume is active. */
	if (!_locking.query_resource(vol, node, &mode)) {
		stack;
		return 1;
	}

	if (exclusive)
		*exclusive = (mode == LCK_EXCL) ? 1 : 0;

	return (mode != LCK_NULL);
}
/* Wait for local device nodes to settle via the #sync_names VG lock. */
int sync_local_dev_names(struct cmd_context* cmd)
{
	/* Release memory locks first so the sync is not blocked. */
	memlock_unlock(cmd);

	return lock_vol(cmd, VG_SYNC_NAMES, LCK_VG_SYNC_LOCAL, NULL);
}
/* As sync_local_dev_names(), but cluster-wide rather than local. */
int sync_dev_names(struct cmd_context* cmd)
{
	/* Release memory locks first so the sync is not blocked. */
	memlock_unlock(cmd);

	return lock_vol(cmd, VG_SYNC_NAMES, LCK_VG_SYNC, NULL);
}