1
0
mirror of git://sourceware.org/git/lvm2.git synced 2025-03-10 16:58:47 +03:00
lvm2/lib/locking/locking.c
David Teigland 18259d5559 Remove unused clvm variations for active LVs
Different flavors of activate_lv() and lv_is_active()
which are meaningful in a clustered VG can be eliminated
and replaced with whatever that flavor already falls back
to in a local VG.

e.g. lv_is_active_exclusive_locally() is distinct from
lv_is_active() in a clustered VG, but in a local VG they
are equivalent.  So, all instances of the variant are
replaced with the basic local equivalent.

For local VGs, the same behavior remains as before.
For shared VGs, lvmlockd was written with the explicit
requirement of local behavior from these functions
(lvmlockd requires locking_type 1), so the behavior
in shared VGs also remains the same.
2018-06-07 16:17:04 +01:00

321 lines
7.9 KiB
C

/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib/misc/lib.h"
#include "lib/locking/locking.h"
#include "locking_types.h"
#include "lib/misc/lvm-string.h"
#include "lib/activate/activate.h"
#include "lib/commands/toolcontext.h"
#include "lib/mm/memlock.h"
#include "lib/config/defaults.h"
#include "lib/cache/lvmcache.h"
#include "lib/misc/lvm-signal.h"
#include <assert.h>
#include <sys/stat.h>
#include <limits.h>
#include <unistd.h>
/* Active locking backend; filled in by init_locking() (file-based locking). */
static struct locking_type _locking;

static int _vg_lock_count = 0;		/* Number of locks held */
static int _vg_write_lock_held = 0;	/* VG write lock held? */
static int _blocking_supported = 0;	/* From global/wait_for_locks config setting */

/*
 * LV operation associated with a lock request; drives the
 * critical-section (memory locking) handling around suspend/resume.
 */
typedef enum {
	LV_NOOP,
	LV_SUSPEND,
	LV_RESUME
} lv_operation_t;
/*
 * Enter the critical section before an LV suspend when the locking
 * backend requires memory to be locked ahead of taking the lock.
 */
static void _lock_memory(struct cmd_context *cmd, lv_operation_t lv_op)
{
	if ((_locking.flags & LCK_PRE_MEMLOCK) && (lv_op == LV_SUSPEND))
		critical_section_inc(cmd, "locking for suspend");
}
/*
 * Leave the critical section after an LV resume; counterpart of
 * _lock_memory() for backends using LCK_PRE_MEMLOCK.
 */
static void _unlock_memory(struct cmd_context *cmd, lv_operation_t lv_op)
{
	if ((_locking.flags & LCK_PRE_MEMLOCK) && (lv_op == LV_RESUME))
		critical_section_dec(cmd, "unlocking on resume");
}
/* Re-enable signal delivery, but only once no VG locks remain held. */
static void _unblock_signals(void)
{
	if (_vg_lock_count)
		return;

	unblock_signals();
}
/*
 * Forget all lock state (e.g. after an error or fork): zero the lock
 * counters, let the backend reset itself, then unblock signals and
 * reset memory locking.
 */
void reset_locking(void)
{
	const int had_locks = _vg_lock_count;

	_vg_lock_count = 0;
	_vg_write_lock_held = 0;

	if (_locking.reset_locking)
		_locking.reset_locking();

	/* Count is now zero, so _unblock_signals() will really unblock. */
	if (had_locks)
		_unblock_signals();

	memlock_reset();
}
/*
 * Maintain _vg_lock_count/_vg_write_lock_held for real per-VG metadata
 * locks.  Non-VG scope locks, cache pseudo-locks and the global lock
 * are not counted.
 */
static void _update_vg_lock_count(const char *resource, uint32_t flags)
{
	const uint32_t lck_type = flags & LCK_TYPE_MASK;

	if ((flags & LCK_SCOPE_MASK) != LCK_VG)
		return;

	if (flags & LCK_CACHE)
		return;

	if (!strcmp(resource, VG_GLOBAL))
		return;

	if (lck_type == LCK_UNLOCK)
		_vg_lock_count--;
	else
		_vg_lock_count++;

	/* We don't bother to reset this until all VG locks are dropped */
	if (lck_type == LCK_WRITE)
		_vg_write_lock_held = 1;
	else if (!_vg_lock_count)
		_vg_write_lock_held = 0;
}
/*
* Select a locking type
* type: locking type; if < 0, then read config tree value
*/
int init_locking(int type, struct cmd_context *cmd, int suppress_messages)
{
	if (getenv("LVM_SUPPRESS_LOCKING_FAILURE_MESSAGES"))
		suppress_messages = 1;

	/* type < 0 means: read global/locking_type from the config tree. */
	if (type < 0)
		type = find_config_tree_int(cmd, global_locking_type_CFG, NULL);

	_blocking_supported = find_config_tree_bool(cmd, global_wait_for_locks_CFG, NULL);

	/* Only file-based locking (type 1) is supported now; others map to it. */
	if (type != 1)
		log_warn("WARNING: locking_type deprecated, using file locking.");

	if (type == 3)
		log_warn("WARNING: See lvmlockd(8) for information on using cluster/clvm VGs.");

	log_very_verbose("%sFile-based locking selected.", _blocking_supported ? "" : "Non-blocking ");

	if (!init_file_locking(&_locking, cmd, suppress_messages)) {
		log_error_suppress(suppress_messages, "File-based locking initialisation failed.");
		/* BUG FIX: previously fell through and returned 1 (success)
		 * even though initialisation failed; report failure. */
		return 0;
	}

	return 1;
}
/*
 * Shut down the locking backend.  Guard the function pointer: if
 * init_locking() failed (or was never called), _locking.fin_locking
 * may be NULL and calling through it would crash during cleanup.
 */
void fin_locking(void)
{
	if (_locking.fin_locking)
		_locking.fin_locking();
}
/*
* VG locking is by VG name.
* FIXME This should become VG uuid.
*/
/*
 * Core lock/unlock worker: perform validity checks, then hand the request
 * to the locking backend and keep the lvmcache lock bookkeeping and
 * VG lock counters in sync with the outcome.
 *
 * Returns 1 on success, 0 on failure (backend failure or a prohibited
 * request).  Signals are blocked for the duration and only unblocked
 * once no VG locks remain held (see _unblock_signals()).
 */
static int _lock_vol(struct cmd_context *cmd, const char *resource,
		     uint32_t flags, lv_operation_t lv_op, const struct logical_volume *lv)
{
	uint32_t lck_type = flags & LCK_TYPE_MASK;
	uint32_t lck_scope = flags & LCK_SCOPE_MASK;
	int ret = 0;
	const struct logical_volume *active_lv;

	/* Hold off signals while locks are manipulated; paired with
	 * _unblock_signals() at "out". */
	block_signals(flags);
	_lock_memory(cmd, lv_op);

	assert(resource);

	/* Empty resource name: historical P_orphans usage, now an internal error. */
	if (!*resource) {
		log_error(INTERNAL_ERROR "Use of P_orphans is deprecated.");
		goto out;
	}

	/* Cache pseudo-locks make no sense on the orphan or global VG names. */
	if ((is_orphan_vg(resource) || is_global_vg(resource)) && (flags & LCK_CACHE)) {
		log_error(INTERNAL_ERROR "P_%s referenced.", resource);
		goto out;
	}

	/* When trying activating component LV, make sure none of
	 * sub component LV or LVs that are using it are active */
	if (lv && ((lck_type == LCK_READ) || (lck_type == LCK_EXCL)) &&
	    ((!lv_is_visible(lv) && (active_lv = lv_holder_is_active(lv))) ||
	     (active_lv = lv_component_is_active(lv)))) {
		log_error("Activation of logical volume %s is prohibited while logical volume %s is active.",
			  display_lvname(lv), display_lvname(active_lv));
		goto out;
	}

	/* Write locks (other than the global lock) are refused in read-only mode. */
	if (cmd->metadata_read_only && lck_type == LCK_WRITE &&
	    strcmp(resource, VG_GLOBAL)) {
		log_error("Operation prohibited while global/metadata_read_only is set.");
		goto out;
	}

	if ((ret = _locking.lock_resource(cmd, resource, flags, lv))) {
		/* Lock succeeded: record real (non-cache) VG locks in lvmcache
		 * and reset the device error count for a fresh start. */
		if (lck_scope == LCK_VG && !(flags & LCK_CACHE)) {
			if (lck_type != LCK_UNLOCK)
				lvmcache_lock_vgname(resource, lck_type == LCK_READ);

			dev_reset_error_count(cmd);
		}

		_update_vg_lock_count(resource, flags);
	} else
		stack;

	/* If unlocking, always remove lock from lvmcache even if operation failed. */
	if (lck_scope == LCK_VG && !(flags & LCK_CACHE) && lck_type == LCK_UNLOCK) {
		lvmcache_unlock_vgname(resource);
		if (!ret)
			_update_vg_lock_count(resource, flags);
	}
out:
	_unlock_memory(cmd, lv_op);
	_unblock_signals();

	return ret;
}
/*
 * Public lock entry point.  Normalises the resource name, adjusts flags
 * per lock scope, takes the lock via _lock_vol(), and — unless LCK_HOLD
 * or LCK_CACHE was requested — immediately drops it again so the call
 * acts as a synchronisation point only.
 *
 * Returns 1 on success, 0 on failure.
 */
int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags, const struct logical_volume *lv)
{
	/* Local copy of the resource name; aligned for the backend. */
	char resource[258] __attribute__((aligned(8)));
	lv_operation_t lv_op;
	int lck_type = flags & LCK_TYPE_MASK;

	/* Map the scope+type combination onto a suspend/resume/no-op
	 * classification used for memory-lock handling. */
	switch (flags & (LCK_SCOPE_MASK | LCK_TYPE_MASK)) {
	case LCK_LV_SUSPEND:
		lv_op = LV_SUSPEND;
		break;
	case LCK_LV_RESUME:
		lv_op = LV_RESUME;
		break;
	default: lv_op = LV_NOOP;
	}

	/* LCK_NONE requests are internal errors but treated as a no-op success. */
	if (flags == LCK_NONE) {
		log_debug_locking(INTERNAL_ERROR "%s: LCK_NONE lock requested", vol);
		return 1;
	}

	switch (flags & LCK_SCOPE_MASK) {
	case LCK_ACTIVATION:
		break;
	case LCK_VG:
		/* VG locks honour the wait_for_locks config setting. */
		if (!_blocking_supported)
			flags |= LCK_NONBLOCK;

		/* Global VG_ORPHANS lock covers all orphan formats. */
		if (is_orphan_vg(vol))
			vol = VG_ORPHANS;

		/* VG locks alphabetical, ORPHAN lock last */
		if ((lck_type != LCK_UNLOCK) &&
		    !(flags & LCK_CACHE) &&
		    !lvmcache_verify_lock_order(vol))
			return_0;
		break;
	case LCK_LV:
		/* All LV locks are non-blocking. */
		flags |= LCK_NONBLOCK;
		break;
	default:
		log_error("Unrecognised lock scope: %d",
			  flags & LCK_SCOPE_MASK);
		return 0;
	}

	if (!dm_strncpy(resource, vol, sizeof(resource))) {
		log_error(INTERNAL_ERROR "Resource name %s is too long.", vol);
		return 0;
	}

	if (!_lock_vol(cmd, resource, flags, lv_op, lv))
		return_0;

	/*
	 * If a real lock was acquired (i.e. not LCK_CACHE),
	 * perform an immediate unlock unless LCK_HOLD was requested.
	 */
	if ((lck_type == LCK_NULL) || (lck_type == LCK_UNLOCK) ||
	    (flags & (LCK_CACHE | LCK_HOLD)))
		return 1;

	/* Same flags but with the type replaced by LCK_UNLOCK. */
	if (!_lock_vol(cmd, resource, (flags & ~LCK_TYPE_MASK) | LCK_UNLOCK, lv_op, lv))
		return_0;

	return 1;
}
/* Lock a list of LVs */
/*
 * Activate every LV on the list; if any activation fails, roll back by
 * deactivating the LVs already activated (walking the list backwards
 * from the failure point) and return 0.  Returns 1 on full success.
 *
 * NOTE(review): the 'exclusive' parameter is no longer used — a leftover
 * from the removed clustered-activation variants (see commit message);
 * kept for interface compatibility with existing callers.
 */
int activate_lvs(struct cmd_context *cmd, struct dm_list *lvs, unsigned exclusive)
{
	struct dm_list *lvh;
	struct lv_list *lvl;

	dm_list_iterate_items(lvl, lvs) {
		if (!activate_lv(cmd, lvl->lv)) {
			log_error("Failed to activate %s", display_lvname(lvl->lv));

			/* Undo: deactivate the LVs activated so far. */
			dm_list_uniterate(lvh, lvs, &lvl->list) {
				lvl = dm_list_item(lvh, struct lv_list);
				if (!deactivate_lv(cmd, lvl->lv))
					stack;
			}
			return 0;
		}
	}

	return 1;
}
/* Report whether a VG write lock is currently held (1) or not (0). */
int vg_write_lock_held(void)
{
	return _vg_write_lock_held ? 1 : 0;
}
/* Report whether the active locking backend has the LCK_CLUSTERED flag set. */
int locking_is_clustered(void)
{
	return !!(_locking.flags & LCK_CLUSTERED);
}
/*
 * Synchronisation point for local device (udev) name updates: release
 * memory locking first, then take the local VG_SYNC_NAMES lock.
 */
int sync_local_dev_names(struct cmd_context* cmd)
{
	int r;

	memlock_unlock(cmd);
	r = lock_vol(cmd, VG_SYNC_NAMES, LCK_VG_SYNC_LOCAL, NULL);

	return r;
}
/*
 * Synchronisation point for device name updates: release memory locking
 * first, then take the VG_SYNC_NAMES lock (non-local variant of
 * sync_local_dev_names()).
 */
int sync_dev_names(struct cmd_context* cmd)
{
	int r;

	memlock_unlock(cmd);
	r = lock_vol(cmd, VG_SYNC_NAMES, LCK_VG_SYNC, NULL);

	return r;
}