/* lvm2/lib/locking/locking.c */
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "lib.h"
#include "locking.h"
#include "locking_types.h"
#include "lvm-string.h"
#include "activate.h"
#include "toolcontext.h"
#include "memlock.h"
#include "defaults.h"
#include "lvmcache.h"
#include <assert.h>
#include <signal.h>
#include <sys/stat.h>
#include <limits.h>
#include <unistd.h>
static struct locking_type _locking;
static sigset_t _oldset;
static int _vg_lock_count = 0; /* Number of locks held */
static int _vg_write_lock_held = 0; /* VG write lock held? */
static int _signals_blocked = 0;
static int _blocking_supported = 0;
static volatile sig_atomic_t _sigint_caught = 0;
static volatile sig_atomic_t _handler_installed;
static struct sigaction _oldhandler;
static int _oldmasked;
typedef enum {
LV_NOOP,
LV_SUSPEND,
LV_RESUME
} lv_operation_t;
static void _catch_sigint(int unused __attribute__((unused)))
{
_sigint_caught = 1;
}
int sigint_caught(void)
{
return _sigint_caught;
}
void sigint_clear(void)
{
_sigint_caught = 0;
}
/*
* Temporarily allow keyboard interrupts to be intercepted and noted;
* saves interrupt handler state for sigint_restore(). Users should
* use the sigint_caught() predicate to check whether interrupt was
* requested and act appropriately. The interrupt flag is never
* cleared automatically by this code; the tools clear it before
* running each command in lvm_run_command(), and any other code that
* needs it cleared must call sigint_clear().
*/
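/*
* Minimal usage sketch (hypothetical caller; more_work_to_do() and
* do_one_unit_of_work() are illustrative stand-ins, not functions that
* exist in this tree):
*
*	sigint_allow();
*	while (more_work_to_do()) {
*		if (sigint_caught())
*			break;		-- user pressed Ctrl-C
*		do_one_unit_of_work();
*	}
*	sigint_restore();
*	sigint_clear();		-- the flag is never cleared automatically
*/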
void sigint_allow(void)
{
struct sigaction handler;
sigset_t sigs;
/*
* Do not overwrite the backed-up handler data -
* just increase nesting count.
*/
if (_handler_installed) {
_handler_installed++;
return;
}
/* Grab old sigaction for SIGINT: shall not fail. */
sigaction(SIGINT, NULL, &handler);
handler.sa_flags &= ~SA_RESTART; /* Clear restart flag */
handler.sa_handler = _catch_sigint;
_handler_installed = 1;
/* Override the signal handler: shall not fail. */
sigaction(SIGINT, &handler, &_oldhandler);
/* Unmask SIGINT. Remember to mask it again on restore. */
sigprocmask(0, NULL, &sigs);
if ((_oldmasked = sigismember(&sigs, SIGINT))) {
sigdelset(&sigs, SIGINT);
sigprocmask(SIG_SETMASK, &sigs, NULL);
}
}
void sigint_restore(void)
{
if (!_handler_installed)
return;
if (_handler_installed > 1) {
_handler_installed--;
return;
}
/* Nesting count went down to 0. */
_handler_installed = 0;
if (_oldmasked) {
sigset_t sigs;
sigprocmask(0, NULL, &sigs);
sigaddset(&sigs, SIGINT);
sigprocmask(SIG_SETMASK, &sigs, NULL);
}
sigaction(SIGINT, &_oldhandler, NULL);
}
static void _block_signals(uint32_t flags __attribute__((unused)))
{
sigset_t set;
if (_signals_blocked)
return;
if (sigfillset(&set)) {
log_sys_error("sigfillset", "_block_signals");
return;
}
if (sigprocmask(SIG_SETMASK, &set, &_oldset)) {
log_sys_error("sigprocmask", "_block_signals");
return;
}
_signals_blocked = 1;
}
static void _unblock_signals(void)
{
/* Don't unblock signals while any locks are held */
if (!_signals_blocked || _vg_lock_count)
return;
if (sigprocmask(SIG_SETMASK, &_oldset, NULL)) {
log_sys_error("sigprocmask", "_block_signals");
return;
}
_signals_blocked = 0;
}
static void _lock_memory(lv_operation_t lv_op)
{
if (!(_locking.flags & LCK_PRE_MEMLOCK))
return;
if (lv_op == LV_SUSPEND)
memlock_inc();
}
static void _unlock_memory(lv_operation_t lv_op)
{
if (!(_locking.flags & LCK_PRE_MEMLOCK))
return;
if (lv_op == LV_RESUME)
memlock_dec();
}
void reset_locking(void)
{
int was_locked = _vg_lock_count;
_vg_lock_count = 0;
_vg_write_lock_held = 0;
_locking.reset_locking();
if (was_locked)
_unblock_signals();
}
static void _update_vg_lock_count(const char *resource, uint32_t flags)
{
/* Ignore locks not associated with updating VG metadata */
if ((flags & LCK_SCOPE_MASK) != LCK_VG ||
(flags & LCK_CACHE) ||
!strcmp(resource, VG_GLOBAL))
return;
if ((flags & LCK_TYPE_MASK) == LCK_UNLOCK)
_vg_lock_count--;
else
_vg_lock_count++;
/* We don't bother to reset this until all VG locks are dropped */
if ((flags & LCK_TYPE_MASK) == LCK_WRITE)
_vg_write_lock_held = 1;
else if (!_vg_lock_count)
_vg_write_lock_held = 0;
}
/*
* Select a locking type
* type: locking type; if < 0, the type is read from the config tree
*/
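/*
* The type values handled by the switch below:
*   0 - no locking (dangerous; metadata corruption is possible)
*   1 - local file-based locking (the default)
*   2 - external locking library (HAVE_LIBDL), with optional fallback
*       to clustered or local locking
*   3 - built-in clustered locking (CLUSTER_LOCKING_INTERNAL)
*   4 - read-only locking; only read operations are permitted
*/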
int init_locking(int type, struct cmd_context *cmd)
{
if (type < 0)
type = find_config_tree_int(cmd, "global/locking_type", 1);
_blocking_supported = find_config_tree_int(cmd,
"global/wait_for_locks", DEFAULT_WAIT_FOR_LOCKS);
switch (type) {
case 0:
init_no_locking(&_locking, cmd);
log_warn("WARNING: Locking disabled. Be careful! "
"This could corrupt your metadata.");
return 1;
case 1:
log_very_verbose("%sFile-based locking selected.",
_blocking_supported ? "" : "Non-blocking ");
if (!init_file_locking(&_locking, cmd))
break;
return 1;
#ifdef HAVE_LIBDL
case 2:
if (!is_static()) {
log_very_verbose("External locking selected.");
if (init_external_locking(&_locking, cmd))
return 1;
}
if (!find_config_tree_int(cmd, "locking/fallback_to_clustered_locking",
find_config_tree_int(cmd, "global/fallback_to_clustered_locking",
DEFAULT_FALLBACK_TO_CLUSTERED_LOCKING)))
break;
#endif
#ifdef CLUSTER_LOCKING_INTERNAL
log_very_verbose("Falling back to internal clustered locking.");
/* Fall through */
case 3:
log_very_verbose("Cluster locking selected.");
if (!init_cluster_locking(&_locking, cmd))
break;
return 1;
#endif
case 4:
log_verbose("Read-only locking selected. "
"Only read operations permitted.");
if (!init_readonly_locking(&_locking, cmd))
break;
return 1;
default:
log_error("Unknown locking type requested.");
return 0;
}
if ((type == 2 || type == 3) &&
find_config_tree_int(cmd, "locking/fallback_to_local_locking",
find_config_tree_int(cmd, "global/fallback_to_local_locking",
DEFAULT_FALLBACK_TO_LOCAL_LOCKING))) {
log_warn("WARNING: Falling back to local file-based locking.");
log_warn("Volume Groups with the clustered attribute will "
"be inaccessible.");
if (init_file_locking(&_locking, cmd))
return 1;
}
if (!ignorelockingfailure())
return 0;
log_verbose("Locking disabled - only read operations permitted.");
init_readonly_locking(&_locking, cmd);
return 1;
}
void fin_locking(void)
{
_locking.fin_locking();
}
/*
* Does the LVM1 driver know of this VG name?
*/
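/*
* Example (assuming cmd->proc_dir has its usual value of "/proc"): for a
* VG named "vg00" this looks for /proc/lvm/VGs/vg00, whose presence means
* the LVM1 driver still owns the VG and LVM2 must not touch it.
*/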
int check_lvm1_vg_inactive(struct cmd_context *cmd, const char *vgname)
{
struct stat info;
char path[PATH_MAX];
/* We'll allow operations on orphans */
if (is_orphan_vg(vgname))
return 1;
/* LVM1 is only present in 2.4 kernels. */
if (strncmp(cmd->kernel_vsn, "2.4.", 4))
return 1;
if (dm_snprintf(path, sizeof(path), "%s/lvm/VGs/%s", cmd->proc_dir,
vgname) < 0) {
log_error("LVM1 proc VG pathname too long for %s", vgname);
return 0;
}
if (stat(path, &info) == 0) {
log_error("%s exists: Is the original LVM driver using "
"this volume group?", path);
return 0;
} else if (errno != ENOENT && errno != ENOTDIR) {
log_sys_error("stat", path);
return 0;
}
return 1;
}
/*
* VG locking is by VG name.
* FIXME This should become VG uuid.
*/
static int _lock_vol(struct cmd_context *cmd, const char *resource,
uint32_t flags, lv_operation_t lv_op)
{
int ret = 0;
_block_signals(flags);
_lock_memory(lv_op);
assert(resource);
if (!*resource) {
log_error(INTERNAL_ERROR "Use of P_orphans is deprecated.");
return 0;
}
if (*resource == '#' && (flags & LCK_CACHE)) {
log_error(INTERNAL_ERROR "P_%s referenced", resource);
return 0;
}
if ((ret = _locking.lock_resource(cmd, resource, flags))) {
if ((flags & LCK_SCOPE_MASK) == LCK_VG &&
!(flags & LCK_CACHE)) {
if ((flags & LCK_TYPE_MASK) == LCK_UNLOCK)
lvmcache_unlock_vgname(resource);
else
lvmcache_lock_vgname(resource, (flags & LCK_TYPE_MASK)
== LCK_READ);
}
_update_vg_lock_count(resource, flags);
}
_unlock_memory(lv_op);
_unblock_signals();
return ret;
}
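/*
* Typical call pattern for lock_vol() (a sketch only; the flag composition
* is written out explicitly here rather than via the convenience macros
* from locking.h, and "vg_name" is a placeholder):
*
*	if (!lock_vol(cmd, vg_name, LCK_VG | LCK_WRITE | LCK_HOLD))
*		return 0;	-- take and hold a VG write lock
*	... read, modify and commit the VG metadata ...
*	lock_vol(cmd, vg_name, LCK_VG | LCK_UNLOCK);
*
* Without LCK_HOLD the lock is dropped again immediately after it is
* acquired (see the end of lock_vol() below).
*/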
int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags)
{
char resource[258] __attribute((aligned(8)));
lv_operation_t lv_op;
switch (flags & (LCK_SCOPE_MASK | LCK_TYPE_MASK)) {
case LCK_LV_SUSPEND:
lv_op = LV_SUSPEND;
break;
case LCK_LV_RESUME:
lv_op = LV_RESUME;
break;
default:
lv_op = LV_NOOP;
}
if (flags == LCK_NONE) {
log_debug(INTERNAL_ERROR "%s: LCK_NONE lock requested", vol);
return 1;
}
switch (flags & LCK_SCOPE_MASK) {
case LCK_VG:
/*
* Automatically set LCK_NONBLOCK if one or more VGs are already locked.
* This will enforce correctness and prevent deadlocks rather
* than relying on the caller to set the flag properly.
*/
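/*
* For example (illustrative VG names): if this command already holds a
* lock on VG "alpha" and now requests VG "beta", the second request is
* made non-blocking, so a second command taking the same two locks in
* the opposite order fails fast instead of deadlocking.
*/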
if (!_blocking_supported || vgs_locked())
flags |= LCK_NONBLOCK;
if (vol[0] != '#' &&
((flags & LCK_TYPE_MASK) != LCK_UNLOCK) &&
(!(flags & LCK_CACHE)) &&
!lvmcache_verify_lock_order(vol))
return 0;
/* Lock VG to change on-disk metadata. */
/* If the LVM1 driver knows about the VG, it can't be accessed. */
if (!check_lvm1_vg_inactive(cmd, vol))
return 0;
break;
case LCK_LV:
/* All LV locks are non-blocking. */
flags |= LCK_NONBLOCK;
break;
default:
log_error("Unrecognised lock scope: %d",
flags & LCK_SCOPE_MASK);
return 0;
}
strncpy(resource, vol, sizeof(resource) - 1);
resource[sizeof(resource) - 1] = '\0';
if (!_lock_vol(cmd, resource, flags, lv_op))
return 0;
/*
* If a real lock was acquired (i.e. not LCK_CACHE),
* perform an immediate unlock unless LCK_HOLD was requested.
*/
if (!(flags & LCK_CACHE) && !(flags & LCK_HOLD) &&
((flags & LCK_TYPE_MASK) != LCK_UNLOCK)) {
if (!_lock_vol(cmd, resource,
(flags & ~LCK_TYPE_MASK) | LCK_UNLOCK, lv_op))
return 0;
}
return 1;
}
/* Resume a list of LVs */
int resume_lvs(struct cmd_context *cmd, struct dm_list *lvs)
{
struct lv_list *lvl;
dm_list_iterate_items(lvl, lvs)
if (!resume_lv(cmd, lvl->lv))
stack;
return 1;
}
/* Suspend a list of LVs */
int suspend_lvs(struct cmd_context *cmd, struct dm_list *lvs)
{
struct dm_list *lvh;
struct lv_list *lvl;
dm_list_iterate_items(lvl, lvs) {
if (!suspend_lv(cmd, lvl->lv)) {
log_error("Failed to suspend %s", lvl->lv->name);
dm_list_uniterate(lvh, lvs, &lvl->list) {
lvl = dm_list_item(lvh, struct lv_list);
if (!resume_lv(cmd, lvl->lv))
stack;
}
return 0;
}
}
return 1;
}
/* Activate a list of LVs */
int activate_lvs(struct cmd_context *cmd, struct dm_list *lvs, unsigned exclusive)
{
struct dm_list *lvh;
struct lv_list *lvl;
dm_list_iterate_items(lvl, lvs) {
if (!exclusive) {
if (!activate_lv(cmd, lvl->lv)) {
log_error("Failed to activate %s", lvl->lv->name);
return 0;
}
} else if (!activate_lv_excl(cmd, lvl->lv)) {
log_error("Failed to activate %s", lvl->lv->name);
dm_list_uniterate(lvh, lvs, &lvl->list) {
lvl = dm_list_item(lvh, struct lv_list);
if (!activate_lv(cmd, lvl->lv))
stack;
}
return 0;
}
}
return 1;
}
int vg_write_lock_held(void)
{
return _vg_write_lock_held;
}
int locking_is_clustered(void)
{
return (_locking.flags & LCK_CLUSTERED) ? 1 : 0;
}
int remote_lock_held(const char *vol)
{
int mode = LCK_NULL;
if (!locking_is_clustered())
return 0;
if (!_locking.query_resource)
return -1;
/*
* If an error occurred, assume that the volume is active.
*/
if (!_locking.query_resource(vol, &mode)) {
stack;
return 1;
}
return mode == LCK_NULL ? 0 : 1;
}