/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _LVM_LOCKING_H
#define _LVM_LOCKING_H
#include "uuid.h"
#include "config.h"
struct logical_volume;
int init_locking(int type, struct cmd_context *cmd, int suppress_messages);
void fin_locking(void);
void reset_locking(void);
int vg_write_lock_held(void);
int locking_is_clustered(void);
int locking_supports_remote_queries(void);
#ifndef NODE_ALL
# define NODE_ALL "*"
# define NODE_LOCAL "."
# define NODE_REMOTE "^"
#endif
int cluster_lock_held(const char *vol, const char *node, int *exclusive);
/*
* LCK_VG:
* Lock/unlock on-disk volume group data.
* Use VG_ORPHANS to lock all orphan PVs.
* Use VG_GLOBAL as a global lock and to wipe the internal cache.
* char *vol holds volume group name.
* Set LCK_CACHE flag when manipulating 'vol' metadata in the internal cache.
* (Like commit, revert or invalidate metadata.)
* If more than one lock needs to be held simultaneously, they must be
* acquired in alphabetical order of 'vol' (to avoid deadlocks), with
* VG_ORPHANS last.
*
 * Use VG_SYNC_NAMES to ensure /dev is up-to-date (for example, with
 * udev) by waiting for any asynchronous events issued to have completed.
*
* LCK_LV:
* Lock/unlock an individual logical volume
* char *vol holds lvid
*/
int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags, const struct logical_volume *lv);
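/*
 * Illustrative sketch (hypothetical VG names, unwind paths trimmed):
 * a command needing two VG locks plus the orphan lock acquires them
 * in alphabetical order with VG_ORPHANS last, per the rule above.
 *
 *	if (!lock_vol(cmd, "vg_a", LCK_VG_WRITE, NULL))
 *		return 0;
 *	if (!lock_vol(cmd, "vg_b", LCK_VG_WRITE, NULL))
 *		return 0;	// real code drops "vg_a" first
 *	if (!lock_vol(cmd, VG_ORPHANS, LCK_VG_WRITE, NULL))
 *		return 0;	// real code drops "vg_b" and "vg_a" first
 */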
/*
* Internal locking representation.
 * LCK_VG: Uses prefix V_ unless the vol begins with # (i.e. #global or
 * #orphans), or the LCK_CACHE flag is set, in which case it uses the
 * prefix P_.  If LCK_CACHE is set, we do not take out a real lock.
 * NB In clustered situations, LCK_CACHE is not propagated directly to
 * remote nodes.  (It can be deduced from the lock name.)
*/
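/*
 * For illustration, assuming the prefix rules above, the resulting
 * lock resource names would come out as ("vg00" is hypothetical):
 *
 *	lock_vol(cmd, "vg00", LCK_VG_WRITE, NULL)              -> "V_vg00"
 *	lock_vol(cmd, "vg00", LCK_VG_WRITE | LCK_CACHE, NULL)  -> "P_vg00"
 *	lock_vol(cmd, VG_GLOBAL, LCK_VG_WRITE, NULL)           -> "P_#global"
 */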
/*
* Does the LVM1 driver have this VG active?
*/
int check_lvm1_vg_inactive(struct cmd_context *cmd, const char *vgname);
/*
* Lock type - these numbers are the same as VMS and the IBM DLM
*/
#define LCK_TYPE_MASK 0x00000007U
#define LCK_NULL 0x00000000U /* LCK$_NLMODE (Deactivate) */
#define LCK_READ 0x00000001U /* LCK$_CRMODE (Activate) */
/* 0x00000002U: LCK$_CWMODE (unused here) */
#define LCK_PREAD 0x00000003U /* LCK$_PRMODE */
#define LCK_WRITE 0x00000004U /* LCK$_PWMODE (Suspend) */
#define LCK_EXCL 0x00000005U /* LCK$_EXMODE (Exclusive) */
#define LCK_UNLOCK 0x00000006U /* This is ours (Resume) */
/*
* Lock flags - these numbers are the same as DLM
*/
#define LCKF_NOQUEUE 0x00000001U /* LKF$_NOQUEUE */
#define LCKF_CONVERT 0x00000004U /* LKF$_CONVERT */
/*
* Lock scope
*/
#define LCK_SCOPE_MASK 0x00001008U
#define LCK_VG 0x00000000U /* Volume Group */
#define LCK_LV 0x00000008U /* Logical Volume */
#define LCK_ACTIVATION 0x00001000U /* Activation */
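/*
 * A minimal sketch of decoding a combined flag word with the masks
 * above (the flag value here is only an example):
 *
 *	uint32_t flags = LCK_VG | LCK_WRITE;
 *
 *	if ((flags & LCK_SCOPE_MASK) == LCK_VG &&
 *	    (flags & LCK_TYPE_MASK) == LCK_WRITE)
 *		;	// a VG write lock request
 */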
/*
* Lock bits.
* Bottom 8 bits except LCK_LOCAL form args[0] in cluster comms.
*/
#define LCK_NONBLOCK 0x00000010U /* Don't block waiting for lock? */
#define LCK_HOLD 0x00000020U /* Hold lock when lock_vol returns? */
#define LCK_CLUSTER_VG 0x00000080U /* VG is clustered */
#define LCK_LOCAL 0x00000040U /* Don't propagate to other nodes */
#define LCK_REMOTE 0x00000800U /* Propagate to remote nodes only */
#define LCK_CACHE 0x00000100U /* Operation on cache only using P_ lock */
#define LCK_ORIGIN_ONLY 0x00000200U /* Operation should bypass any snapshots */
#define LCK_REVERT 0x00000400U /* Revert any incomplete change */
/*
* Additional lock bits for cluster communication via args[1]
*/
#define LCK_PARTIAL_MODE 0x01 /* Partial activation? */
#define LCK_MIRROR_NOSYNC_MODE 0x02 /* Mirrors don't require sync */
#define LCK_DMEVENTD_MONITOR_MODE 0x04 /* Register with dmeventd */
/* Not yet used. */
#define LCK_CONVERT_MODE 0x08 /* Convert existing lock */
#define LCK_TEST_MODE 0x10 /* Test mode: No activation */
#define LCK_ORIGIN_ONLY_MODE 0x20 /* Same as LCK_ORIGIN_ONLY */
#define LCK_DMEVENTD_MONITOR_IGNORE 0x40 /* Whether to ignore dmeventd */
#define LCK_REVERT_MODE 0x80 /* Remove inactive tables */
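/*
 * Sketch (hypothetical command): activating a mirror without requiring
 * sync while registering with dmeventd would set
 *
 *	args[1] = LCK_MIRROR_NOSYNC_MODE | LCK_DMEVENTD_MONITOR_MODE;
 *
 * in the cluster message.
 */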
/*
* Special cases of VG locks.
*/
#define VG_ORPHANS "#orphans"
#define VG_GLOBAL "#global"
#define VG_SYNC_NAMES "#sync_names"
/*
* Common combinations
*/
#define LCK_NONE (LCK_VG | LCK_NULL)
#define LCK_ACTIVATE_LOCK (LCK_ACTIVATION | LCK_WRITE | LCK_HOLD)
#define LCK_ACTIVATE_UNLOCK (LCK_ACTIVATION | LCK_UNLOCK)
#define LCK_VG_READ (LCK_VG | LCK_READ | LCK_HOLD)
#define LCK_VG_WRITE (LCK_VG | LCK_WRITE | LCK_HOLD)
#define LCK_VG_UNLOCK (LCK_VG | LCK_UNLOCK)
#define LCK_VG_DROP_CACHE (LCK_VG | LCK_WRITE | LCK_CACHE)
/* FIXME: LCK_HOLD abused here */
#define LCK_VG_COMMIT (LCK_VG | LCK_WRITE | LCK_CACHE | LCK_HOLD)
#define LCK_VG_REVERT (LCK_VG | LCK_READ | LCK_CACHE | LCK_HOLD)
#define LCK_VG_BACKUP (LCK_VG | LCK_CACHE)
#define LCK_VG_SYNC (LCK_NONE | LCK_CACHE)
#define LCK_VG_SYNC_LOCAL (LCK_NONE | LCK_CACHE | LCK_LOCAL)
#define LCK_LV_EXCLUSIVE (LCK_LV | LCK_EXCL)
#define LCK_LV_SUSPEND (LCK_LV | LCK_WRITE)
#define LCK_LV_RESUME (LCK_LV | LCK_UNLOCK)
#define LCK_LV_ACTIVATE (LCK_LV | LCK_READ)
#define LCK_LV_DEACTIVATE (LCK_LV | LCK_NULL)
#define LCK_MASK (LCK_TYPE_MASK | LCK_SCOPE_MASK)
#define LCK_LV_CLUSTERED(lv) \
	(vg_is_clustered((lv)->vg) ? LCK_CLUSTER_VG : 0)
#define lock_lv_vol(cmd, lv, flags) \
	lock_vol(cmd, (lv)->lvid.s, (flags) | LCK_LV_CLUSTERED(lv), lv)
/*
 * Activation locks are wrapped around activation commands that have to
 * be processed atomically, one at a time.
 * An activation lock is redundant if a VG WRITE lock is already held or
 * if clustered activation is activating a simple LV.
 *
 * Some LV types require taking a lock common to a whole group of LVs.
 * TODO: For simplicity a global VG activation lock is taken for now;
 * a more fine-grained component detection algorithm can be added later.
*/
#define lv_type_requires_activation_lock(lv) \
	((lv_is_thin_type(lv) || lv_is_cache_type(lv) || lv_is_mirror_type(lv) || \
	  lv_is_raid_type(lv) || lv_is_origin(lv) || lv_is_snapshot(lv)) ? 1 : 0)
#define lv_activation_lock_name(lv) \
	(lv_type_requires_activation_lock(lv) ? (lv)->vg->name : (lv)->lvid.s)
#define lv_requires_activation_lock_now(lv) \
	((!vg_write_lock_held() && \
	  (!vg_is_clustered((lv)->vg) || !lv_type_requires_activation_lock(lv))) ? 1 : 0)
#define lock_activation(cmd, lv) \
	(lv_requires_activation_lock_now(lv) ? \
	 lock_vol(cmd, lv_activation_lock_name(lv), LCK_ACTIVATE_LOCK, lv) : 1)
#define unlock_activation(cmd, lv) \
	(lv_requires_activation_lock_now(lv) ? \
	 lock_vol(cmd, lv_activation_lock_name(lv), LCK_ACTIVATE_UNLOCK, lv) : 1)
/*
* Place temporary exclusive 'activation' lock around an LV locking operation
* to serialise it.
*/
#define lock_lv_vol_serially(cmd, lv, flags) \
({ \
	int rr = 0; \
\
	if (lock_activation((cmd), (lv))) { \
		rr = lock_lv_vol((cmd), (lv), (flags)); \
		unlock_activation((cmd), (lv)); \
	} \
	rr; \
})
#define unlock_vg(cmd, vg, vol) \
	do { \
		if (vg && !lvmetad_vg_update_finish(vg)) \
			stack; \
		if (is_real_vg(vol) && !sync_dev_names(cmd)) \
			stack; \
		if (!lock_vol(cmd, vol, LCK_VG_UNLOCK, NULL)) \
			stack; \
	} while (0)
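/*
 * Typical pairing (sketch, "vg00" hypothetical): a lock taken with
 * lock_vol() is dropped with unlock_vg(), which also finishes any
 * pending lvmetad update and syncs device names before unlocking.
 *
 *	if (!lock_vol(cmd, "vg00", LCK_VG_READ, NULL))
 *		return 0;
 *	// ... use the VG ...
 *	unlock_vg(cmd, NULL, "vg00");
 */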
#define unlock_and_release_vg(cmd, vg, vol) \
	do { \
		unlock_vg(cmd, vg, vol); \
		release_vg(vg); \
	} while (0)
#define resume_lv(cmd, lv) \
({ \
	int rr = lock_lv_vol((cmd), (lv), LCK_LV_RESUME); \
	unlock_activation((cmd), (lv)); \
	rr; \
})
#define resume_lv_origin(cmd, lv) lock_lv_vol(cmd, lv, LCK_LV_RESUME | LCK_ORIGIN_ONLY)
#define revert_lv(cmd, lv) \
({ \
	int rr = lock_lv_vol((cmd), (lv), LCK_LV_RESUME | LCK_REVERT); \
\
	unlock_activation((cmd), (lv)); \
	rr; \
})
#define suspend_lv(cmd, lv) \
(lock_activation((cmd), (lv)) ? lock_lv_vol((cmd), (lv), LCK_LV_SUSPEND | LCK_HOLD) : 0)
#define suspend_lv_origin(cmd, lv) lock_lv_vol(cmd, lv, LCK_LV_SUSPEND | LCK_HOLD | LCK_ORIGIN_ONLY)
#define deactivate_lv(cmd, lv) lock_lv_vol_serially(cmd, lv, LCK_LV_DEACTIVATE)
#define activate_lv(cmd, lv) lock_lv_vol_serially(cmd, lv, LCK_LV_ACTIVATE | LCK_HOLD)
#define activate_lv_excl_local(cmd, lv) \
lock_lv_vol_serially(cmd, lv, LCK_LV_EXCLUSIVE | LCK_HOLD | LCK_LOCAL)
#define activate_lv_excl_remote(cmd, lv) \
lock_lv_vol(cmd, lv, LCK_LV_EXCLUSIVE | LCK_HOLD | LCK_REMOTE)
struct logical_volume;
int activate_lv_excl(struct cmd_context *cmd, struct logical_volume *lv);
#define activate_lv_local(cmd, lv) \
lock_lv_vol_serially(cmd, lv, LCK_LV_ACTIVATE | LCK_HOLD | LCK_LOCAL)
#define deactivate_lv_local(cmd, lv) \
lock_lv_vol_serially(cmd, lv, LCK_LV_DEACTIVATE | LCK_LOCAL)
#define drop_cached_metadata(vg) \
lock_vol((vg)->cmd, (vg)->name, LCK_VG_DROP_CACHE, NULL)
#define remote_commit_cached_metadata(vg) \
lock_vol((vg)->cmd, (vg)->name, LCK_VG_COMMIT, NULL)
#define remote_revert_cached_metadata(vg) \
lock_vol((vg)->cmd, (vg)->name, LCK_VG_REVERT, NULL)
#define remote_backup_metadata(vg) \
lock_vol((vg)->cmd, (vg)->name, LCK_VG_BACKUP, NULL)
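/*
 * A sketch of the metadata-update sequence these wrappers serve
 * (error handling trimmed): suspend the LV to freeze its table,
 * commit the new VG metadata, then resume with the updated table,
 * reverting if the commit fails.
 *
 *	if (!suspend_lv(cmd, lv))
 *		return 0;
 *	if (!vg_commit(vg)) {
 *		revert_lv(cmd, lv);
 *		return 0;
 *	}
 *	if (!resume_lv(cmd, lv))
 *		stack;
 */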
int sync_local_dev_names(struct cmd_context *cmd);
int sync_dev_names(struct cmd_context *cmd);
/* Process list of LVs */
struct volume_group;
int suspend_lvs(struct cmd_context *cmd, struct dm_list *lvs,
struct volume_group *vg_to_revert);
int resume_lvs(struct cmd_context *cmd, struct dm_list *lvs);
int revert_lvs(struct cmd_context *cmd, struct dm_list *lvs);
int activate_lvs(struct cmd_context *cmd, struct dm_list *lvs, unsigned exclusive);
#endif