/*
* Copyright (C) 2005-2017 Red Hat, Inc. All rights reserved.
*
* This file is part of the device-mapper userspace tools.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libdm/misc/dmlib.h"
#include "libdm-targets.h"
#include "libdm-common.h"
#include "libdm/misc/kdev_t.h"
#include "libdm/misc/dm-ioctl.h"
#include <stdarg.h>
#include <sys/utsname.h>
#define MAX_TARGET_PARAMSIZE 500000
/* Supported segment types */
enum {
SEG_CACHE,
SEG_CRYPT,
SEG_ERROR,
SEG_LINEAR,
SEG_MIRRORED,
SEG_SNAPSHOT,
SEG_SNAPSHOT_ORIGIN,
SEG_SNAPSHOT_MERGE,
SEG_STRIPED,
SEG_ZERO,
SEG_THIN_POOL,
SEG_THIN,
SEG_RAID0,
SEG_RAID0_META,
SEG_RAID1,
SEG_RAID10,
SEG_RAID4,
SEG_RAID5_N,
SEG_RAID5_LA,
SEG_RAID5_RA,
SEG_RAID5_LS,
SEG_RAID5_RS,
SEG_RAID6_N_6,
SEG_RAID6_ZR,
SEG_RAID6_NR,
SEG_RAID6_NC,
SEG_RAID6_LS_6,
SEG_RAID6_RS_6,
SEG_RAID6_LA_6,
SEG_RAID6_RA_6,
};
/* FIXME Add crypt and multipath support */
static const struct {
unsigned type;
const char target[16];
} _dm_segtypes[] = {
{ SEG_CACHE, "cache" },
{ SEG_CRYPT, "crypt" },
{ SEG_ERROR, "error" },
{ SEG_LINEAR, "linear" },
{ SEG_MIRRORED, "mirror" },
{ SEG_SNAPSHOT, "snapshot" },
{ SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
{ SEG_SNAPSHOT_MERGE, "snapshot-merge" },
{ SEG_STRIPED, "striped" },
{ SEG_ZERO, "zero"},
{ SEG_THIN_POOL, "thin-pool"},
{ SEG_THIN, "thin"},
{ SEG_RAID0, "raid0"},
{ SEG_RAID0_META, "raid0_meta"},
{ SEG_RAID1, "raid1"},
{ SEG_RAID10, "raid10"},
{ SEG_RAID4, "raid4"},
{ SEG_RAID5_N, "raid5_n"},
{ SEG_RAID5_LA, "raid5_la"},
{ SEG_RAID5_RA, "raid5_ra"},
{ SEG_RAID5_LS, "raid5_ls"},
{ SEG_RAID5_RS, "raid5_rs"},
{ SEG_RAID6_N_6,"raid6_n_6"},
{ SEG_RAID6_ZR, "raid6_zr"},
{ SEG_RAID6_NR, "raid6_nr"},
{ SEG_RAID6_NC, "raid6_nc"},
{ SEG_RAID6_LS_6, "raid6_ls_6"},
{ SEG_RAID6_RS_6, "raid6_rs_6"},
{ SEG_RAID6_LA_6, "raid6_la_6"},
{ SEG_RAID6_RA_6, "raid6_ra_6"},
/*
* WARNING: Since the 'raid' target overloads this 1:1 mapping table
* for its search, do not add new enum elements past the entries above!
*/
{ SEG_RAID5_LS, "raid5"}, /* same as "raid5_ls" (default for MD also) */
{ SEG_RAID6_ZR, "raid6"}, /* same as "raid6_zr" */
{ SEG_RAID10, "raid10_near"}, /* same as "raid10" */
};
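/*
 * Illustrative sketch (not part of the original source): the table above
 * maps each segment type enum to its kernel target name, and the first
 * SEG_RAID6_RA_6 + 1 entries keep the invariant _dm_segtypes[type].type == type,
 * which is why the alias entries ("raid5", "raid6", "raid10_near") must stay
 * last.  The helper names below are hypothetical and only show how such a
 * table is typically used in both directions:
 *
 *	static const char *_segtype_name(unsigned type)
 *	{
 *		return _dm_segtypes[type].target;	// relies on 1:1 index == enum
 *	}
 *
 *	static int _segtype_from_target(const char *target, unsigned *type)
 *	{
 *		unsigned i;
 *
 *		for (i = 0; i < DM_ARRAY_SIZE(_dm_segtypes); ++i)
 *			if (!strcmp(target, _dm_segtypes[i].target)) {
 *				*type = _dm_segtypes[i].type;	// aliases resolve to their canonical enum
 *				return 1;
 *			}
 *
 *		return 0;
 *	}
 */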
/* Some segment types have a list of areas of other devices attached */
struct seg_area {
struct dm_list list;
struct dm_tree_node *dev_node;
uint64_t offset;
};
struct dm_thin_message {
dm_thin_message_t type;
union {
struct {
uint32_t device_id;
uint32_t origin_id;
} m_create_snap;
struct {
uint32_t device_id;
} m_create_thin;
struct {
uint32_t device_id;
} m_delete;
struct {
uint64_t current_id;
uint64_t new_id;
} m_set_transaction_id;
} u;
};
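/*
 * Minimal usage sketch (not part of the original source): filling in a
 * thin-pool "create_snap" message with the union members defined above.
 * The device id values are illustrative; DM_THIN_MESSAGE_CREATE_SNAP is
 * the corresponding dm_thin_message_t value from the public libdm header.
 *
 *	struct dm_thin_message msg = {
 *		.type = DM_THIN_MESSAGE_CREATE_SNAP,
 *		.u.m_create_snap = {
 *			.device_id = 2,		// id of the new thin snapshot
 *			.origin_id = 1,		// id of the existing thin origin
 *		},
 *	};
 */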
struct thin_message {
struct dm_list list;
struct dm_thin_message message;
int expected_errno;
};
/* Per-segment properties */
struct load_segment {
struct dm_list list;
unsigned type;
uint64_t size;
unsigned area_count; /* Linear + Striped + Mirrored + Crypt */
struct dm_list areas; /* Linear + Striped + Mirrored + Crypt */
uint32_t stripe_size; /* Striped + raid */
int persistent; /* Snapshot */
uint32_t chunk_size; /* Snapshot */
struct dm_tree_node *cow; /* Snapshot */
struct dm_tree_node *origin; /* Snapshot + Snapshot origin + Cache */
struct dm_tree_node *merge; /* Snapshot */
struct dm_tree_node *log; /* Mirror */
uint32_t region_size; /* Mirror + raid */
unsigned clustered; /* Mirror */
unsigned mirror_area_count; /* Mirror */
uint64_t flags; /* Mirror + Raid + Cache */
char *uuid; /* Clustered mirror log */
const char *policy_name; /* Cache */
unsigned policy_argc; /* Cache */
struct dm_config_node *policy_settings; /* Cache */
const char *cipher; /* Crypt */
const char *chainmode; /* Crypt */
const char *iv; /* Crypt */
uint64_t iv_offset; /* Crypt */
const char *key; /* Crypt */
int delta_disks; /* raid reshape number of disks */
int data_offset; /* raid reshape data offset on disk to set */
uint64_t rebuilds[RAID_BITMAP_SIZE]; /* raid */
uint64_t writemostly[RAID_BITMAP_SIZE]; /* raid */
uint32_t writebehind; /* raid */
uint32_t max_recovery_rate; /* raid kB/sec/disk */
uint32_t min_recovery_rate; /* raid kB/sec/disk */
uint32_t data_copies; /* raid10 data_copies */
struct dm_tree_node *metadata; /* Thin_pool + Cache */
struct dm_tree_node *pool; /* Thin_pool, Thin */
struct dm_tree_node *external; /* Thin */
struct dm_list thin_messages; /* Thin_pool */
uint64_t transaction_id; /* Thin_pool */
uint64_t low_water_mark; /* Thin_pool */
uint32_t data_block_size; /* Thin_pool + cache */
uint32_t migration_threshold; /* Cache */
unsigned skip_block_zeroing; /* Thin_pool */
unsigned ignore_discard; /* Thin_pool target vsn 1.1 */
unsigned no_discard_passdown; /* Thin_pool target vsn 1.1 */
unsigned error_if_no_space; /* Thin pool target vsn 1.10 */
unsigned read_only; /* Thin pool target vsn 1.3 */
uint32_t device_id; /* Thin */
};
/* Per-device properties */
struct load_properties {
int read_only;
uint32_t major;
uint32_t minor;
uint32_t read_ahead;
uint32_t read_ahead_flags;
unsigned segment_count;
int size_changed;
struct dm_list segs;
const char *new_name;
/* If immediate_dev_node is set to 1, try to create the dev node
* as soon as possible (e.g. in preload stage even during traversal
* and processing of dm tree). This will also flush all stacked dev
* node operations, synchronizing with udev.
*/
unsigned immediate_dev_node;
/*
* If the device size changed from zero and this is set,
* don't resume the device immediately, even if the device
* has parents. This works provided the parents do not
* validate the device size; it is required by pvmove to
* avoid starting the mirror resync operation too early.
*/
unsigned delay_resume_if_new;
/*
* The preload tree normally only loads tables and does not resume,
* but a device is automatically resumed when its target is extended,
* on the assumption that no i/o can be in flight to the 'new'
* extended space from any device above. The reason is that a
* preloaded target above may actually need to see its bigger
* subdevice before it gets suspended. As long as the devices are
* simple linears, there is no problem resuming the bigger device
* during preload (before commit). However, complex targets like
* thin-pool (raid, cache, ...) shall not be resumed before their commit.
*/
unsigned delay_resume_if_extended;
/*
* Call node_send_messages(); set to 2 if there are messages.
* When != 0, the matching transaction id is validated, so thin-pools
* where transaction_id is passed as 0 are never validated; this
* allows external management of the thin-pool TID.
*/
unsigned send_messages;
/* Skip suspending node's children, used when sending messages to thin-pool */
int skip_suspend;
/* Suspend and resume siblings after node activation with udev flags */
unsigned reactivate_siblings;
uint16_t reactivate_udev_flags;
};
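/*
 * Illustrative note (not part of the original source): the members above are
 * plain fields of a node's load_properties, so code inside libdm sets them
 * directly before preload/activation.  A hypothetical helper marking a newly
 * created pvmove mirror so its resync does not start too early might be:
 *
 *	static void _delay_resume_of_new_dev(struct dm_tree_node *dnode)
 *	{
 *		dnode->props.delay_resume_if_new = 1;
 *	}
 */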
/* Two of these are used to join two nodes with uses and used_by. */
struct dm_tree_link {
struct dm_list list;
struct dm_tree_node *node;
};
struct dm_tree_node {
struct dm_tree *dtree;
const char *name;
const char *uuid;
struct dm_info info;
struct dm_list uses; /* Nodes this node uses */
struct dm_list used_by; /* Nodes that use this node */
int activation_priority; /* 0 gets activated first */
int implicit_deps; /* 1 device only implicitly referenced */
uint16_t udev_flags; /* Udev control flags */
void *context; /* External supplied context */
struct load_properties props; /* For creation/table (re)load */
/*
* If presuspend of child node is needed
* Note: only direct child is allowed
*/
struct dm_tree_node *presuspend_node;
/* Callback */
dm_node_callback_fn callback;
void *callback_data;
int activated; /* tracks activation during preload */
};
struct dm_tree {
struct dm_pool *mem;
struct dm_hash_table *devs;
struct dm_hash_table *uuids;
struct dm_tree_node root;
int skip_lockfs; /* 1 skips lockfs (for non-snapshots) */
int no_flush; /* 1 sets noflush (mirrors/multipath) */
int retry_remove; /* 1 retries remove if not successful */
uint32_t cookie;
char buf[DM_NAME_LEN + 32]; /* print buffer for device_name (major:minor) */
const char * const *optional_uuid_suffixes; /* uuid suffixes ignored when matching */
};
/*
* Tree functions.
*/
struct dm_tree *dm_tree_create(void)
{
struct dm_pool *dmem;
struct dm_tree *dtree;
if (!(dmem = dm_pool_create("dtree", 1024)) ||
!(dtree = dm_pool_zalloc(dmem, sizeof(*dtree)))) {
log_error("Failed to allocate dtree.");
if (dmem)
dm_pool_destroy(dmem);
return NULL;
}
dtree->root.dtree = dtree;
dm_list_init(&dtree->root.uses);
dm_list_init(&dtree->root.used_by);
dtree->skip_lockfs = 0;
dtree->no_flush = 0;
dtree->mem = dmem;
dtree->optional_uuid_suffixes = NULL;
if (!(dtree->devs = dm_hash_create(8))) {
log_error("dtree hash creation failed");
dm_pool_destroy(dtree->mem);
return NULL;
}
if (!(dtree->uuids = dm_hash_create(32))) {
log_error("dtree uuid hash creation failed");
dm_hash_destroy(dtree->devs);
dm_pool_destroy(dtree->mem);
return NULL;
}
return dtree;
}
void dm_tree_free(struct dm_tree *dtree)
{
if (!dtree)
return;
dm_hash_destroy(dtree->uuids);
dm_hash_destroy(dtree->devs);
dm_pool_destroy(dtree->mem);
}
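/*
 * Minimal lifecycle sketch (not part of the original source), using only
 * functions defined in this file.  The suffix list is hypothetical and
 * must be NULL-terminated, as expected by _find_dm_tree_node_by_uuid():
 *
 *	static const char *_suffixes[] = { "pool", "cow", NULL };
 *	struct dm_tree *dtree;
 *
 *	if (!(dtree = dm_tree_create()))
 *		return 0;
 *
 *	dm_tree_set_optional_uuid_suffixes(dtree, _suffixes);
 *	// ... add devices and process the tree ...
 *	dm_tree_free(dtree);	// releases everything allocated from dtree->mem
 */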
void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
{
node->dtree->cookie = cookie;
}
uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
{
return node->dtree->cookie;
}
void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
{
dnode->dtree->skip_lockfs = 1;
}
void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
{
dnode->dtree->no_flush = 1;
}
void dm_tree_retry_remove(struct dm_tree_node *dnode)
{
dnode->dtree->retry_remove = 1;
}
/*
* Node functions.
*/
static int _nodes_are_linked(const struct dm_tree_node *parent,
const struct dm_tree_node *child)
{
struct dm_tree_link *dlink;
dm_list_iterate_items(dlink, &parent->uses)
if (dlink->node == child)
return 1;
return 0;
}
static int _link(struct dm_list *list, struct dm_tree_node *node)
{
struct dm_tree_link *dlink;
if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
log_error("dtree link allocation failed");
return 0;
}
dlink->node = node;
dm_list_add(list, &dlink->list);
return 1;
}
static int _link_nodes(struct dm_tree_node *parent,
struct dm_tree_node *child)
{
if (_nodes_are_linked(parent, child))
return 1;
if (!_link(&parent->uses, child))
return 0;
if (!_link(&child->used_by, parent))
return 0;
return 1;
}
static void _unlink(struct dm_list *list, struct dm_tree_node *node)
{
struct dm_tree_link *dlink;
dm_list_iterate_items(dlink, list)
if (dlink->node == node) {
dm_list_del(&dlink->list);
break;
}
}
static void _unlink_nodes(struct dm_tree_node *parent,
struct dm_tree_node *child)
{
if (!_nodes_are_linked(parent, child))
return;
_unlink(&parent->uses, child);
_unlink(&child->used_by, parent);
}
static int _add_to_toplevel(struct dm_tree_node *node)
{
return _link_nodes(&node->dtree->root, node);
}
static void _remove_from_toplevel(struct dm_tree_node *node)
{
_unlink_nodes(&node->dtree->root, node);
}
static int _add_to_bottomlevel(struct dm_tree_node *node)
{
return _link_nodes(node, &node->dtree->root);
}
static void _remove_from_bottomlevel(struct dm_tree_node *node)
{
_unlink_nodes(node, &node->dtree->root);
}
static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
{
/* Don't link to root node if child already has a parent */
if (parent == &parent->dtree->root) {
if (dm_tree_node_num_children(child, 1))
return 1;
} else
_remove_from_toplevel(child);
if (child == &child->dtree->root) {
if (dm_tree_node_num_children(parent, 0))
return 1;
} else
_remove_from_bottomlevel(parent);
return _link_nodes(parent, child);
}
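/*
 * Illustrative note (not part of the original source): the root node serves
 * as a placeholder parent for devices with no parent (top level) and as a
 * placeholder child for devices with no children (bottom level).
 * _link_tree_nodes() drops the relevant placeholder link once a real
 * relation is established, so a plausible sequence for a newly added
 * device might be:
 *
 *	if (!_add_to_toplevel(node) || !_add_to_bottomlevel(node))
 *		return NULL;			// start at both placeholder levels
 *	...
 *	if (!_link_tree_nodes(parent, node))	// real parent appears later:
 *		return NULL;			// node leaves the top level
 */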
static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
const char *name,
const char *uuid,
struct dm_info *info,
void *context,
uint16_t udev_flags)
{
struct dm_tree_node *node;
dev_t dev;
if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node))) ||
!(node->name = dm_pool_strdup(dtree->mem, name)) ||
!(node->uuid = dm_pool_strdup(dtree->mem, uuid))) {
log_error("_create_dm_tree_node alloc failed.");
return NULL;
}
node->dtree = dtree;
node->info = *info;
node->context = context;
node->udev_flags = udev_flags;
dm_list_init(&node->uses);
dm_list_init(&node->used_by);
dm_list_init(&node->props.segs);
dev = MKDEV(info->major, info->minor);
if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
sizeof(dev), node)) {
log_error("dtree node hash insertion failed");
dm_pool_free(dtree->mem, node);
return NULL;
}
if (*uuid && !dm_hash_insert(dtree->uuids, uuid, node)) {
log_error("dtree uuid hash insertion failed");
dm_hash_remove_binary(dtree->devs, (const char *) &dev,
sizeof(dev));
dm_pool_free(dtree->mem, node);
return NULL;
}
return node;
}
static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
uint32_t major, uint32_t minor)
{
dev_t dev = MKDEV(major, minor);
return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
sizeof(dev));
}
void dm_tree_set_optional_uuid_suffixes(struct dm_tree *dtree, const char **optional_uuid_suffixes)
{
dtree->optional_uuid_suffixes = optional_uuid_suffixes;
}
static const char *_node_name(struct dm_tree_node *dnode);
static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
const char *uuid)
{
struct dm_tree_node *node;
const char *default_uuid_prefix;
size_t default_uuid_prefix_len;
const char *suffix, *suffix_position;
char uuid_without_suffix[DM_UUID_LEN + 1];
unsigned i = 0;
const char * const *suffix_list = dtree->optional_uuid_suffixes;
if ((node = dm_hash_lookup(dtree->uuids, uuid))) {
log_debug_activation("Matched uuid %s %s in deptree.", uuid, _node_name(node));
return node;
}
if (suffix_list && (suffix_position = strrchr(uuid, '-'))) {
while ((suffix = suffix_list[i++])) {
if (strcmp(suffix_position + 1, suffix))
continue;
dm_strncpy(uuid_without_suffix, uuid, sizeof(uuid_without_suffix));
uuid_without_suffix[suffix_position - uuid] = '\0';
if ((node = dm_hash_lookup(dtree->uuids, uuid_without_suffix))) {
log_debug_activation("Matched uuid %s %s (missing suffix -%s) in deptree.",
uuid_without_suffix, _node_name(node), suffix);
return node;
}
break;
};
}
default_uuid_prefix = dm_uuid_prefix();
default_uuid_prefix_len = strlen(default_uuid_prefix);
if ((strncmp(uuid, default_uuid_prefix, default_uuid_prefix_len) == 0) &&
(node = dm_hash_lookup(dtree->uuids, uuid + default_uuid_prefix_len))) {
log_debug_activation("Matched uuid %s %s (missing prefix) in deptree.",
uuid + default_uuid_prefix_len, _node_name(node));
return node;
}
log_debug_activation("Not matched uuid %s in deptree.", uuid);
return NULL;
}
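/*
 * Illustrative walk-through (not part of the original source) of the lookup
 * order above, assuming dm_uuid_prefix() returns "LVM-" and "pool" has been
 * registered via dm_tree_set_optional_uuid_suffixes():
 *
 *	1. exact match:	 "LVM-<hex>-pool" is looked up as given;
 *	2. suffix drop:	 if that misses and the text after the last '-' is a
 *			 registered suffix, "LVM-<hex>" is tried;
 *	3. prefix drop:	 if the uuid starts with the default prefix,
 *			 "<hex>-pool" (the uuid without "LVM-") is tried.
 */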
/* Return node's device_name (major:minor) for debug messages */
static const char *_node_name(struct dm_tree_node *dnode)
{
if (dm_snprintf(dnode->dtree->buf, sizeof(dnode->dtree->buf),
"%s (" FMTu32 ":" FMTu32 ")",
dnode->name ? dnode->name : "",
dnode->info.major, dnode->info.minor) < 0) {
stack;
return dnode->name;
}
return dnode->dtree->buf;
}
void dm_tree_node_set_udev_flags(struct dm_tree_node *dnode, uint16_t udev_flags)
{
if (udev_flags != dnode->udev_flags)
log_debug_activation("Resetting %s udev_flags from 0x%x to 0x%x.",
_node_name(dnode),
dnode->udev_flags, udev_flags);
dnode->udev_flags = udev_flags;
}
void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
uint32_t read_ahead,
uint32_t read_ahead_flags)
{
dnode->props.read_ahead = read_ahead;
dnode->props.read_ahead_flags = read_ahead_flags;
}
void dm_tree_node_set_presuspend_node(struct dm_tree_node *