/* * Copyright (C) 2011-2013 Red Hat, Inc. All rights reserved. * * This file is part of LVM2. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU Lesser General Public License v.2.1. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "lib.h" #include "toolcontext.h" #include "metadata.h" #include "segtype.h" #include "text_export.h" #include "config.h" #include "activate.h" #include "str_list.h" #include "defaults.h" #ifdef DMEVENTD # include "libdevmapper-event.h" #endif /* Dm kernel module name for thin provisiong */ static const char _thin_pool_module[] = "thin-pool"; static const char _thin_module[] = "thin"; /* * Macro used as return argument - returns 0. * return is left to be written in the function for better readability. */ #define SEG_LOG_ERROR(t, p...) \ log_error(t " segment %s of logical volume %s.", ## p, \ dm_config_parent_name(sn), seg->lv->name), 0; /* TODO: using static field here, maybe should be a part of segment_type */ static unsigned _feature_mask; static const char *_thin_pool_name(const struct lv_segment *seg) { return seg->segtype->name; } static int _thin_pool_add_message(struct lv_segment *seg, const char *key, const struct dm_config_node *sn) { const char *lv_name = NULL; struct logical_volume *lv = NULL; uint32_t delete_id = 0; dm_thin_message_t type; /* Message must have only one from: create, delete */ if (dm_config_get_str(sn, "create", &lv_name)) { if (!(lv = find_lv(seg->lv->vg, lv_name))) return SEG_LOG_ERROR("Unknown LV %s for create message in", lv_name); /* FIXME: switch to _SNAP later, if the created LV has an origin */ type = DM_THIN_MESSAGE_CREATE_THIN; } else if (dm_config_get_uint32(sn, "delete", &delete_id)) type = DM_THIN_MESSAGE_DELETE; else return SEG_LOG_ERROR("Unknown message in"); if (!attach_pool_message(seg, type, lv, delete_id, 1)) return_0; return 1; } static int _thin_pool_text_import(struct lv_segment *seg, const struct dm_config_node *sn, struct dm_hash_table *pv_hash __attribute__((unused))) { const char *lv_name; struct logical_volume *pool_data_lv, *pool_metadata_lv; const char *discards_str = NULL; if (!dm_config_get_str(sn, "metadata", &lv_name)) return SEG_LOG_ERROR("Metadata must be a string in"); if (!(pool_metadata_lv = find_lv(seg->lv->vg, lv_name))) return SEG_LOG_ERROR("Unknown metadata %s in", lv_name); if (!dm_config_get_str(sn, "pool", &lv_name)) return SEG_LOG_ERROR("Pool must be a string in"); if (!(pool_data_lv = find_lv(seg->lv->vg, lv_name))) return SEG_LOG_ERROR("Unknown pool %s in", lv_name); if (!attach_pool_data_lv(seg, pool_data_lv)) return_0; if (!attach_pool_metadata_lv(seg, pool_metadata_lv)) return_0; if (!dm_config_get_uint64(sn, "transaction_id", &seg->transaction_id)) return SEG_LOG_ERROR("Could not read transaction_id for"); if (!dm_config_get_uint32(sn, "chunk_size", &seg->chunk_size)) return SEG_LOG_ERROR("Could not read chunk_size"); if (dm_config_has_node(sn, "discards") && !dm_config_get_str(sn, "discards", &discards_str)) return SEG_LOG_ERROR("Could not read discards for"); if (!discards_str) seg->discards = THIN_DISCARDS_IGNORE; else if (!get_pool_discards(discards_str, &seg->discards)) return SEG_LOG_ERROR("Discards option unsupported for"); if (dm_config_has_node(sn, "low_water_mark") && !dm_config_get_uint64(sn, "low_water_mark", &seg->low_water_mark)) return SEG_LOG_ERROR("Could not read low_water_mark"); if ((seg->chunk_size < DM_THIN_MIN_DATA_BLOCK_SIZE) || (seg->chunk_size > DM_THIN_MAX_DATA_BLOCK_SIZE)) return SEG_LOG_ERROR("Unsupported value %u for chunk_size", seg->device_id); if (dm_config_has_node(sn, "zero_new_blocks") && !dm_config_get_uint32(sn, "zero_new_blocks", &seg->zero_new_blocks)) return SEG_LOG_ERROR("Could not read zero_new_blocks for"); /* Read messages */ for (; sn; sn = sn->sib) if (!(sn->v) && !_thin_pool_add_message(seg, sn->key, sn->child)) return_0; return 1; } static int _thin_pool_text_import_area_count(const struct dm_config_node *sn, uint32_t *area_count) { *area_count = 1; return 1; } static int _thin_pool_text_export(const struct lv_segment *seg, struct formatter *f) { unsigned cnt = 0; const struct lv_thin_message *tmsg; outf(f, "metadata = \"%s\"", seg->metadata_lv->name); outf(f, "pool = \"%s\"", seg_lv(seg, 0)->name); outf(f, "transaction_id = %" PRIu64, seg->transaction_id); outsize(f, (uint64_t) seg->chunk_size, "chunk_size = %u", seg->chunk_size); switch (seg->discards) { case THIN_DISCARDS_PASSDOWN: case THIN_DISCARDS_NO_PASSDOWN: case THIN_DISCARDS_IGNORE: outf(f, "discards = \"%s\"", get_pool_discards_name(seg->discards)); break; default: log_error(INTERNAL_ERROR "Invalid discards value %d.", seg->discards); return 0; } if (seg->low_water_mark) outf(f, "low_water_mark = %" PRIu64, seg->low_water_mark); if (seg->zero_new_blocks) outf(f, "zero_new_blocks = 1"); dm_list_iterate_items(tmsg, &seg->thin_messages) { /* Extra validation */ switch (tmsg->type) { case DM_THIN_MESSAGE_CREATE_SNAP: case DM_THIN_MESSAGE_CREATE_THIN: if (!lv_is_thin_volume(tmsg->u.lv)) { log_error(INTERNAL_ERROR "LV %s is not a thin volume.", tmsg->u.lv->name); return 0; } break; default: break; } if (!cnt) outnl(f); outf(f, "message%d {", ++cnt); out_inc_indent(f); switch (tmsg->type) { case DM_THIN_MESSAGE_CREATE_SNAP: case DM_THIN_MESSAGE_CREATE_THIN: outf(f, "create = \"%s\"", tmsg->u.lv->name); break; case DM_THIN_MESSAGE_DELETE: outf(f, "delete = %d", tmsg->u.delete_id); break; default: log_error(INTERNAL_ERROR "Passed unsupported message."); return 0; } out_dec_indent(f); outf(f, "}"); } return 1; } #ifdef DEVMAPPER_SUPPORT static int _thin_target_present(struct cmd_context *cmd, const struct lv_segment *seg __attribute__((unused)), unsigned *attributes); static int _thin_pool_modules_needed(struct dm_pool *mem, const struct lv_segment *seg __attribute__((unused)), struct dm_list *modules) { if (!str_list_add(mem, modules, _thin_pool_module)) { log_error("String list allocation failed for thin_pool."); return 0; } return 1; } static int _thin_modules_needed(struct dm_pool *mem, const struct lv_segment *seg, struct dm_list *modules) { if (!_thin_pool_modules_needed(mem, seg, modules)) return_0; if (!str_list_add(mem, modules, _thin_module)) { log_error("String list allocation failed for thin."); return 0; } return 1; } static int _thin_pool_add_target_line(struct dev_manager *dm, struct dm_pool *mem, struct cmd_context *cmd, void **target_state __attribute__((unused)), struct lv_segment *seg, const struct lv_activate_opts *laopts, struct dm_tree_node *node, uint64_t len, uint32_t *pvmove_mirror_count __attribute__((unused))) { static int _no_discards = 0; char *metadata_dlid, *pool_dlid; const struct lv_thin_message *lmsg; const struct logical_volume *origin; struct lvinfo info; uint64_t transaction_id = 0; unsigned attr; if (!_thin_target_present(cmd, NULL, &attr)) return_0; if (!seg->metadata_lv) { log_error(INTERNAL_ERROR "Thin pool is missing metadata device."); return 0; } if (!(attr & THIN_FEATURE_BLOCK_SIZE) && (seg->chunk_size & (seg->chunk_size - 1))) { log_error("Thin pool target does not support %uKiB chunk size " "(needs kernel >= 3.6).", seg->chunk_size / 2); return 0; } if (!(metadata_dlid = build_dm_uuid(mem, seg->metadata_lv->lvid.s, NULL))) { log_error("Failed to build uuid for metadata LV %s.", seg->metadata_lv->name); return 0; } if (!(pool_dlid = build_dm_uuid(mem, seg_lv(seg, 0)->lvid.s, NULL))) { log_error("Failed to build uuid for pool LV %s.", seg_lv(seg, 0)->name); return 0; } if (!dm_tree_node_add_thin_pool_target(node, len, seg->transaction_id, metadata_dlid, pool_dlid, seg->chunk_size, seg->low_water_mark, seg->zero_new_blocks ? 0 : 1)) return_0; if (attr & THIN_FEATURE_DISCARDS) { /* Use ignore for discards ignore or non-power-of-2 chunk_size and <1.5 target */ /* FIXME: Check whether underlying dev supports discards */ if (((!(attr & THIN_FEATURE_DISCARDS_NON_POWER_2) && (seg->chunk_size & (seg->chunk_size - 1))) || (seg->discards == THIN_DISCARDS_IGNORE))) { if (!dm_tree_node_set_thin_pool_discard(node, 1, 0)) return_0; } else if (!dm_tree_node_set_thin_pool_discard(node, 0, (seg->discards == THIN_DISCARDS_NO_PASSDOWN))) return_0; } else if (seg->discards != THIN_DISCARDS_IGNORE) log_warn_suppress(_no_discards++, "WARNING: Thin pool target does " "not support discards (needs kernel >= 3.4)."); /* * Add messages only for activation tree. * Otherwise avoid checking for existence of suspended origin. * Also transation_id is checked only when snapshot origin is active. * (This might change later) */ if (!laopts->send_messages) return 1; dm_list_iterate_items(lmsg, &seg->thin_messages) { switch (lmsg->type) { case DM_THIN_MESSAGE_CREATE_THIN: origin = first_seg(lmsg->u.lv)->origin; /* Check if the origin is suspended */ if (origin && lv_info(cmd, origin, 1, &info, 0, 0) && info.exists && !info.suspended) { /* Origin is not suspended, but the transaction may have been * already transfered, so test for transaction_id and * allow to pass in the message for dmtree processing * so it will skip all messages later. */ if (!lv_thin_pool_transaction_id(seg->lv, &transaction_id)) return_0; /* Thin pool should exist and work */ if (transaction_id != seg->transaction_id) { log_error("Can't create snapshot %s as origin %s is not suspended.", lmsg->u.lv->name, origin->name); return 0; } } log_debug_activation("Thin pool create_%s %s.", (!origin) ? "thin" : "snap", lmsg->u.lv->name); if (!dm_tree_node_add_thin_pool_message(node, (!origin) ? lmsg->type : DM_THIN_MESSAGE_CREATE_SNAP, first_seg(lmsg->u.lv)->device_id, (!origin) ? 0 : first_seg(origin)->device_id)) return_0; break; case DM_THIN_MESSAGE_DELETE: log_debug_activation("Thin pool delete %u.", lmsg->u.delete_id); if (!dm_tree_node_add_thin_pool_message(node, lmsg->type, lmsg->u.delete_id, 0)) return_0; break; default: log_error(INTERNAL_ERROR "Unsupported message."); return 0; } } if (!dm_list_empty(&seg->thin_messages)) { /* Messages were passed, modify transaction_id as the last one */ log_debug_activation("Thin pool set transaction id %" PRIu64 ".", seg->transaction_id); if (!dm_tree_node_add_thin_pool_message(node, DM_THIN_MESSAGE_SET_TRANSACTION_ID, seg->transaction_id - 1, seg->transaction_id)) return_0; } return 1; } static int _thin_pool_target_percent(void **target_state __attribute__((unused)), percent_t *percent, struct dm_pool *mem, struct cmd_context *cmd __attribute__((unused)), struct lv_segment *seg, char *params, uint64_t *total_numerator, uint64_t *total_denominator) { struct dm_status_thin_pool *s; if (!dm_get_status_thin_pool(mem, params, &s)) return_0; /* With 'seg' report metadata percent, otherwice data percent */ if (seg) { *percent = make_percent(s->used_metadata_blocks, s->total_metadata_blocks); *total_numerator += s->used_metadata_blocks; *total_denominator += s->total_metadata_blocks; } else { *percent = make_percent(s->used_data_blocks, s->total_data_blocks); *total_numerator += s->used_data_blocks; *total_denominator += s->total_data_blocks; } return 1; } # ifdef DMEVENTD static const char *_get_thin_dso_path(struct cmd_context *cmd) { return get_monitor_dso_path(cmd, find_config_tree_str(cmd, dmeventd_thin_library_CFG, NULL)); } /* FIXME Cache this */ static int _target_registered(struct lv_segment *seg, int *pending) { return target_registered_with_dmeventd(seg->lv->vg->cmd, _get_thin_dso_path(seg->lv->vg->cmd), seg->lv, pending); } /* FIXME This gets run while suspended and performs banned operations. */ static int _target_set_events(struct lv_segment *seg, int evmask, int set) { /* FIXME Make timeout (10) configurable */ return target_register_events(seg->lv->vg->cmd, _get_thin_dso_path(seg->lv->vg->cmd), seg->lv, evmask, set, 10); } static int _target_register_events(struct lv_segment *seg, int events) { return _target_set_events(seg, events, 1); } static int _target_unregister_events(struct lv_segment *seg, int events) { return _target_set_events(seg, events, 0); } # endif /* DMEVENTD */ #endif /* DEVMAPPER_SUPPORT */ static const char *_thin_name(const struct lv_segment *seg) { return seg->segtype->name; } static int _thin_text_import(struct lv_segment *seg, const struct dm_config_node *sn, struct dm_hash_table *pv_hash __attribute__((unused))) { const char *lv_name; struct logical_volume *pool_lv, *origin = NULL, *external_lv = NULL, *merge_lv = NULL; if (!dm_config_get_str(sn, "thin_pool", &lv_name)) return SEG_LOG_ERROR("Thin pool must be a string in"); if (!(pool_lv = find_lv(seg->lv->vg, lv_name))) return SEG_LOG_ERROR("Unknown thin pool %s in", lv_name); if (!dm_config_get_uint64(sn, "transaction_id", &seg->transaction_id)) return SEG_LOG_ERROR("Could not read transaction_id for"); if (dm_config_has_node(sn, "origin")) { if (!dm_config_get_str(sn, "origin", &lv_name)) return SEG_LOG_ERROR("Origin must be a string in"); if (!(origin = find_lv(seg->lv->vg, lv_name))) return SEG_LOG_ERROR("Unknown origin %s in", lv_name); } if (dm_config_has_node(sn, "merge")) { if (!dm_config_get_str(sn, "merge", &lv_name)) return SEG_LOG_ERROR("Merge lv must be a string in"); if (!(merge_lv = find_lv(seg->lv->vg, lv_name))) return SEG_LOG_ERROR("Unknown merge lv %s in", lv_name); } if (!dm_config_get_uint32(sn, "device_id", &seg->device_id)) return SEG_LOG_ERROR("Could not read device_id for"); if (seg->device_id > DM_THIN_MAX_DEVICE_ID) return SEG_LOG_ERROR("Unsupported value %u for device_id", seg->device_id); if (dm_config_has_node(sn, "external_origin")) { if (!dm_config_get_str(sn, "external_origin", &lv_name)) return SEG_LOG_ERROR("External origin must be a string in"); if (!(external_lv = find_lv(seg->lv->vg, lv_name))) return SEG_LOG_ERROR("Unknown external origin %s in", lv_name); } if (!attach_pool_lv(seg, pool_lv, origin, merge_lv)) return_0; if (!attach_thin_external_origin(seg, external_lv)) return_0; return 1; } static int _thin_text_export(const struct lv_segment *seg, struct formatter *f) { outf(f, "thin_pool = \"%s\"", seg->pool_lv->name); outf(f, "transaction_id = %" PRIu64, seg->transaction_id); outf(f, "device_id = %d", seg->device_id); if (seg->external_lv) outf(f, "external_origin = \"%s\"", seg->external_lv->name); if (seg->origin) outf(f, "origin = \"%s\"", seg->origin->name); if (seg->merge_lv) outf(f, "merge = \"%s\"", seg->merge_lv->name); return 1; } #ifdef DEVMAPPER_SUPPORT static int _thin_add_target_line(struct dev_manager *dm, struct dm_pool *mem, struct cmd_context *cmd __attribute__((unused)), void **target_state __attribute__((unused)), struct lv_segment *seg, const struct lv_activate_opts *laopts, struct dm_tree_node *node, uint64_t len, uint32_t *pvmove_mirror_count __attribute__((unused))) { char *pool_dlid, *external_dlid; uint32_t device_id = seg->device_id; unsigned attr; if (!seg->pool_lv) { log_error(INTERNAL_ERROR "Segment %s has no pool.", seg->lv->name); return 0; } if (!(pool_dlid = build_dm_uuid(mem, seg->pool_lv->lvid.s, lv_layer(seg->pool_lv)))) { log_error("Failed to build uuid for pool LV %s.", seg->pool_lv->name); return 0; } if (!laopts->no_merging) { if (seg->merge_lv) { log_error(INTERNAL_ERROR "Failed to add merged segment of %s.", seg->lv->name); return 0; } /* * merge support for thinp snapshots is implemented by * simply swapping the thinp device_id of the snapshot * and origin. */ if (lv_is_merging_origin(seg->lv) && seg_is_thin_volume(find_snapshot(seg->lv))) /* origin, use merging snapshot's device_id */ device_id = find_snapshot(seg->lv)->device_id; } if (!dm_tree_node_add_thin_target(node, len, pool_dlid, device_id)) return_0; /* Add external origin LV */ if (seg->external_lv) { if (!pool_supports_external_origin(first_seg(seg->pool_lv), seg->external_lv)) return_0; if (seg->external_lv->size < seg->lv->size) { /* Validate target supports smaller external origin */ if (!_thin_target_present(cmd, NULL, &attr) || !(attr & THIN_FEATURE_EXTERNAL_ORIGIN_EXTEND)) { log_error("Thin target does not support smaller size of external origin LV %s.", seg->external_lv->name); return 0; } } if (!(external_dlid = build_dm_uuid(mem, seg->external_lv->lvid.s, lv_layer(seg->external_lv)))) { log_error("Failed to build uuid for external origin LV %s.", seg->external_lv->name); return 0; } if (!dm_tree_node_set_thin_external_origin(node, external_dlid)) return_0; } return 1; } static int _thin_target_percent(void **target_state __attribute__((unused)), percent_t *percent, struct dm_pool *mem, struct cmd_context *cmd __attribute__((unused)), struct lv_segment *seg, char *params, uint64_t *total_numerator, uint64_t *total_denominator) { struct dm_status_thin *s; /* Status for thin device is in sectors */ if (!dm_get_status_thin(mem, params, &s)) return_0; if (seg) { *percent = make_percent(s->mapped_sectors, seg->lv->size); *total_denominator += seg->lv->size; } else { /* No lv_segment info here */ *percent = PERCENT_INVALID; /* FIXME: Using denominator to pass the mapped info upward? */ *total_denominator += s->highest_mapped_sector; } *total_numerator += s->mapped_sectors; return 1; } static int _thin_target_present(struct cmd_context *cmd, const struct lv_segment *seg __attribute__((unused)), unsigned *attributes) { /* List of features with their kernel target version */ static const struct feature { uint32_t maj; uint32_t min; unsigned thin_feature; const char *feature; } const _features[] = { { 1, 1, THIN_FEATURE_DISCARDS, "discards" }, { 1, 1, THIN_FEATURE_EXTERNAL_ORIGIN, "external_origin" }, { 1, 4, THIN_FEATURE_BLOCK_SIZE, "block_size" }, { 1, 5, THIN_FEATURE_DISCARDS_NON_POWER_2, "discards_non_power_2" }, { 1, 10, THIN_FEATURE_METADATA_RESIZE, "metadata_resize" }, { 9, 11, THIN_FEATURE_EXTERNAL_ORIGIN_EXTEND, "external_origin_extend" }, }; static const char _lvmconf[] = "global/thin_disabled_features"; static int _checked = 0; static int _present = 0; static unsigned _attrs = 0; uint32_t maj, min, patchlevel; unsigned i; const struct dm_config_node *cn; const struct dm_config_value *cv; const char *str; if (!_checked) { _present = target_present(cmd, _thin_pool_module, 1); if (!target_version(_thin_pool_module, &maj, &min, &patchlevel)) { log_error("Cannot read %s target version.", _thin_pool_module); return 0; } for (i = 0; i < sizeof(_features)/sizeof(*_features); i++) if ((maj > _features[i].maj) || (maj == _features[i].maj && min >= _features[i].min)) _attrs |= _features[i].thin_feature; else log_very_verbose("Target %s does not support %s.", _thin_pool_module, _features[i].feature); _checked = 1; } if (attributes) { if (!_feature_mask) { /* Support runtime lvm.conf changes, N.B. avoid 32 feature */ if ((cn = find_config_tree_node(cmd, global_thin_disabled_features_CFG, NULL))) { for (cv = cn->v; cv; cv = cv->next) { if (cv->type != DM_CFG_STRING) { log_error("Ignoring invalid string in config file %s.", _lvmconf); continue; } str = cv->v.str; if (!*str) { log_error("Ignoring empty string in config file %s.", _lvmconf); continue; } for (i = 0; i < sizeof(_features)/sizeof(*_features); i++) if (strcasecmp(str, _features[i].feature) == 0) _feature_mask |= _features[i].thin_feature; } } _feature_mask = ~_feature_mask; for (i = 0; i < sizeof(_features)/sizeof(*_features); i++) if ((_attrs & _features[i].thin_feature) && !(_feature_mask & _features[i].thin_feature)) log_very_verbose("Target %s %s support disabled by %s", _thin_pool_module, _features[i].feature, _lvmconf); } *attributes = _attrs & _feature_mask; } return _present; } #endif static void _thin_destroy(struct segment_type *segtype) { dm_free(segtype); } static struct segtype_handler _thin_pool_ops = { .name = _thin_pool_name, .text_import = _thin_pool_text_import, .text_import_area_count = _thin_pool_text_import_area_count, .text_export = _thin_pool_text_export, #ifdef DEVMAPPER_SUPPORT .add_target_line = _thin_pool_add_target_line, .target_percent = _thin_pool_target_percent, .target_present = _thin_target_present, # ifdef DMEVENTD .target_monitored = _target_registered, .target_monitor_events = _target_register_events, .target_unmonitor_events = _target_unregister_events, # endif /* DMEVENTD */ #endif #ifdef DEVMAPPER_SUPPORT .modules_needed = _thin_pool_modules_needed, #endif .destroy = _thin_destroy, }; static struct segtype_handler _thin_ops = { .name = _thin_name, .text_import = _thin_text_import, .text_export = _thin_text_export, #ifdef DEVMAPPER_SUPPORT .add_target_line = _thin_add_target_line, .target_percent = _thin_target_percent, .target_present = _thin_target_present, .modules_needed = _thin_modules_needed, #endif .destroy = _thin_destroy, }; #ifdef THIN_INTERNAL int init_thin_segtypes(struct cmd_context *cmd, struct segtype_library *seglib) #else /* Shared */ int init_multiple_segtypes(struct cmd_context *cmd, struct segtype_library *seglib); int init_multiple_segtypes(struct cmd_context *cmd, struct segtype_library *seglib) #endif { static const struct { struct segtype_handler *ops; const char name[16]; uint32_t flags; } reg_segtypes[] = { { &_thin_pool_ops, "thin-pool", SEG_THIN_POOL }, /* FIXME Maybe use SEG_THIN_VOLUME instead of SEG_VIRTUAL */ { &_thin_ops, "thin", SEG_THIN_VOLUME | SEG_VIRTUAL } }; struct segment_type *segtype; unsigned i; for (i = 0; i < sizeof(reg_segtypes)/sizeof(reg_segtypes[0]); ++i) { segtype = dm_zalloc(sizeof(*segtype)); if (!segtype) { log_error("Failed to allocate memory for %s segtype", reg_segtypes[i].name); return 0; } segtype->ops = reg_segtypes[i].ops; segtype->name = reg_segtypes[i].name; segtype->flags = reg_segtypes[i].flags; #ifdef DEVMAPPER_SUPPORT # ifdef DMEVENTD if ((reg_segtypes[i].flags & SEG_THIN_POOL) && _get_thin_dso_path(cmd)) segtype->flags |= SEG_MONITORED; # endif /* DMEVENTD */ #endif if (!lvm_register_segtype(seglib, segtype)) /* segtype is already destroyed */ return_0; log_very_verbose("Initialised segtype: %s", segtype->name); } /* Reset mask for recalc */ _feature_mask = 0; return 1; }