/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2018 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef LVM_ACTIVATE_H
#define LVM_ACTIVATE_H
#include "lib/metadata/metadata-exported.h"
struct lvinfo {
	int exists;
	int suspended;
	unsigned int open_count;
	int major;
	int minor;
	int read_only;
	int live_table;
	int inactive_table;
	uint32_t read_ahead;
};
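
/* Type of dm target status held in struct lv_seg_status below. */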
typedef enum {
	SEG_STATUS_NONE,
	SEG_STATUS_CACHE,
	SEG_STATUS_RAID,
	SEG_STATUS_SNAPSHOT,
	SEG_STATUS_THIN,
	SEG_STATUS_THIN_POOL,
	SEG_STATUS_VDO_POOL,
	SEG_STATUS_WRITECACHE,
	SEG_STATUS_INTEGRITY,
	SEG_STATUS_UNKNOWN
} lv_seg_status_type_t;
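
/*
 * Status of one LV segment as retrieved via the DM_DEVICE_STATUS ioctl.
 * The status is bound to a single segment (an LV may consist of several
 * segments) and is kept for reuse while reporting the fields of one line,
 * so only one DM_DEVICE_STATUS call is needed per reported LV segment.
 */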
struct lv_seg_status {
	struct dm_pool *mem;			/* input */
	const struct lv_segment *seg;		/* input */
	lv_seg_status_type_t type;		/* output */
	union {
		struct dm_status_cache *cache;
		struct dm_status_raid *raid;
		struct dm_status_snapshot *snapshot;
		struct dm_status_thin *thin;
		struct dm_status_thin_pool *thin_pool;
		struct dm_status_writecache *writecache;
		struct dm_status_integrity *integrity;
		struct lv_status_vdo vdo_pool;
	};
};
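
/*
 * The three levels of information used to report an LV or LV segment:
 *   - the LV metadata itself (lv),
 *   - the result of the DM_DEVICE_INFO ioctl (info),
 *   - the per-segment result of the DM_DEVICE_STATUS ioctl (seg_status).
 */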
struct lv_with_info_and_seg_status {
	int info_ok;
	const struct logical_volume *lv;	/* output */
	struct lvinfo info;			/* output */
	int seg_part_of_lv;			/* output */
	struct lv_seg_status seg_status;	/* output, see lv_seg_status */
	/* TODO: add extra status for snapshot origin */
};
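
/* Options modifying how an LV is activated, suspended or resumed. */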
struct lv_activate_opts {
	int exclusive;
	int origin_only;
	int no_merging;
	int send_messages;
	int skip_in_use;
	unsigned revert;
	unsigned read_only;
	unsigned noscan;	/* Mark this LV to avoid its scanning. This also
				 * directs udev to use the proper udev flag to avoid
				 * any scanning in udev. This udev flag is automatically
				 * dropped in the udev db on any spurious event that follows. */
	unsigned temporary;	/* Mark this LV as temporary: it is created, used
				 * and deactivated within a single LVM command
				 * execution. Such LVs are mostly helper LVs used
				 * to do some action or cleanup before the proper
				 * LV is created. This also directs udev to use the
				 * proper set of flags to avoid any scanning in udev.
				 * These udev flags are persistent in the udev db for
				 * any spurious event that follows. */
	unsigned resuming;	/* Set when resuming after a suspend. */
	const struct logical_volume *component_lv;
};
void set_activation(int activation, int silent);
int activation(void);
int driver_version(char *version, size_t size);
int library_version(char *version, size_t size);
int module_present(struct cmd_context *cmd, const char *target_name);
int target_present_version(struct cmd_context *cmd, const char *target_name,
			   int use_modprobe, uint32_t *maj,
			   uint32_t *min, uint32_t *patchlevel);
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe);
int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel);
int get_dm_active_devices(const struct volume_group *vg, struct dm_list **devs,
			  unsigned *devs_features);
int devno_dm_uuid(struct cmd_context *cmd, int major, int minor,
		  char *uuid_buf, size_t uuid_buf_size);
int dev_dm_uuid(struct cmd_context *cmd, struct device *dev,
		char *uuid_buf, size_t uuid_buf_size);
int raid4_is_supported(struct cmd_context *cmd, const struct segment_type *segtype);
int lvm_dm_prefix_check(int major, int minor, const char *prefix);
int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
			 struct dm_list *modules);
int list_lv_modules(struct dm_pool *mem, const struct logical_volume *lv,
		    struct dm_list *modules);
void activation_release(void);
void activation_exit(void);
/* int lv_suspend(struct cmd_context *cmd, const char *lvid_s); */
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive,
			 const struct logical_volume *lv, const struct logical_volume *lv_pre);
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, const struct logical_volume *lv);
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
			unsigned origin_only, unsigned exclusive, unsigned revert, const struct logical_volume *lv);
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive,
		int noscan, int temporary, const struct logical_volume *lv);
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive,
			    int noscan, int temporary, const struct logical_volume *lv);
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s, const struct logical_volume *lv);
int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv);
int lv_deactivate_any_missing_subdevs(const struct logical_volume *lv);
int activate_lv(struct cmd_context *cmd, const struct logical_volume *lv);
int deactivate_lv(struct cmd_context *cmd, const struct logical_volume *lv);
int suspend_lv(struct cmd_context *cmd, const struct logical_volume *lv);
int suspend_lv_origin(struct cmd_context *cmd, const struct logical_volume *lv);
int resume_lv(struct cmd_context *cmd, const struct logical_volume *lv);
int resume_lv_origin(struct cmd_context *cmd, const struct logical_volume *lv);
int revert_lv(struct cmd_context *cmd, const struct logical_volume *lv);
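
/*
 * Minimal usage sketch (illustrative only): activate an LV, use its
 * device node, then deactivate it again:
 *
 *	if (!activate_lv(cmd, lv))
 *		return 0;
 *	... use the active device ...
 *	if (!deactivate_lv(cmd, lv))
 *		return 0;
 */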
/*
 * Returns 1 if info structure has been populated, else 0 on failure.
 * When lvinfo* is NULL, it returns 1 if the device is locally active, 0 otherwise.
 */
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_layer,
	    struct lvinfo *info, int with_open_count, int with_read_ahead);
int lv_info_with_name_check(struct cmd_context *cmd, const struct logical_volume *lv,
			    int use_layer, struct lvinfo *info);
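
/*
 * Usage sketch (illustrative only): check whether an LV is active
 * locally and look at its open count:
 *
 *	struct lvinfo info;
 *
 *	if (lv_info(cmd, lv, 0, &info, 1, 0) && info.exists)
 *		... LV is active; info.open_count is valid ...
 */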
/*
 * Returns 1 if the lv_with_info_and_seg_status structure has been populated,
 * else 0 on failure or if the device is not active locally.
 *
 * lv_info_with_seg_status is the same as calling lv_info and then lv_status,
 * but this function tries to do that with one ioctl if possible.
 */
int lv_info_with_seg_status(struct cmd_context *cmd,
			    const struct lv_segment *lv_seg,
			    struct lv_with_info_and_seg_status *status,
			    int with_open_count, int with_read_ahead);
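
/*
 * Usage sketch (illustrative only, assuming a dm_pool "mem" is available
 * to hold the decoded status): obtain info and segment status together:
 *
 *	struct lv_with_info_and_seg_status status = {
 *		.seg_status.type = SEG_STATUS_NONE,
 *		.seg_status.mem = mem,
 *	};
 *
 *	if (lv_info_with_seg_status(cmd, first_seg(lv), &status, 1, 1) &&
 *	    status.info_ok)
 *		... status.info and status.seg_status are valid ...
 */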
int lv_check_not_in_use(const struct logical_volume *lv, int error_if_used);
/*
 * Returns 1 if activate has been set: 1 = activate; 0 = don't.
 */
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
			 int *activate, const struct logical_volume *lv);
/*
 * Checks against the auto_activation_volume_list and
 * returns 1 if the LV should be activated, 0 otherwise.
 */
int lv_passes_auto_activation_filter(struct cmd_context *cmd, struct logical_volume *lv);
int lv_check_transient(struct logical_volume *lv);
/*
 * Returns 1 if percent has been set, else 0.
 */
int lv_snapshot_percent(const struct logical_volume *lv, dm_percent_t *percent);
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, dm_percent_t *percent, uint32_t *event_nr);
int lv_raid_percent(const struct logical_volume *lv, dm_percent_t *percent);
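
/*
 * Usage sketch (illustrative only): report how full a snapshot is;
 * the dm_percent_t value can be converted with dm_percent_to_float()
 * from libdevmapper:
 *
 *	dm_percent_t percent;
 *
 *	if (lv_snapshot_percent(lv, &percent))
 *		... percent is valid ...
 */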
int lv_raid_dev_count(const struct logical_volume *lv, uint32_t *dev_cnt);
int lv_raid_data_offset(const struct logical_volume *lv, uint64_t *data_offset);
int lv_raid_dev_health(const struct logical_volume *lv, char **dev_health);
int lv_raid_mismatch_count(const struct logical_volume *lv, uint64_t *cnt);
int lv_raid_sync_action(const struct logical_volume *lv, char **sync_action);
int lv_raid_message(const struct logical_volume *lv, const char *msg);
int lv_raid_status(const struct logical_volume *lv, struct lv_status_raid **status);
int lv_writecache_message(const struct logical_volume *lv, const char *msg);
int lv_cache_status(const struct logical_volume *cache_lv,
		    struct lv_status_cache **status);
int lv_thin_device_id(const struct logical_volume *lv, uint32_t *device_id);
int lv_thin_status(const struct logical_volume *lv, int flush,
		   struct lv_status_thin **status);
int lv_thin_pool_status(const struct logical_volume *lv, int flush,
			struct lv_status_thin_pool **status);
int lv_vdo_pool_status(const struct logical_volume *lv, int flush,
		       struct lv_status_vdo **status);
int lv_vdo_pool_percent(const struct logical_volume *lv, dm_percent_t *percent);
int lv_vdo_pool_size_config(const struct logical_volume *lv,
			    struct vdo_pool_size_config *cfg);
/*
 * Return number of LVs in the VG that are active.
 */
int lvs_in_vg_activated(const struct volume_group *vg);
int lvs_in_vg_opened(const struct volume_group *vg);
int lv_is_active(const struct logical_volume *lv);
int lv_passes_readonly_filter(const struct logical_volume *lv);
/* Check whether any component LV is active. */
const struct logical_volume *lv_component_is_active(const struct logical_volume *lv);
const struct logical_volume *lv_holder_is_active(const struct logical_volume *lv);
int deactivate_lv_with_sub_lv(const struct logical_volume *lv);
int lv_has_target_type(struct dm_pool *mem, const struct logical_volume *lv,
		       const char *layer, const char *target_type);
int monitor_dev_for_events(struct cmd_context *cmd, const struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor);
#ifdef DMEVENTD
# include "daemons/dmeventd/libdevmapper-event.h"
char *get_monitor_dso_path(struct cmd_context *cmd, int id);
int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
				    const struct logical_volume *lv, int *pending, int *monitored);
int target_register_events(struct cmd_context *cmd, const char *dso, const struct logical_volume *lv,
			   int evmask __attribute__((unused)), int set, int timeout);
#endif
int add_linear_area_to_dtree(struct dm_tree_node *node, uint64_t size,
			     uint32_t extent_size, int use_linear_target,
			     const char *vgname, const char *lvname);
/*
 * Returns 1 if PV has a dependency tree that uses anything in VG.
 */
int pv_uses_vg(struct physical_volume *pv,
	       struct volume_group *vg);
struct dev_usable_check_params {
	unsigned int check_empty:1;
	unsigned int check_blocked:1;
	unsigned int check_suspended:1;
	unsigned int check_error_target:1;
	unsigned int check_reserved:1;
	unsigned int check_lv:1;
};
/*
 * Returns 1 if the mapped device is not suspended, blocked or
 * using a reserved name.
 */
int dm_device_is_usable(struct cmd_context *cmd, struct device *dev, struct dev_usable_check_params check, int *is_lv);
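
/*
 * Usage sketch (illustrative only): test whether a dm device may be
 * safely used (e.g. before scanning it):
 *
 *	struct dev_usable_check_params ucp = {
 *		.check_empty = 1,
 *		.check_blocked = 1,
 *		.check_suspended = 1,
 *		.check_error_target = 1,
 *		.check_reserved = 1,
 *	};
 *	int is_lv;
 *
 *	if (!dm_device_is_usable(cmd, dev, ucp, &is_lv))
 *		... skip this device ...
 */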
/*
 * Declaration moved here from fs.h to keep header fs.h hidden
 */
void fs_unlock(void);
#define TARGET_NAME_CACHE "cache"
#define TARGET_NAME_WRITECACHE "writecache"
#define TARGET_NAME_INTEGRITY "integrity"
#define TARGET_NAME_ERROR "error"
#define TARGET_NAME_ERROR_OLD "erro" /* Truncated in older kernels */
#define TARGET_NAME_LINEAR "linear"
#define TARGET_NAME_MIRROR "mirror"
#define TARGET_NAME_RAID "raid"
#define TARGET_NAME_SNAPSHOT "snapshot"
#define TARGET_NAME_SNAPSHOT_MERGE "snapshot-merge"
#define TARGET_NAME_SNAPSHOT_ORIGIN "snapshot-origin"
#define TARGET_NAME_STRIPED "striped"
#define TARGET_NAME_THIN "thin"
#define TARGET_NAME_THIN_POOL "thin-pool"
#define TARGET_NAME_VDO "vdo"
#define TARGET_NAME_ZERO "zero"
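
/*
 * Usage sketch (illustrative only): verify that a target is available,
 * trying modprobe if it is not loaded yet:
 *
 *	if (!target_present(cmd, TARGET_NAME_THIN_POOL, 1))
 *		... thin-pool target is missing ...
 */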
#define MODULE_NAME_CLUSTERED_MIRROR "clog"
#define MODULE_NAME_CACHE TARGET_NAME_CACHE
#define MODULE_NAME_WRITECACHE TARGET_NAME_WRITECACHE
#define MODULE_NAME_INTEGRITY TARGET_NAME_INTEGRITY
#define MODULE_NAME_ERROR TARGET_NAME_ERROR
#define MODULE_NAME_LOG_CLUSTERED "log-clustered"
#define MODULE_NAME_LOG_USERSPACE "log-userspace"
#define MODULE_NAME_MIRROR TARGET_NAME_MIRROR
#define MODULE_NAME_SNAPSHOT TARGET_NAME_SNAPSHOT
#define MODULE_NAME_RAID TARGET_NAME_RAID
#define MODULE_NAME_VDO "kvdo" /* does NOT use dm- prefix */
#define MODULE_NAME_ZERO TARGET_NAME_ZERO
#endif