lvm2/lib/device/dev-type.c

/*
* Copyright (C) 2013 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "base/memory/zalloc.h"
#include "lib/misc/lib.h"
#include "lib/device/dev-type.h"
#include "lib/device/device-types.h"
#include "lib/mm/xlate.h"
#include "lib/config/config.h"
#include "lib/metadata/metadata.h"
#include "lib/device/bcache.h"
#include "lib/label/label.h"
#include "lib/commands/toolcontext.h"
#include "lib/activate/activate.h"
#include "device_mapper/misc/dm-ioctl.h"
#ifdef BLKID_WIPING_SUPPORT
#include <blkid/blkid.h>
#endif
#ifdef UDEV_SYNC_SUPPORT
#include <libudev.h>
#include "lib/device/dev-ext-udev-constants.h"
#endif
#include <libgen.h>
#include <ctype.h>
#include <dirent.h>
/*
* An nvme device has major number 259 (BLKEXT), minor number <minor>,
* and reading /sys/dev/block/259:<minor>/device/dev shows a character
* device cmajor:cminor where cmajor matches the major number of the
* nvme character device entry in /proc/devices. Checking all of that
* is excessive and unnecessary compared to just comparing /dev/name*.
*/
int dev_is_nvme(struct dev_types *dt, struct device *dev)
{
return (dev->flags & DEV_IS_NVME) ? 1 : 0;
}
int dev_is_lv(struct cmd_context *cmd, struct device *dev)
{
char buffer[128];
if (device_get_uuid(cmd, MAJOR(dev->dev), MINOR(dev->dev),
buffer, sizeof(buffer)) &&
!strncmp(buffer, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
return 1;
return 0;
}
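/*
 * Illustrative note (assumption, hedged): device_get_uuid() reports the DM
 * UUID of the dm device, and only LVM-created devices carry the "LVM-"
 * prefix (UUID_PREFIX); other dm targets typically use other prefixes,
 * e.g. "CRYPT-" for dm-crypt or "mpath-" for multipath, so the prefix
 * comparison above is enough to tell an LV apart from any other dm device.
 */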
int dev_is_used_by_active_lv(struct cmd_context *cmd, struct device *dev, int *used_by_lv_count,
char **used_by_dm_name, char **used_by_vg_uuid, char **used_by_lv_uuid)
{
char holders_path[PATH_MAX];
char dm_dev_path[PATH_MAX];
char dm_uuid[DM_UUID_LEN];
struct stat info;
DIR *d;
struct dirent *dirent;
char *holder_name;
unsigned dm_dev_major, dm_dev_minor;
const size_t lvm_prefix_len = sizeof(UUID_PREFIX) - 1;
const size_t lvm_uuid_len = lvm_prefix_len + 2 * ID_LEN;
size_t uuid_len;
int used_count = 0;
char *used_name = NULL;
char *used_vgid = NULL;
char *used_lvid = NULL;
/*
* An LV using this device will be listed as a "holder" in the device's
* sysfs "holders" dir.
*/
if (dm_snprintf(holders_path, sizeof(holders_path), "%sdev/block/%u:%u/holders/",
dm_sysfs_dir(), MAJOR(dev->dev), MINOR(dev->dev)) < 0) {
log_error("%s: dm_snprintf failed for path to holders directory.", dev_name(dev));
return 0;
}
if (!(d = opendir(holders_path)))
return 0;
while ((dirent = readdir(d))) {
if (!strcmp(".", dirent->d_name) || !strcmp("..", dirent->d_name))
continue;
holder_name = dirent->d_name;
/*
* dirent->d_name is the dev name of the holder, e.g. "dm-1"
* from this name, create path "/dev/dm-1" to run stat on.
*/
if (dm_snprintf(dm_dev_path, sizeof(dm_dev_path), "%s/%s", cmd->dev_dir, holder_name) < 0)
continue;
/*
* stat "/dev/dm-1", which is the holder of the dev we're checking;
* dm_dev_major:dm_dev_minor come from stat("/dev/dm-1").
*/
if (stat(dm_dev_path, &info))
continue;
dm_dev_major = MAJOR(info.st_rdev);
dm_dev_minor = MINOR(info.st_rdev);
if (dm_dev_major != cmd->dev_types->device_mapper_major)
continue;
/*
* if "dm-1" is a dm device, then check if it's an LVM LV
* by reading DM status and seeing if the uuid begins
* with UUID_PREFIX ("LVM-")
*/
if (!device_get_uuid(cmd, dm_dev_major, dm_dev_minor, dm_uuid, sizeof(dm_uuid)))
continue;
if (!strncmp(dm_uuid, UUID_PREFIX, lvm_prefix_len))
used_count++;
if (used_by_dm_name && !used_name)
used_name = dm_pool_strdup(cmd->mem, holder_name);
if (!used_by_vg_uuid && !used_by_lv_uuid)
continue;
/*
* UUID for LV is either "LVM-<vg_uuid><lv_uuid>" or
* "LVM-<vg_uuid><lv_uuid>-<suffix>", where vg_uuid and lv_uuid
* have a length of ID_LEN and the suffix length is not restricted
* (it is limited only by the whole DM UUID max length).
*/
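/*
 * Illustrative layout (example only, assuming the usual ID_LEN of 32):
 *   "LVM-" + 32-char vg_uuid + 32-char lv_uuid            - the LV itself
 *   "LVM-" + 32-char vg_uuid + 32-char lv_uuid + "-cow"   - e.g. a hidden
 *                                                            snapshot device
 * The "-cow" suffix is just one possible private-device suffix.
 */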
uuid_len = strlen(dm_uuid);
if (((uuid_len == lvm_uuid_len) ||
((uuid_len > lvm_uuid_len) && (dm_uuid[lvm_uuid_len] == '-'))) &&
!strncmp(dm_uuid, UUID_PREFIX, lvm_prefix_len)) {
if (used_by_vg_uuid && !used_vgid)
used_vgid = dm_pool_strndup(cmd->mem, dm_uuid + lvm_prefix_len, ID_LEN);
if (used_by_lv_uuid && !used_lvid)
used_lvid = dm_pool_strndup(cmd->mem, dm_uuid + lvm_prefix_len + ID_LEN, ID_LEN);
}
}
if (closedir(d))
log_sys_debug("closedir", holders_path);
if (used_by_lv_count)
*used_by_lv_count = used_count;
if (used_by_dm_name)
*used_by_dm_name = used_name;
if (used_by_vg_uuid)
*used_by_vg_uuid = used_vgid;
if (used_by_lv_uuid)
*used_by_lv_uuid = used_lvid;
if (used_count)
return 1;
return 0;
}
struct dev_types *create_dev_types(const char *proc_dir,
const struct dm_config_node *cn)
{
struct dev_types *dt;
char line[80];
char proc_devices[PATH_MAX];
FILE *pd = NULL;
int i, j = 0;
int line_maj = 0;
int blocksection = 0;
size_t dev_len = 0;
const struct dm_config_value *cv;
const char *name;
char *nl;
if (!(dt = zalloc(sizeof(struct dev_types)))) {
log_error("Failed to allocate device type register.");
return NULL;
}
if (!*proc_dir) {
log_verbose("No proc filesystem found: using all block device types");
for (i = 0; i < NUMBER_OF_MAJORS; i++)
dt->dev_type_array[i].max_partitions = 1;
return dt;
}
if (dm_snprintf(proc_devices, sizeof(proc_devices),
"%s/devices", proc_dir) < 0) {
log_error("Failed to create /proc/devices string");
goto bad;
}
if (!(pd = fopen(proc_devices, "r"))) {
log_sys_error("fopen", proc_devices);
goto bad;
}
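/*
 * Illustrative /proc/devices excerpt (example only, majors vary by system).
 * The loop below skips the "Character devices:" section and only parses
 * entries under "Block devices:":
 *
 *   Block devices:
 *     7 loop
 *     8 sd
 *     9 md
 *   253 device-mapper
 *   259 blkext
 */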
while (fgets(line, sizeof(line), pd) != NULL) {
i = 0;
while (line[i] == ' ')
i++;
/* If it's not a number it may be the name of a section */
line_maj = atoi(line + i);
if (line_maj < 0 || line_maj >= NUMBER_OF_MAJORS) {
/*
* Device numbers shown in /proc/devices are actually the direct
* numbers passed to the registering function; however, the kernel
* uses only 12 bits, so use just 12 bits for the major.
*/
if ((nl = strchr(line, '\n'))) *nl = '\0';
log_warn("WARNING: /proc/devices line: %s, replacing major with %d.",
line, line_maj & (NUMBER_OF_MAJORS - 1));
line_maj &= (NUMBER_OF_MAJORS - 1);
}
if (!line_maj) {
blocksection = (line[i] == 'B') ? 1 : 0;
continue;
}
/* We only want block devices ... */
if (!blocksection)
continue;
/* Find the start of the device major name */
while (line[i] != ' ' && line[i] != '\0')
i++;
while (line[i] == ' ')
i++;
/* Major is SCSI device */
if (!strncmp("sd", line + i, 2) && isspace(*(line + i + 2)))
dt->dev_type_array[line_maj].flags |= PARTITION_SCSI_DEVICE;
else if (!strncmp("loop", line + i, 4) && isspace(*(line + i + 4)))
dt->loop_major = line_maj;
/* Look for device-mapper device */
/* FIXME Cope with multiple majors */
else if (!strncmp("device-mapper", line + i, 13) && isspace(*(line + i + 13)))
dt->device_mapper_major = line_maj;
/* Look for md device */
else if (!strncmp("md", line + i, 2) && isspace(*(line + i + 2)))
dt->md_major = line_maj;
/* Look for blkext device */
else if (!strncmp("blkext", line + i, 6) && isspace(*(line + i + 6)))
dt->blkext_major = line_maj;
/* Look for drbd device */
else if (!strncmp("drbd", line + i, 4) && isspace(*(line + i + 4)))
dt->drbd_major = line_maj;
/* Look for DASD */
else if (!strncmp("dasd", line + i, 4) && isspace(*(line + i + 4)))
dt->dasd_major = line_maj;
/* Look for EMC powerpath */
else if (!strncmp("emcpower", line + i, 8) && isspace(*(line + i + 8)))
dt->emcpower_major = line_maj;
/* Look for Veritas Dynamic Multipathing */
else if (!strncmp("VxDMP", line + i, 5) && isspace(*(line + i + 5)))
dt->vxdmp_major = line_maj;
else if (!strncmp("power2", line + i, 6) && isspace(*(line + i + 6)))
dt->power2_major = line_maj;
/* Go through the valid device names and if there is a
match, store the max number of partitions */
for (j = 0; _dev_known_types[j].name[0]; j++) {
dev_len = strlen(_dev_known_types[j].name);
if (dev_len <= strlen(line + i) &&
!strncmp(_dev_known_types[j].name, line + i, dev_len) &&
(line_maj < NUMBER_OF_MAJORS)) {
dt->dev_type_array[line_maj].max_partitions =
_dev_known_types[j].max_partitions;
break;
}
}
if (!cn)
continue;
/* Check devices/types for local variations */
for (cv = cn->v; cv; cv = cv->next) {
if (cv->type != DM_CFG_STRING) {
log_error("Expecting string in devices/types "
"in config file");
if (fclose(pd))
log_sys_debug("fclose", proc_devices);
goto bad;
}
dev_len = strlen(cv->v.str);
name = cv->v.str;
cv = cv->next;
if (!cv || cv->type != DM_CFG_INT) {
log_error("Max partition count missing for %s "
"in devices/types in config file",
name);
if (fclose(pd))
log_sys_debug("fclose", proc_devices);
goto bad;
}
if (!cv->v.i) {
log_error("Zero partition count invalid for "
"%s in devices/types in config file",
name);
if (fclose(pd))
log_sys_debug("fclose", proc_devices);
goto bad;
}
if (dev_len <= strlen(line + i) &&
!strncmp(name, line + i, dev_len) &&
(line_maj < NUMBER_OF_MAJORS)) {
dt->dev_type_array[line_maj].max_partitions = cv->v.i;
break;
}
}
}
if (fclose(pd))
log_sys_debug("fclose", proc_devices);
return dt;
bad:
free(dt);
return NULL;
}
int dev_subsystem_part_major(struct dev_types *dt, struct device *dev)
{
dev_t primary_dev;
if (MAJOR(dev->dev) == dt->device_mapper_major)
return 1;
if (MAJOR(dev->dev) == dt->md_major)
return 1;
if (MAJOR(dev->dev) == dt->drbd_major)
return 1;
if (MAJOR(dev->dev) == dt->emcpower_major)
return 1;
if (MAJOR(dev->dev) == dt->power2_major)
return 1;
if (MAJOR(dev->dev) == dt->vxdmp_major)
return 1;
if ((MAJOR(dev->dev) == dt->blkext_major) &&
dev_get_primary_dev(dt, dev, &primary_dev) &&
(MAJOR(primary_dev) == dt->md_major))
return 1;
return 0;
}
const char *dev_subsystem_name(struct dev_types *dt, struct device *dev)
{
if (dev->flags & DEV_IS_NVME)
return "NVME";
if (MAJOR(dev->dev) == dt->device_mapper_major)
return "DM";
if (MAJOR(dev->dev) == dt->md_major)
return "MD";
if (MAJOR(dev->dev) == dt->drbd_major)
return "DRBD";
if (MAJOR(dev->dev) == dt->dasd_major)
return "DASD";
if (MAJOR(dev->dev) == dt->emcpower_major)
return "EMCPOWER";
if (MAJOR(dev->dev) == dt->power2_major)
return "POWER2";
if (MAJOR(dev->dev) == dt->vxdmp_major)
return "VXDMP";
if (MAJOR(dev->dev) == dt->blkext_major)
return "BLKEXT";
if (MAJOR(dev->dev) == dt->loop_major)
return "LOOP";
return "";
}
int major_max_partitions(struct dev_types *dt, int major)
{
if (major >= NUMBER_OF_MAJORS)
return 0;
return dt->dev_type_array[major].max_partitions;
}
int major_is_scsi_device(struct dev_types *dt, int major)
{
if (major >= NUMBER_OF_MAJORS)
return 0;
return (dt->dev_type_array[major].flags & PARTITION_SCSI_DEVICE) ? 1 : 0;
}
static int _loop_is_with_partscan(struct device *dev)
{
FILE *fp;
int partscan = 0;
char path[PATH_MAX];
char buffer[64];
if (dm_snprintf(path, sizeof(path), "%sdev/block/%u:%u/loop/partscan",
dm_sysfs_dir(), MAJOR(dev->dev), MINOR(dev->dev)) < 0) {
log_warn("Sysfs path for partscan is too long.");
return 0;
}
if (!(fp = fopen(path, "r")))
return 0; /* not there -> no partscan */
if (!fgets(buffer, sizeof(buffer), fp)) {
log_warn("Failed to read %s.", path);
} else if (sscanf(buffer, "%d", &partscan) != 1) {
log_warn("Failed to parse %s '%s'.", path, buffer);
partscan = 0;
}
if (fclose(fp))
log_sys_debug("fclose", path);
return partscan;
}
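/*
 * Illustrative note (assumption about typical usage): the partscan attribute
 * read above is the 0/1 flag exposed by the loop driver, e.g. a device set up
 * with "losetup --partscan" reports 1 in .../loop/partscan, while a plain
 * loop device reports 0 (or the file is missing on old kernels, which the
 * failed fopen() above treats as "no partscan").
 */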
int dev_get_partition_number(struct device *dev, int *num)
{
char path[PATH_MAX];
char buf[8] = { 0 };
dev_t devt = dev->dev;
struct stat sb;
if (dev->part != -1) {
*num = dev->part;
return 1;
}
if (dm_snprintf(path, sizeof(path), "%sdev/block/%u:%u/partition",
dm_sysfs_dir(), MAJOR(devt), MINOR(devt)) < 0) {
log_error("Failed to create sysfs path for %s", dev_name(dev));
return 0;
}
if (stat(path, &sb)) {
dev->part = 0;
*num = 0;
return 1;
}
if (!get_sysfs_value(path, buf, sizeof(buf), 0)) {
log_error("Failed to read sysfs path for %s", dev_name(dev));
return 0;
}
if (!buf[0]) {
log_error("Failed to read sysfs partition value for %s", dev_name(dev));
return 0;
}
dev->part = atoi(buf);
*num = dev->part;
return 1;
}
/* See linux/genhd.h and fs/partitions/msdos */
#define PART_MAGIC 0xAA55
#define PART_MAGIC_OFFSET UINT64_C(0x1FE)
#define PART_OFFSET UINT64_C(0x1BE)
struct partition {
uint8_t boot_ind;
uint8_t head;
uint8_t sector;
uint8_t cyl;
uint8_t sys_ind; /* partition type */
uint8_t end_head;
uint8_t end_sector;
uint8_t end_cyl;
uint32_t start_sect;
uint32_t nr_sects;
} __attribute__((packed));
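/*
 * Illustrative MBR layout matching the constants above (sketch only):
 * a 512-byte sector where the four 16-byte partition entries (struct
 * partition) start at PART_OFFSET (0x1BE = 446) and the 2-byte magic
 * 0xAA55 sits at PART_MAGIC_OFFSET (0x1FE = 510), i.e. exactly the
 * anonymous buf struct read by _has_partition_table() below.
 */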
static int _has_sys_partition(struct device *dev)
{
char path[PATH_MAX];
struct stat info;
unsigned major = MAJOR(dev->dev);
unsigned minor = MINOR(dev->dev);
/* check if dev is a partition */
if (dm_snprintf(path, sizeof(path), "%sdev/block/%u:%u/partition",
dm_sysfs_dir(), major, minor) < 0) {
log_warn("WARNING: %s: partition path is too long.", dev_name(dev));
return 0;
}
if (stat(path, &info) == -1) {
if (errno != ENOENT)
log_sys_debug("stat", path);
return 0;
}
return 1;
}
static int _is_partitionable(struct dev_types *dt, struct device *dev)
{
int parts = major_max_partitions(dt, MAJOR(dev->dev));
if (MAJOR(dev->dev) == dt->device_mapper_major)
return 1;
/* All MD devices are partitionable via blkext (as of 2.6.28) */
if (MAJOR(dev->dev) == dt->md_major)
return 1;
/* All loop devices are partitionable via blkext (as of 3.2) */
if ((MAJOR(dev->dev) == dt->loop_major) &&
_loop_is_with_partscan(dev))
return 1;
if (dev_is_nvme(dt, dev)) {
/* If this dev is already a partition then it's not partitionable. */
if (_has_sys_partition(dev))
return 0;
return 1;
}
if ((parts <= 1) || (MINOR(dev->dev) % parts))
return 0;
return 1;
}
static int _has_partition_table(struct device *dev)
{
int ret = 0;
unsigned p;
struct {
uint8_t skip[PART_OFFSET];
struct partition part[4];
uint16_t magic;
} __attribute__((packed)) buf; /* sizeof() == SECTOR_SIZE */
if (!dev_read_bytes(dev, UINT64_C(0), sizeof(buf), &buf))
return_0;
/* FIXME Check for other types of partition table too */
/* Check for msdos partition table */
if (buf.magic == xlate16(PART_MAGIC)) {
for (p = 0; p < 4; ++p) {
/* Table is invalid if boot indicator not 0 or 0x80 */
if (buf.part[p].boot_ind & 0x7f) {
ret = 0;
break;
}
/* Must have at least one non-empty partition */
if (buf.part[p].nr_sects)
ret = 1;
}
}
return ret;
}
#ifdef UDEV_SYNC_SUPPORT
static int _dev_is_partitioned_udev(struct dev_types *dt, struct device *dev)
{
struct dev_ext *ext;
struct udev_device *device;
const char *value;
/*
* external_device_info_source="udev" enables these udev checks.
* external_device_info_source="none" disables them.
*/
if (!(ext = dev_ext_get(dev)))
return_0;
device = (struct udev_device *) ext->handle;
if (!(value = udev_device_get_property_value(device, DEV_EXT_UDEV_BLKID_PART_TABLE_TYPE)))
return 0;
/*
* Device-mapper devices have DEV_EXT_UDEV_BLKID_PART_TABLE_TYPE
* variable set if a partition table is found on the whole device.
* Partitions do not have this variable set - it's enough to use
* only this variable to decide whether this device has a partition
* table on it.
*/
if (MAJOR(dev->dev) == dt->device_mapper_major)
return 1;
/*
* Other devices have DEV_EXT_UDEV_BLKID_PART_TABLE_TYPE set for
* *both* whole device and partitions. We need to look at the
* DEV_EXT_UDEV_DEVTYPE in addition to decide - whole device
* with partition table on it has this variable set to
* DEV_EXT_UDEV_DEVTYPE_DISK.
*/
if (!(value = udev_device_get_property_value(device, DEV_EXT_UDEV_DEVTYPE)))
return_0;
return !strcmp(value, DEV_EXT_UDEV_DEVTYPE_DISK);
}
#else
static int _dev_is_partitioned_udev(struct dev_types *dt, struct device *dev)
{
return 0;
}
#endif
static int _dev_is_partitioned_native(struct dev_types *dt, struct device *dev)
2004-11-23 14:44:04 +03:00
{
int r;
/* Unpartitioned DASD devices are not supported. */
if ((MAJOR(dev->dev) == dt->dasd_major) && dasd_is_cdl_formatted(dev))
return 1;
r = _has_partition_table(dev);
return r;
}
int dev_is_partitioned(struct cmd_context *cmd, struct device *dev)
{
struct dev_types *dt = cmd->dev_types;
if (!_is_partitionable(dt, dev))
return 0;
if (_dev_is_partitioned_native(dt, dev) == 1)
return 1;
if (external_device_info_source() == DEV_EXT_UDEV) {
if (_dev_is_partitioned_udev(dt, dev) == 1)
return 1;
}
return 0;
2004-11-23 14:44:04 +03:00
}
/*
* Get primary dev for the dev supplied.
*
* We can get a primary device for a partition either by:
* A: knowing the number of partitions allowed for the dev and also
* which major:minor number represents the primary and partition device
* (by using the dev_types->dev_type_array)
* B: by the existence of the 'partition' sysfs attribute
* (/dev/block/<major>:<minor>/partition)
*
* Method A is tried first, then method B as a fallback if A fails.
*
* N.B. Method B can only decide based on the mere existence of
* the 'partition' sysfs item. There's no direct scan for partition
* tables whatsoever!
*
* Returns:
* 0 on error
* 1 if the dev is already a primary dev, primary dev in 'result'
* 2 if the dev is a partition, primary dev in 'result'
*/
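/*
 * Illustrative example of method A (sketch, assuming the conventional SCSI
 * disk numbering where an sd whole disk reserves 16 minors): for /dev/sda3 =
 * 8:3 the residue is 3 % 16 = 3, so the primary becomes 8:(3 - 3) = 8:0 and
 * 2 is returned; for the whole disk 8:0 the residue is 0, so the device
 * itself is returned with 1.
 */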
int dev_get_primary_dev(struct dev_types *dt, struct device *dev, dev_t *result)
{
unsigned major = MAJOR(dev->dev);
unsigned minor = MINOR(dev->dev);
char path[PATH_MAX];
char temp_path[PATH_MAX];
char buffer[64];
FILE *fp = NULL;
int parts, residue, size, ret = 0;
/*
* /dev/nvme devs don't use the major:minor numbering like
* block dev types that have their own major number, so
* the calculation based on minor number doesn't work.
*/
if (dev_is_nvme(dt, dev))
goto sys_partition;
/*
* Try to get the primary dev out of the
* list of known device types first.
*/
if ((parts = dt->dev_type_array[major].max_partitions) > 1) {
if ((residue = minor % parts)) {
*result = MKDEV(major, (minor - residue));
ret = 2;
} else {
*result = dev->dev;
ret = 1; /* dev is not a partition! */
}
goto out;
}
sys_partition:
/*
* If we can't get the primary dev out of the list of known device
* types, then try to look at sysfs directly. This is a more complex
* way and it also requires a certain sysfs layout to be present,
* which might not be there in old kernels!
*/
if (!_has_sys_partition(dev)) {
*result = dev->dev;
ret = 1;
goto out; /* dev is not a partition! */
}
/*
* extract parent's path from the partition's symlink, e.g.:
* - readlink /sys/dev/block/259:0 = ../../block/md0/md0p1
* - dirname ../../block/md0/md0p1 = ../../block/md0
* - basename ../../block/md0 = md0
* Parent's 'dev' sysfs attribute = /sys/block/md0/dev
*/
if (dm_snprintf(path, sizeof(path), "%sdev/block/%u:%u",
dm_sysfs_dir(), major, minor) < 0) {
log_warn("WARNING: %s: major:minor sysfs path is too long.", dev_name(dev));
return 0;
}
if ((size = readlink(path, temp_path, sizeof(temp_path) - 1)) < 0) {
log_warn("WARNING: Readlink of %s failed.", path);
goto out;
}
temp_path[size] = '\0';
if (dm_snprintf(path, sizeof(path), "%sblock/%s/dev",
dm_sysfs_dir(), basename(dirname(temp_path))) < 0) {
log_warn("WARNING: sysfs path for %s is too long.",
basename(dirname(temp_path)));
goto out;
}
/* finally, parse 'dev' attribute and create corresponding dev_t */
if (!(fp = fopen(path, "r"))) {
if (errno == ENOENT)
log_debug("sysfs file %s does not exist.", path);
else
log_sys_debug("fopen", path);
goto out;
}
if (!fgets(buffer, sizeof(buffer), fp)) {
log_sys_error("fgets", path);
goto out;
}
if (sscanf(buffer, "%d:%d", &major, &minor) != 2) {
log_warn("WARNING: sysfs file %s not in expected MAJ:MIN format: %s",
path, buffer);
goto out;
}
*result = MKDEV(major, minor);
ret = 2;
out:
if (fp && fclose(fp))
log_sys_debug("fclose", path);
return ret;
}
#ifdef BLKID_WIPING_SUPPORT
int fs_block_size_and_type(const char *pathname, uint32_t *fs_block_size_bytes, char *fstype, int *nofs)
{
blkid_probe probe = NULL;
const char *type_str = NULL, *size_str = NULL;
size_t len = 0;
int ret = 1;
int rc;
if (!(probe = blkid_new_probe_from_filename(pathname))) {
log_error("Failed libblkid probe setup for %s", pathname);
return 0;
}
blkid_probe_enable_superblocks(probe, 1);
blkid_probe_set_superblocks_flags(probe,
BLKID_SUBLKS_LABEL | BLKID_SUBLKS_LABELRAW |
BLKID_SUBLKS_UUID | BLKID_SUBLKS_UUIDRAW |
BLKID_SUBLKS_TYPE | BLKID_SUBLKS_SECTYPE |
BLKID_SUBLKS_USAGE | BLKID_SUBLKS_VERSION |
#ifdef BLKID_SUBLKS_FSINFO
BLKID_SUBLKS_FSINFO |
#endif
BLKID_SUBLKS_MAGIC);
rc = blkid_do_safeprobe(probe);
if (rc < 0) {
log_debug("Failed libblkid probe for %s", pathname);
ret = 0;
goto out;
} else if (rc == 1) {
/* no file system on the device */
log_debug("No file system found on %s.", pathname);
if (nofs)
*nofs = 1;
goto out;
}
if (!blkid_probe_lookup_value(probe, "TYPE", &type_str, &len) && len && type_str) {
if (fstype)
strncpy(fstype, type_str, FSTYPE_MAX);
} else {
/* any difference from blkid_do_safeprobe rc=1? */
log_debug("No file system type on %s.", pathname);
if (nofs)
*nofs = 1;
goto out;
}
if (fs_block_size_bytes) {
if (!blkid_probe_lookup_value(probe, "BLOCK_SIZE", &size_str, &len) && len && size_str)
*fs_block_size_bytes = atoi(size_str);
else
*fs_block_size_bytes = 0;
}
log_debug("Found blkid fstype %s fsblocksize %s on %s",
type_str ?: "none", size_str ?: "unused", pathname);
out:
blkid_free_probe(probe);
return ret;
}
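/*
 * Minimal usage sketch (hypothetical caller, with an assumed LV path):
 *
 *   uint32_t fs_block_size = 0;
 *   char fstype[FSTYPE_MAX + 1] = { 0 };
 *   int nofs = 0;
 *
 *   if (fs_block_size_and_type("/dev/vg/lv", &fs_block_size, fstype, &nofs)) {
 *           if (nofs)
 *                   log_debug("No file system detected.");
 *           else
 *                   log_debug("Found %s with block size %u.", fstype, fs_block_size);
 *   }
 */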
int fs_get_blkid(const char *pathname, struct fs_info *fsi)
{
blkid_probe probe = NULL;
const char *str = "";
size_t len = 0;
uint64_t fslastblock = 0;
unsigned int fsblocksize = 0;
int rc;
if (!(probe = blkid_new_probe_from_filename(pathname))) {
log_error("Failed libblkid probe setup for %s", pathname);
return 0;
}
blkid_probe_enable_superblocks(probe, 1);
blkid_probe_set_superblocks_flags(probe,
BLKID_SUBLKS_LABEL | BLKID_SUBLKS_LABELRAW |
BLKID_SUBLKS_UUID | BLKID_SUBLKS_UUIDRAW |
BLKID_SUBLKS_TYPE | BLKID_SUBLKS_SECTYPE |
BLKID_SUBLKS_USAGE | BLKID_SUBLKS_VERSION |
#ifdef BLKID_SUBLKS_FSINFO
BLKID_SUBLKS_FSINFO |
#endif
BLKID_SUBLKS_MAGIC);
rc = blkid_do_safeprobe(probe);
if (rc < 0) {
log_error("Failed libblkid probe for %s", pathname);
blkid_free_probe(probe);
return 0;
} else if (rc == 1) {
/* no file system on the device */
log_print_unless_silent("No file system found on %s.", pathname);
fsi->nofs = 1;
blkid_free_probe(probe);
return 1;
}
if (!blkid_probe_lookup_value(probe, "TYPE", &str, &len) && len)
strncpy(fsi->fstype, str, sizeof(fsi->fstype)-1);
else {
/* any difference from blkid_do_safeprobe rc=1? */
log_print_unless_silent("No file system type on %s.", pathname);
fsi->nofs = 1;
blkid_free_probe(probe);
return 1;
}
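/*
 * BLOCK_SIZE is the fs block size reported by the superblock probe.
 * FSLASTBLOCK and FSBLOCKSIZE are the newer libblkid FSINFO fields;
 * their product is the last byte used by the fs on the device.
 */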
if (!blkid_probe_lookup_value(probe, "BLOCK_SIZE", &str, &len) && len)
fsi->fs_block_size_bytes = atoi(str);
if (!blkid_probe_lookup_value(probe, "FSLASTBLOCK", &str, &len) && len)
fslastblock = strtoull(str, NULL, 0);
if (!blkid_probe_lookup_value(probe, "FSBLOCKSIZE", &str, &len) && len)
fsblocksize = (unsigned int)atoi(str);
blkid_free_probe(probe);
if (fslastblock && fsblocksize)
fsi->fs_last_byte = fslastblock * fsblocksize;
log_debug("libblkid TYPE %s BLOCK_SIZE %d FSLASTBLOCK %llu FSBLOCKSIZE %u fs_last_byte %llu",
fsi->fstype, fsi->fs_block_size_bytes, (unsigned long long)fslastblock, fsblocksize,
(unsigned long long)fsi->fs_last_byte);
return 1;
}
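/*
 * Usage sketch (illustrative only; lv_path and new_size_bytes are
 * placeholder names, not existing lvm identifiers): a caller deciding
 * whether the fs must be shrunk before reducing an LV could check the
 * last byte used by the fs:
 *
 *	struct fs_info fsi = { 0 };
 *
 *	if (!fs_get_blkid(lv_path, &fsi))
 *		... probe failed, handle error ...
 *	else if (fsi.nofs || !fsi.fs_last_byte)
 *		... no fs or no FSINFO fields, fall back to other handling ...
 *	else if (fsi.fs_last_byte <= new_size_bytes)
 *		... fs does not use the space being removed ...
 *
 * fs_last_byte is 0 when libblkid lacks the FSINFO fields.
 */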
#else
int fs_block_size_and_type(const char *pathname, uint32_t *fs_block_size_bytes, char *fstype, int *nofs)
{
log_debug("Disabled blkid BLOCK_SIZE for fs.");
return 0;
}
int fs_get_blkid(const char *pathname, struct fs_info *fsi)
{
log_debug("Disabled blkid for fs info.");
return 0;
}
#endif
#ifdef BLKID_WIPING_SUPPORT
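/* Return 1 if the blkid TYPE string matches one of the types selected in flag_list. */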
static inline int _type_in_flag_list(const char *type, uint32_t flag_list)
{
return (((flag_list & TYPE_LVM2_MEMBER) && !strcmp(type, "LVM2_member")) ||
((flag_list & TYPE_LVM1_MEMBER) && !strcmp(type, "LVM1_member")) ||
((flag_list & TYPE_DM_SNAPSHOT_COW) && !strcmp(type, "DM_snapshot_cow")));
}
#define MSG_FAILED_SIG_OFFSET "Failed to get offset of the %s signature on %s."
#define MSG_FAILED_SIG_LENGTH "Failed to get length of the %s signature on %s."
#define MSG_WIPING_SKIPPED " Wiping skipped."
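/*
 * Wipe one signature reported by the blkid probe.
 * Returns 1 when the signature is wiped, 2 when wiping is skipped
 * (excluded type, or a missing offset/length when the force level allows
 * skipping), and 0 on error or when the user declines the prompt.
 */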
static int _blkid_wipe(blkid_probe probe, struct device *dev, const char *name,
uint32_t types_to_exclude, uint32_t types_no_prompt,
int yes, force_t force)
{
static const char _msg_wiping[] = "Wiping %s signature on %s.";
const char *offset = NULL, *type = NULL, *magic = NULL,
*usage = NULL, *label = NULL, *uuid = NULL;
loff_t offset_value;
size_t len = 0;
if (!blkid_probe_lookup_value(probe, "TYPE", &type, NULL)) {
if (_type_in_flag_list(type, types_to_exclude))
return 2;
if (blkid_probe_lookup_value(probe, "SBMAGIC_OFFSET", &offset, NULL)) {
if (force < DONT_PROMPT) {
log_error(MSG_FAILED_SIG_OFFSET, type, name);
return 0;
}
log_warn("WARNING: " MSG_FAILED_SIG_OFFSET MSG_WIPING_SKIPPED, type, name);
return 2;
}
if (blkid_probe_lookup_value(probe, "SBMAGIC", &magic, &len)) {
if (force < DONT_PROMPT) {
log_error(MSG_FAILED_SIG_LENGTH, type, name);
return 0;
}
log_warn("WARNING: " MSG_FAILED_SIG_LENGTH MSG_WIPING_SKIPPED, type, name);
return 2;
}
} else if (!blkid_probe_lookup_value(probe, "PTTYPE", &type, NULL)) {
if (blkid_probe_lookup_value(probe, "PTMAGIC_OFFSET", &offset, NULL)) {
if (force < DONT_PROMPT) {
log_error(MSG_FAILED_SIG_OFFSET, type, name);
return 0;
}
log_warn("WARNING: " MSG_FAILED_SIG_OFFSET MSG_WIPING_SKIPPED, type, name);
return 2;
}
if (blkid_probe_lookup_value(probe, "PTMAGIC", &magic, &len)) {
if (force < DONT_PROMPT) {
log_error(MSG_FAILED_SIG_LENGTH, type, name);
return 0;
}
log_warn("WARNING: " MSG_FAILED_SIG_LENGTH MSG_WIPING_SKIPPED, type, name);
return 2;
}
usage = "partition table";
} else
return_0;
offset_value = strtoll(offset, NULL, 10);
if (!usage)
(void) blkid_probe_lookup_value(probe, "USAGE", &usage, NULL);
(void) blkid_probe_lookup_value(probe, "LABEL", &label, NULL);
(void) blkid_probe_lookup_value(probe, "UUID", &uuid, NULL);
/* Return values ignored here, in the worst case we print NULL */
log_verbose("Found existing signature on %s at offset %s: LABEL=\"%s\" "
"UUID=\"%s\" TYPE=\"%s\" USAGE=\"%s\"",
name, offset, label, uuid, type, usage);
if (!_type_in_flag_list(type, types_no_prompt)) {
if (!yes && (force == PROMPT) &&
yes_no_prompt("WARNING: %s signature detected on %s at offset %s. "
"Wipe it? [y/n]: ", type, name, offset) == 'n') {
log_error("Aborted wiping of %s.", type);
return 0;
}
log_print_unless_silent(_msg_wiping, type, name);
} else
log_verbose(_msg_wiping, type, name);
if (!dev_write_zeros(dev, offset_value, len)) {
log_error("Failed to wipe %s signature on %s.", type, name);
return 0;
}
return 1;
}
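/*
 * Run the blkid probe over dev and offer to wipe each signature found.
 * Returns 1 when no unwiped signatures remain (none found or all wiped),
 * 0 on error or when signatures are left on the device.
 */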
static int _wipe_known_signatures_with_blkid(struct device *dev, const char *name,
uint32_t types_to_exclude,
uint32_t types_no_prompt,
int yes, force_t force, int *wiped)
{
blkid_probe probe = NULL;
int found = 0, left = 0, wiped_tmp;
int r_wipe;
int r = 0;
if (!wiped)
wiped = &wiped_tmp;
*wiped = 0;
/* TODO: Should we check for valid dev - _dev_is_valid(dev)? */
if (dm_list_empty(&dev->aliases))
goto_out;
if (!(probe = blkid_new_probe_from_filename(dev_name(dev)))) {
log_error("Failed to create a new blkid probe for device %s.", dev_name(dev));
goto out;
}
blkid_probe_enable_partitions(probe, 1);
blkid_probe_set_partitions_flags(probe, BLKID_PARTS_MAGIC);
blkid_probe_enable_superblocks(probe, 1);
blkid_probe_set_superblocks_flags(probe, BLKID_SUBLKS_LABEL |
BLKID_SUBLKS_UUID |
BLKID_SUBLKS_TYPE |
BLKID_SUBLKS_USAGE |
BLKID_SUBLKS_VERSION |
BLKID_SUBLKS_MAGIC |
BLKID_SUBLKS_BADCSUM);
while (!blkid_do_probe(probe)) {
if ((r_wipe = _blkid_wipe(probe, dev, name, types_to_exclude, types_no_prompt, yes, force)) == 1) {
(*wiped)++;
if (blkid_probe_step_back(probe)) {
log_error("Failed to step back blkid probe to check just wiped signature.");
goto out;
}
}
/* do not count excluded types */
if (r_wipe != 2)
found++;
}
if (!found)
r = 1;
left = found - *wiped;
if (!left)
r = 1;
else
log_warn("%d existing signature%s left on the device.",
left, left > 1 ? "s" : "");
out:
if (probe)
blkid_free_probe(probe);
return r;
}
#endif /* BLKID_WIPING_SUPPORT */
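/*
 * Detect one signature type with the given lvm-native detection function
 * and wipe it after an optional prompt.  Returns 1 when nothing is found
 * or the signature is wiped, 0 on detection error, user abort, or a
 * failed write.
 */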
static int _wipe_signature(struct cmd_context *cmd, struct device *dev, const char *type, const char *name,
int wipe_len, int yes, force_t force, int *wiped,
int (*signature_detection_fn)(struct cmd_context *cmd, struct device *dev, uint64_t *offset_found, int full))
{
int wipe;
uint64_t offset_found = 0;
wipe = signature_detection_fn(cmd, dev, &offset_found, 1);
if (wipe == -1) {
log_error("Fatal error while trying to detect %s on %s.",
type, name);
return 0;
}
if (wipe == 0)
return 1;
/* Specifying --yes => do not ask. */
if (!yes && (force == PROMPT) &&
yes_no_prompt("WARNING: %s detected on %s. Wipe it? [y/n]: ",
type, name) == 'n') {
log_error("Aborted wiping of %s.", type);
return 0;
}
log_print_unless_silent("Wiping %s on %s.", type, name);
if (!dev_write_zeros(dev, offset_found, wipe_len)) {
log_error("Failed to wipe %s on %s.", type, name);
return 0;
}
(*wiped)++;
return 1;
}
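/* Native fallback: only md, swap and LUKS signatures are checked; the type filters are ignored. */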
static int _wipe_known_signatures_with_lvm(struct cmd_context *cmd, struct device *dev, const char *name,
uint32_t types_to_exclude __attribute__((unused)),
uint32_t types_no_prompt __attribute__((unused)),
int yes, force_t force, int *wiped)
{
int wiped_tmp;
if (!wiped)
wiped = &wiped_tmp;
*wiped = 0;
if (!_wipe_signature(cmd, dev, "software RAID md superblock", name, 4, yes, force, wiped, dev_is_md_component) ||
!_wipe_signature(cmd, dev, "swap signature", name, 10, yes, force, wiped, dev_is_swap) ||
!_wipe_signature(cmd, dev, "LUKS signature", name, 8, yes, force, wiped, dev_is_luks))
return 0;
return 1;
}
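/*
 * Wipe known signatures on dev.  Uses libblkid when compiled in and
 * enabled by allocation/use_blkid_wiping, otherwise falls back to the
 * native lvm detection above.
 */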
int wipe_known_signatures(struct cmd_context *cmd, struct device *dev,
const char *name, uint32_t types_to_exclude,
uint32_t types_no_prompt, int yes, force_t force,
int *wiped)
{
int blkid_wiping_enabled = find_config_tree_bool(cmd, allocation_use_blkid_wiping_CFG, NULL);
#ifdef BLKID_WIPING_SUPPORT
if (blkid_wiping_enabled)
return _wipe_known_signatures_with_blkid(dev, name,
types_to_exclude,
types_no_prompt,
yes, force, wiped);
#endif
if (blkid_wiping_enabled) {
log_warn("WARNING: allocation/use_blkid_wiping=1 configuration setting is set "
"while LVM is not compiled with blkid wiping support.");
log_warn("WARNING: Falling back to native LVM signature detection.");
}
return _wipe_known_signatures_with_lvm(cmd, dev, name,
types_to_exclude,
types_no_prompt,
yes, force, wiped);
}
#ifdef __linux__
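/* Build the sysfs path <sysfs_dir>dev/block/<major>:<minor>/<attribute>. */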
static int _snprintf_attr(char *buf, size_t buf_size, const char *sysfs_dir,
const char *attribute, dev_t dev)
{
if (dm_snprintf(buf, buf_size, "%sdev/block/%u:%u/%s", sysfs_dir,
MAJOR(dev), MINOR(dev), attribute) < 0) {
log_warn("WARNING: sysfs path for %s attribute is too long.", attribute);
return 0;
}
return 1;
}
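/*
 * Read an unsigned long value from the device's sysfs block attribute,
 * falling back to the primary (whole-disk) device when the attribute
 * does not exist for a partition.  Returns 1 on success, 0 otherwise.
 */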
static int _dev_sysfs_block_attribute(struct dev_types *dt,
const char *attribute,
struct device *dev,
unsigned long *value)
{
const char *sysfs_dir = dm_sysfs_dir();
char path[PATH_MAX], buffer[64];
FILE *fp;
dev_t primary = 0;
int ret = 0;
if (!attribute || !*attribute)
goto_out;
if (!sysfs_dir || !*sysfs_dir)
goto_out;
if (!_snprintf_attr(path, sizeof(path), sysfs_dir, attribute, dev->dev))
goto_out;
/*
* check if the desired sysfs attribute exists
* - if not: either the kernel doesn't have topology support
* or the device could be a partition
*/
if (!(fp = fopen(path, "r"))) {
if (errno != ENOENT) {
log_sys_debug("fopen", path);
goto out;
}
if (!dev_get_primary_dev(dt, dev, &primary))
goto out;
/* get attribute from partition's primary device */
if (!_snprintf_attr(path, sizeof(path), sysfs_dir, attribute, primary))
goto_out;
if (!(fp = fopen(path, "r"))) {
if (errno != ENOENT)
log_sys_debug("fopen", path);
goto out;
}
}
if (!fgets(buffer, sizeof(buffer), fp)) {
log_sys_debug("fgets", path);
goto out_close;
}
if (sscanf(buffer, "%lu", value) != 1) {
log_warn("WARNING: sysfs file %s not in expected format: %s", path, buffer);
goto out_close;
}
ret = 1;
out_close:
if (fclose(fp))
log_sys_debug("fclose", path);
out:
return ret;
}
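/*
 * Return a sysfs topology attribute converted from bytes to sectors, or
 * default_value when the attribute cannot be read.  A nonzero value
 * smaller than one sector is rounded up to one sector.
 */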
static unsigned long _dev_topology_attribute(struct dev_types *dt,
const char *attribute,
struct device *dev,
unsigned long default_value)
{
unsigned long result = default_value;
unsigned long value = 0UL;
if (_dev_sysfs_block_attribute(dt, attribute, dev, &value)) {
log_very_verbose("Device %s: %s is %lu%s.",
dev_name(dev), attribute, value, default_value ? "" : " bytes");
result = value >> SECTOR_SHIFT;
if (!result && value) {
log_warn("WARNING: Device %s: %s is %lu and is unexpectedly less than sector.",
dev_name(dev), attribute, value);
result = 1;
}
}
return result;
}
unsigned long dev_alignment_offset(struct dev_types *dt, struct device *dev)
{
return _dev_topology_attribute(dt, "alignment_offset", dev, 0UL);
}
unsigned long dev_minimum_io_size(struct dev_types *dt, struct device *dev)
{
return _dev_topology_attribute(dt, "queue/minimum_io_size", dev, 0UL);
}
unsigned long dev_optimal_io_size(struct dev_types *dt, struct device *dev)
{
return _dev_topology_attribute(dt, "queue/optimal_io_size", dev, 0UL);
}
unsigned long dev_discard_max_bytes(struct dev_types *dt, struct device *dev)
{
return _dev_topology_attribute(dt, "queue/discard_max_bytes", dev, 0UL);
}
unsigned long dev_discard_granularity(struct dev_types *dt, struct device *dev)
{
return _dev_topology_attribute(dt, "queue/discard_granularity", dev, 0UL);
}
int dev_is_rotational(struct dev_types *dt, struct device *dev)
{
unsigned long value;
return _dev_sysfs_block_attribute(dt, "queue/rotational", dev, &value) ? (int) value : 1;
}
/* dev is pmem if /sys/dev/block/<major>:<minor>/queue/dax is 1 */
int dev_is_pmem(struct dev_types *dt, struct device *dev)
{
unsigned long value;
return _dev_sysfs_block_attribute(dt, "queue/dax", dev, &value) ? (int) value : 0;
}
#else
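/* Non-sysfs builds: no primary device or topology info is available; assume rotational, not pmem. */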
int dev_get_primary_dev(struct dev_types *dt, struct device *dev, dev_t *result)
{
return 0;
}
unsigned long dev_alignment_offset(struct dev_types *dt, struct device *dev)
{
return 0UL;
}
unsigned long dev_minimum_io_size(struct dev_types *dt, struct device *dev)
{
return 0UL;
}
unsigned long dev_optimal_io_size(struct dev_types *dt, struct device *dev)
{
return 0UL;
}
unsigned long dev_discard_max_bytes(struct dev_types *dt, struct device *dev)
{
return 0UL;
}
unsigned long dev_discard_granularity(struct dev_types *dt, struct device *dev)
{
return 0UL;
}
int dev_is_rotational(struct dev_types *dt, struct device *dev)
{
return 1;
}
int dev_is_pmem(struct dev_types *dt, struct device *dev)
{
return 0;
}
#endif