commit 7ee0a6e44d

    Merge branch 'master' of git://sourceware.org/git/lvm2
@@ -162,8 +162,9 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int fd,
 	struct iocb *cb_array[1];
 	struct control_block *cb;
 	struct async_engine *e = _to_async(ioe);
+	long pgsize = sysconf(_SC_PAGESIZE);

-	if (((uintptr_t) data) & (PAGE_SIZE - 1)) {
+	if (((uintptr_t) data) & (pgsize - 1)) {
 		log_warn("misaligned data buffer");
 		return false;
 	}
@@ -547,8 +548,9 @@ static bool _init_free_list(struct bcache *cache, unsigned count)
 {
 	unsigned i;
 	size_t block_size = cache->block_sectors << SECTOR_SHIFT;
+	long pgsize = sysconf(_SC_PAGESIZE);
 	unsigned char *data =
-		(unsigned char *) _alloc_aligned(count * block_size, PAGE_SIZE);
+		(unsigned char *) _alloc_aligned(count * block_size, pgsize);

 	/* Allocate the data for each block. We page align the data. */
 	if (!data)
@@ -899,6 +901,7 @@ struct bcache *bcache_create(sector_t block_sectors, unsigned nr_cache_blocks,
 {
 	struct bcache *cache;
 	unsigned max_io = engine->max_io(engine);
+	long pgsize = sysconf(_SC_PAGESIZE);

 	if (!nr_cache_blocks) {
 		log_warn("bcache must have at least one cache block");
@@ -910,7 +913,7 @@ struct bcache *bcache_create(sector_t block_sectors, unsigned nr_cache_blocks,
 		return NULL;
 	}

-	if (block_sectors & ((PAGE_SIZE >> SECTOR_SHIFT) - 1)) {
+	if (block_sectors & ((pgsize >> SECTOR_SHIFT) - 1)) {
 		log_warn("bcache block size must be a multiple of page size");
 		return NULL;
 	}
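The four bcache hunks above replace the compile-time PAGE_SIZE constant with a runtime sysconf(_SC_PAGESIZE) lookup; PAGE_SIZE is not reliably exposed to userspace headers, and the actual page size differs between systems. A minimal standalone sketch of the same two checks follows; the function names are illustrative and not part of the lvm2 API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define SECTOR_SHIFT 9L		/* 512-byte sectors */

/* Mirrors the _async_issue() check: the I/O buffer must be page aligned. */
static bool buffer_page_aligned(const void *data)
{
	long pgsize = sysconf(_SC_PAGESIZE);

	return !(((uintptr_t) data) & (pgsize - 1));
}

/* Mirrors the bcache_create() check: a cache block must be a whole number of pages. */
static bool block_sectors_page_multiple(uint64_t block_sectors)
{
	long pgsize = sysconf(_SC_PAGESIZE);

	return !(block_sectors & ((pgsize >> SECTOR_SHIFT) - 1));
}

int main(void)
{
	printf("page size: %ld bytes\n", sysconf(_SC_PAGESIZE));
	printf("8-sector block ok: %d\n", block_sectors_page_multiple(8));
	printf("9-sector block ok: %d\n", block_sectors_page_multiple(9));
	printf("unaligned buffer ok: %d\n", buffer_page_aligned((void *) 0x1003));
	return 0;
}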
@@ -114,7 +114,7 @@ int label_remove(struct device *dev)

 	log_very_verbose("Scanning for labels to wipe from %s", dev_name(dev));

-	if (!label_scan_open(dev)) {
+	if (!label_scan_open_excl(dev)) {
 		log_error("Failed to open device %s", dev_name(dev));
 		return 0;
 	}
@@ -977,6 +977,12 @@ int label_scan_open(struct device *dev)
 	return 1;
 }

+int label_scan_open_excl(struct device *dev)
+{
+	dev->flags |= DEV_BCACHE_EXCL;
+	return label_scan_open(dev);
+}
+
 bool dev_read_bytes(struct device *dev, uint64_t start, size_t len, void *data)
 {
 	if (!scan_bcache) {
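The new label_scan_open_excl() only sets the DEV_BCACHE_EXCL flag and delegates to label_scan_open(); how the flag is consumed is not shown in this diff. The callers switched to it further down (label_remove(), _pvcreate_check(), _pvcreate_write(), and the pvcreate path in do_command), plus the dev_test_excl() block that _pvremove_single() can now drop, all rely on the same standard Linux behaviour: opening a block device with O_EXCL fails with EBUSY while a filesystem is mounted from it or another exclusive holder exists, hence the "Mounted filesystem?" wording of the errors. A hedged standalone illustration, not lvm2 code:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Try to claim a block device exclusively; returns the fd or -1.
 * On Linux, O_EXCL on a block device fails with EBUSY while the device is
 * mounted or already claimed by another exclusive opener. */
static int open_blkdev_excl(const char *path)
{
	int fd = open(path, O_RDWR | O_EXCL);

	if (fd < 0)
		fprintf(stderr, "Can't open %s exclusively (%s). Mounted filesystem?\n",
			path, strerror(errno));
	return fd;
}

int main(int argc, char **argv)
{
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s /dev/<block-device>\n", argv[0]);
		return 1;
	}
	fd = open_blkdev_excl(argv[1]);
	if (fd >= 0)
		close(fd);
	return fd >= 0 ? 0 : 1;
}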
@@ -114,6 +114,7 @@ int label_read_sector(struct device *dev, uint64_t scan_sector);
 void label_scan_confirm(struct device *dev);
 int label_scan_setup_bcache(void);
 int label_scan_open(struct device *dev);
+int label_scan_open_excl(struct device *dev);

 /*
  * Wrappers around bcache equivalents.
@@ -184,7 +184,7 @@ int update_cache_pool_params(struct cmd_context *cmd,
 		 * keep user informed he might be using things in untintended direction
 		 */
 		log_print_unless_silent("Using %s chunk size instead of default %s, "
-					"so cache pool has less then " FMTu64 " chunks.",
+					"so cache pool has less than " FMTu64 " chunks.",
 					display_size(cmd, min_chunk_size),
 					display_size(cmd, *chunk_size),
 					max_chunks);
@@ -193,7 +193,7 @@ int update_cache_pool_params(struct cmd_context *cmd,
 		log_verbose("Setting chunk size to %s.",
 			    display_size(cmd, *chunk_size));
 	} else if (*chunk_size < min_chunk_size) {
-		log_error("Chunk size %s is less then required minimal chunk size %s "
+		log_error("Chunk size %s is less than required minimal chunk size %s "
 			  "for a cache pool of %s size and limit " FMTu64 " chunks.",
 			  display_size(cmd, *chunk_size),
 			  display_size(cmd, min_chunk_size),
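The two update_cache_pool_params() hunks above only correct "less then" to "less than" in user-facing messages. The constraint those messages describe is that, with a fixed upper limit on the number of chunks, the chunk size has to grow with the pool size. A hedged sketch of that arithmetic; the 64-sector rounding granularity is an assumption for illustration, not taken from this diff:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: smallest chunk size, in 512-byte sectors, that keeps a
 * cache pool of pool_sectors within max_chunks chunks, rounded up to an
 * assumed 64-sector (32KiB) chunk granularity. */
static uint64_t min_chunk_sectors(uint64_t pool_sectors, uint64_t max_chunks)
{
	const uint64_t granularity = 64;	/* assumed, not from this diff */
	uint64_t chunk = (pool_sectors + max_chunks - 1) / max_chunks;

	return ((chunk + granularity - 1) / granularity) * granularity;
}

int main(void)
{
	/* A 1TiB pool (2^31 sectors) limited to 1000000 chunks needs chunks of
	 * at least 2176 sectors (~1.06MiB); anything smaller hits the error path. */
	printf("%" PRIu64 " sectors\n", min_chunk_sectors(1ULL << 31, 1000000));
	return 0;
}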
@@ -227,13 +227,12 @@ static int _pvcreate_check(struct cmd_context *cmd, const char *name,
 	/*
 	 * This test will fail if the device belongs to an MD array.
 	 */
-	if (!dev_test_excl(dev)) {
+	if (!label_scan_open_excl(dev)) {
 		/* FIXME Detect whether device-mapper itself is still using it */
 		log_error("Can't open %s exclusively. Mounted filesystem?",
 			  name);
 		goto out;
 	}
-	dev_close(dev);

 	if (!wipe_known_signatures(cmd, dev, name,
 				   TYPE_LVM1_MEMBER | TYPE_LVM2_MEMBER,
@@ -578,16 +577,6 @@ static int _pvremove_single(struct cmd_context *cmd, const char *pv_name,
 		goto out;
 	}

-	// FIXME: why is this called if info is not used?
-	//info = lvmcache_info_from_pvid(dev->pvid, dev, 0);
-
-	if (!dev_test_excl(dev)) {
-		/* FIXME Detect whether device-mapper is still using the device */
-		log_error("Can't open %s exclusively - not removing. "
-			  "Mounted filesystem?", dev_name(dev));
-		goto out;
-	}
-
 	/* Wipe existing label(s) */
 	if (!label_remove(dev)) {
 		log_error("Failed to wipe existing label(s) on %s", pv_name);
@@ -1050,7 +1050,7 @@ uint32_t extents_from_size(struct cmd_context *cmd, uint64_t size,

 	if (size > (uint64_t) MAX_EXTENT_COUNT * extent_size) {
 		log_error("Volume too large (%s) for extent size %s. "
-			  "Upper limit is less then %s.",
+			  "Upper limit is less than %s.",
 			  display_size(cmd, size),
 			  display_size(cmd, (uint64_t) extent_size),
 			  display_size(cmd, (uint64_t) MAX_EXTENT_COUNT *
@@ -1413,7 +1413,7 @@ static int _pvcreate_write(struct cmd_context *cmd, struct pv_to_write *pvw)
 	struct device *dev = pv->dev;
 	const char *pv_name = dev_name(dev);

-	if (!label_scan_open(dev)) {
+	if (!label_scan_open_excl(dev)) {
 		log_error("%s not opened: device not written", pv_name);
 		return 0;
 	}
@@ -228,7 +228,7 @@ int pool_metadata_min_threshold(const struct lv_segment *pool_seg)
 	 *
 	 * In the metadata LV there should be minimum from either 4MiB of free space
 	 * or at least 25% of free space, which applies when the size of thin pool's
-	 * metadata is less then 16MiB.
+	 * metadata is less than 16MiB.
 	 */
 	const dm_percent_t meta_min = DM_PERCENT_1 * 25;
 	dm_percent_t meta_free = dm_make_percent(((4096 * 1024) >> SECTOR_SHIFT),
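The comment being fixed above describes the thin-pool metadata threshold: at least 25% of the metadata LV should stay free, but no more than 4MiB of free space is ever demanded, so the percentage rule only matters while the metadata LV is smaller than 16MiB (25% of 16MiB is 4MiB). The test changes further down exercise both sides of that rule, a 2M metadata volume for the 25% case and a 32MiB one for the 4MiB case. A hypothetical helper expressing the rule, not taken from lvm2:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper (not lvm2 code): free space required in a thin-pool
 * metadata LV -- 25% of its size, capped at 4MiB. */
static uint64_t required_free_bytes(uint64_t metadata_bytes)
{
	const uint64_t four_mib = 4ULL * 1024 * 1024;
	uint64_t quarter = metadata_bytes / 4;

	return quarter < four_mib ? quarter : four_mib;
}

int main(void)
{
	/* 2MiB metadata LV: the 25% rule applies (512KiB must stay free).
	 * 32MiB metadata LV: the 4MiB cap applies (>87.5% usage trips the threshold). */
	printf("%" PRIu64 "\n", required_free_bytes(2ULL << 20));
	printf("%" PRIu64 "\n", required_free_bytes(32ULL << 20));
	return 0;
}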
@@ -54,7 +54,7 @@ dd if=/dev/zero of=mnt/zero bs=4K count=100 conv=fdatasync 2>err &

 PERCENT=$(get lv_field $vg/$lv1 copy_percent)
 PERCENT=${PERCENT%%\.*}  # cut decimal
-# and check less then 50% mirror is in sync (could be unusable delay_dev ?)
+# and check less than 50% mirror is in sync (could be unusable delay_dev ?)
 test "$PERCENT" -lt 50 || skip
 #lvs -a -o+devices $vg

@@ -75,7 +75,7 @@ lvcreate -L32M -n $lv3 $vg
 lvchange -an $vg/thin $vg/thin2 $vg/pool

 # Filling 2M metadata volume
-# (Test for less then 25% free space in metadata)
+# (Test for less than 25% free space in metadata)
 fake_metadata_ 400 2 >data
 "$LVM_TEST_THIN_RESTORE_CMD" -i data -o "$DM_DEV_DIR/mapper/$vg-$lv1"

@@ -89,7 +89,7 @@ fail lvcreate -V20 $vg/pool
 lvchange -an $vg/pool

 # Consume more then (100% - 4MiB) out of 32MiB metadata volume (>87.5%)
-# (Test for less then 4MiB free space in metadata, which is less then 25%)
+# (Test for less than 4MiB free space in metadata, which is less than 25%)
 fake_metadata_ 7400 2 >data
 "$LVM_TEST_THIN_RESTORE_CMD" -i data -o "$DM_DEV_DIR/mapper/$vg-$lv2"
 # Swap volume with restored fake metadata
@@ -5785,7 +5785,7 @@ do_command:

 		pv_name = pd->name;

-		label_scan_open(pd->dev);
+		label_scan_open_excl(pd->dev);

 		log_debug("Creating a new PV on %s.", pv_name);
