
Revert "raid: adjust to misordered raid table line output"

This reverts commit 1e4462dbfb
in favour of an enhanced solution avoiding changes in liblvm
completely by checking the target versions in libdm and emitting
the respective parameter lines.
Heinz Mauelshagen 2017-03-22 17:50:51 +01:00
parent 14c4d32247
commit 1bf90dac77
4 changed files with 33 additions and 103 deletions
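
For orientation, the replacement approach described above boils down to libdm itself deciding, from the detected raid target version, which parameter ordering to emit, so liblvm needs no changes at all. The fragment below is an illustrative sketch only, not the code of the follow-up commit; the helper name _raid_wants_documented_order() is invented here, and the version range comes from the check removed further down in this diff.

    #include <stdint.h>

    /*
     * Illustrative sketch (not libdm code): target versions >= 1.9.0 and
     * < 1.11.0 parsed some optional raid arguments (e.g. 'raid10_copies')
     * out of the documented order, so emission has to branch on the version.
     */
    static int _raid_wants_documented_order(uint32_t maj, uint32_t min)
    {
            return !(maj == 1 && min >= 9 && min < 11);
    }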

View File

@@ -288,7 +288,6 @@ struct segment_type *init_unknown_segtype(struct cmd_context *cmd,
 #define RAID_FEATURE_RAID4              (1U << 3) /* ! version 1.8 or 1.9.0 */
 #define RAID_FEATURE_SHRINK             (1U << 4) /* version 1.9.0 */
 #define RAID_FEATURE_RESHAPE            (1U << 5) /* version 1.10.1 */
-#define RAID_FEATURE_PROPER_TABLE_ORDER (1U << 6) /* version >= 1.9.0 and < 1.11.0 had wrong parameter order */
 
 #ifdef RAID_INTERNAL
 int init_raid_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);
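
A minimal usage sketch of the feature bits above (not taken from the lvm2 tree): _raid_target_present(), shown later in this diff, reports the detected capabilities through its attributes out-parameter, and callers test individual RAID_FEATURE_* bits before relying on a kernel-side capability. The wrapper name _have_reshape() is hypothetical.

    /* Illustration only: query the raid target's attribute bits and
     * test one of the feature flags defined above. */
    static int _have_reshape(struct cmd_context *cmd)
    {
            unsigned attrs = 0;

            if (!_raid_target_present(cmd, NULL, &attrs))
                    return 0;       /* no usable raid target in the kernel */

            return (attrs & RAID_FEATURE_RESHAPE) ? 1 : 0;
    }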

View File

@@ -228,9 +228,6 @@ static int _raid_text_export(const struct lv_segment *seg, struct formatter *f)
         return _raid_text_export_raid(seg, f);
 }
 
-static int _raid_target_present(struct cmd_context *cmd,
-                                const struct lv_segment *seg __attribute__((unused)),
-                                unsigned *attributes);
 static int _raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
                                  struct dm_pool *mem __attribute__((unused)),
                                  struct cmd_context *cmd __attribute__((unused)),
@@ -241,7 +238,6 @@ static int _raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
                                  uint32_t *pvmove_mirror_count __attribute__((unused)))
 {
         int delta_disks = 0, delta_disks_minus = 0, delta_disks_plus = 0, data_offset = 0;
-        unsigned attrs;
         uint32_t s;
         uint64_t flags = 0;
         uint64_t rebuilds[RAID_BITMAP_SIZE];
@@ -304,13 +300,6 @@ static int _raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
                 flags = DM_NOSYNC;
         }
 
-        if (!_raid_target_present(seg->lv->vg->cmd, seg, &attrs))
-                return_0;
-
-        /* RAID target line parameters are in kernel documented order */
-        if (attrs & RAID_FEATURE_PROPER_TABLE_ORDER)
-                flags |= DM_RAID_TABLE_ORDERED;
-
         params.raid_type = lvseg_name(seg);
 
         if (seg->segtype->parity_devs) {
@@ -490,7 +479,7 @@ static int _raid_target_present(struct cmd_context *cmd,
         static int _raid_checked = 0;
         static int _raid_present = 0;
-        static unsigned _raid_attrs;
+        static unsigned _raid_attrs = 0;
         uint32_t maj, min, patchlevel;
         unsigned i;
@@ -499,7 +488,6 @@ static int _raid_target_present(struct cmd_context *cmd,
         if (!_raid_checked) {
                 _raid_checked = 1;
-                _raid_attrs = RAID_FEATURE_PROPER_TABLE_ORDER;
 
                 if (!(_raid_present = target_present(cmd, TARGET_NAME_RAID, 1)))
                         return 0;
@@ -526,15 +514,6 @@ static int _raid_target_present(struct cmd_context *cmd,
                 else
                         log_very_verbose("Target raid does not support %s.",
                                          SEG_TYPE_NAME_RAID4);
-
-                /*
-                 * Target version range check:
-                 *
-                 * raid target line parameters were misordered (e.g. 'raid10_copies')
-                 * in target version >= 1.9.0 and < 1.11.0
-                 */
-                if (maj == 1 && min >= 9 && min < 11)
-                        _raid_attrs &= ~RAID_FEATURE_PROPER_TABLE_ORDER;
         }
 
         if (attributes)
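
Taken together, the fragments above follow a check-once pattern: probe the kernel target a single time per command, cache both the presence result and the detected feature bits, and hand the cached bits back through the attributes out-parameter. The sketch below is a simplified reconstruction, not the verbatim function; the target_version() call and the SHRINK gate (using the 1.9.0 threshold noted in the header hunk above) are illustrative assumptions about how the version is obtained and used.

    static int _raid_target_present(struct cmd_context *cmd,
                                    const struct lv_segment *seg __attribute__((unused)),
                                    unsigned *attributes)
    {
            static int _raid_checked = 0;
            static int _raid_present = 0;
            static unsigned _raid_attrs = 0;
            uint32_t maj, min, patchlevel;

            if (!_raid_checked) {
                    _raid_checked = 1;

                    /* Is a raid target registered with the kernel at all? */
                    if (!(_raid_present = target_present(cmd, TARGET_NAME_RAID, 1)))
                            return 0;

                    /* Assumed helper for reading the target's version triple. */
                    if (!target_version("raid", &maj, &min, &patchlevel))
                            return_0;

                    /* Example feature gate: shrink support noted as 1.9.0 above. */
                    if (maj > 1 || (maj == 1 && min >= 9))
                            _raid_attrs |= RAID_FEATURE_SHRINK;
            }

            if (attributes)
                    *attributes = _raid_attrs;

            return _raid_present;
    }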

View File

@@ -1775,16 +1775,6 @@ int dm_tree_node_add_mirror_target(struct dm_tree_node *node,
 #define DM_BLOCK_ON_ERROR 0x00000004	/* On error, suspend I/O */
 #define DM_CORELOG        0x00000008	/* In-memory log */
 
-/*
- * RAID flag: table line is in kernel documented order.
- *
- * Target version >= 1.9.0 and < 1.11.0 misordered e.g. 'raid10_copies'
- *
- * Keep distinct from mirror log ones above because it
- * can be passed together with those in load segment flags!
- */
-#define DM_RAID_TABLE_ORDERED 0x00000010
-
 int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
                                        uint32_t region_size,
                                        unsigned clustered,
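
The removed comment's point about keeping the value distinct is simply that these flags are OR-ed into one load-segment flags word and tested bit by bit later; an overlapping value would make those tests ambiguous. A tiny illustration (not lvm2 code, flag names as defined above):

    uint64_t flags = 0;

    flags |= DM_NOSYNC;                     /* mirror-log flag */
    flags |= DM_RAID_TABLE_ORDERED;         /* raid flag, distinct bit */

    if (flags & DM_NOSYNC)                  /* still distinguishable */
            /* emit " nosync" */ ;
    if (flags & DM_RAID_TABLE_ORDERED)
            /* emit documented parameter order */ ;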

View File

@@ -2432,47 +2432,6 @@ static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
                 EMIT_PARAMS(pos, " raid10_format offset");
 #endif
 
-        if (seg->flags & DM_RAID_TABLE_ORDERED) {
-                /* Emit order of paramters as of kernel target documentation */
-                if (seg->flags & DM_NOSYNC)
-                        EMIT_PARAMS(pos, " nosync");
-                else if (seg->flags & DM_FORCESYNC)
-                        EMIT_PARAMS(pos, " sync");
-
-                for (i = 0; i < area_count; i++)
-                        if (seg->rebuilds[i/64] & (1ULL << (i%64)))
-                                EMIT_PARAMS(pos, " rebuild %u", i);
-
-                if (seg->min_recovery_rate)
-                        EMIT_PARAMS(pos, " min_recovery_rate %u",
-                                    seg->min_recovery_rate);
-
-                if (seg->max_recovery_rate)
-                        EMIT_PARAMS(pos, " max_recovery_rate %u",
-                                    seg->max_recovery_rate);
-
-                for (i = 0; i < area_count; i++)
-                        if (seg->writemostly[i/64] & (1ULL << (i%64)))
-                                EMIT_PARAMS(pos, " write_mostly %u", i);
-
-                if (seg->writebehind)
-                        EMIT_PARAMS(pos, " max_write_behind %u", seg->writebehind);
-
-                if (seg->region_size)
-                        EMIT_PARAMS(pos, " region_size %u", seg->region_size);
-
-                if (seg->data_copies > 1 && type == SEG_RAID10)
-                        EMIT_PARAMS(pos, " raid10_copies %u", seg->data_copies);
-
-                if (seg->delta_disks)
-                        EMIT_PARAMS(pos, " delta_disks %d", seg->delta_disks);
-
-                /* If seg-data_offset == 1, kernel needs a zero offset to adjust to it */
-                if (seg->data_offset)
-                        EMIT_PARAMS(pos, " data_offset %d", seg->data_offset == 1 ? 0 : seg->data_offset);
-
-        } else {
-                /* Target version >= 1.9.0 && < 1.11.0 had a table line parameter ordering flaw */
         if (seg->data_copies > 1 && type == SEG_RAID10)
                 EMIT_PARAMS(pos, " raid10_copies %u", seg->data_copies);
 
@@ -2502,6 +2461,10 @@ static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
         if (seg->writebehind)
                 EMIT_PARAMS(pos, " max_write_behind %u", seg->writebehind);
 
+        /*
+         * Has to be before "min_recovery_rate" or the kernels
+         * check will fail when both set and min > previous max
+         */
         if (seg->max_recovery_rate)
                 EMIT_PARAMS(pos, " max_recovery_rate %u",
                             seg->max_recovery_rate);
@@ -2509,7 +2472,6 @@ static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
         if (seg->min_recovery_rate)
                 EMIT_PARAMS(pos, " min_recovery_rate %u",
                             seg->min_recovery_rate);
-        }
 
         /* Print number of metadata/data device pairs */
         EMIT_PARAMS(pos, " %u", area_count);
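
For reference, the string built by this function becomes the argument part of a device-mapper 'raid' table line of the general shape <raid_type> <#raid_params> <chunk_size> [optional args] <#raid_devs> <meta_dev> <data_dev> ..., preceded by the start sector, length and target name. The line below is a hand-written illustration only (sizes, rates and device numbers are invented), showing the ordering produced above, with max_recovery_rate deliberately emitted ahead of min_recovery_rate for the reason given in the added comment:

    0 8388608 raid raid10 7 128 raid10_copies 2 max_recovery_rate 8192 min_recovery_rate 1024 4 254:1 254:2 254:3 254:4 254:5 254:6 254:7 254:8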
@@ -2780,7 +2742,7 @@ static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
                          struct load_segment *seg, uint64_t *seg_start)
 {
         char *params;
-        size_t paramsize = 4096; /* FIXME: too small for long RAID lines when > 64 devices supported */
+        size_t paramsize = 4096;
         int ret;
 
         do {
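
The do-loop cut off by this hunk builds the parameter string into a fixed-size buffer and, in the usual libdm fashion, retries with a larger one when it does not fit. Below is a generic, self-contained sketch of that grow-and-retry pattern, not the libdm code itself; the emit callback is an invented stand-in assumed to return a negative value when the buffer is too small.

    #include <stdlib.h>

    #define MAX_PARAMSIZE (1024 * 1024)     /* illustrative upper bound */

    static int _build_params(char **out, int (*emit)(char *buf, size_t len))
    {
            size_t paramsize = 4096;
            char *params;
            int ret;

            do {
                    if (!(params = malloc(paramsize)))
                            return 0;

                    params[0] = '\0';
                    ret = emit(params, paramsize);

                    if (ret >= 0) {
                            *out = params;          /* caller frees */
                            return 1;
                    }

                    free(params);                   /* too small: double and retry */
                    paramsize *= 2;
            } while (paramsize < MAX_PARAMSIZE);

            return 0;
    }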