
Extend _check_contiguous() to detect single-area LVs.

Include mirror log (untested) in _for_each_pv() processing.
Use MIRROR_LOG_SIZE constant.
Remove struct seg_pvs from _for_each_pv() for generalisation.
Avoid adding duplicates to list of parallel PVs to avoid.
Alasdair Kergon 2006-09-11 21:14:56 +00:00
parent d9c6bbab80
commit bbb068562e
3 changed files with 51 additions and 31 deletions
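Among other things, the diff below drops the struct seg_pvs * parameter from _for_each_pv() in favour of an opaque void *data that each callback casts back itself. Here is a minimal standalone sketch of that void-pointer callback pattern; it is not LVM2 code, and the names (for_each_item, sum_len) are purely illustrative:

/*
 * Generic walker: it knows nothing about what 'data' points to;
 * only the callback interprets it.
 */
#include <stdio.h>

struct item {
	const char *name;
	int len;
};

static int for_each_item(struct item *items, int count,
			 int (*fn)(struct item *it, void *data), void *data)
{
	int i;

	for (i = 0; i < count; i++)
		if (!fn(&items[i], data))
			return 0;	/* a failing callback stops the walk */

	return 1;
}

/* One possible callback: accumulate total length into the int it is handed. */
static int sum_len(struct item *it, void *data)
{
	int *total = data;

	*total += it->len;
	return 1;
}

int main(void)
{
	struct item items[] = { { "pva", 4 }, { "pvb", 8 } };
	int total = 0;

	if (!for_each_item(items, 2, sum_len, &total))
		return 1;

	printf("total length: %d\n", total);	/* prints 12 */
	return 0;
}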


@@ -1,5 +1,10 @@
 Version 2.02.10 -
 ==================================
+  Extend _check_contiguous() to detect single-area LVs.
+  Include mirror log (untested) in _for_each_pv() processing.
+  Use MIRROR_LOG_SIZE constant.
+  Remove struct seg_pvs from _for_each_pv() to generalise.
+  Avoid adding duplicates to list of parallel PVs to avoid.
   Fix several incorrect comparisons in parallel area avoidance code.
   Fix segment lengths when flattening existing parallel areas.
   Log existing parallel areas prior to allocation.


@@ -657,7 +657,7 @@ static int _alloc_parallel_area(struct alloc_handle *ah, uint32_t needed,
 	if (log_area) {
 		ah->log_area.pv = log_area->map->pv;
 		ah->log_area.pe = log_area->start;
-		ah->log_area.len = 1;	/* FIXME Calculate & check this */
+		ah->log_area.len = MIRROR_LOG_SIZE;	/* FIXME Calculate & check this */
 		consume_pv_area(log_area, ah->log_area.len);
 	}
@@ -685,14 +685,21 @@ static int _comp_area(const void *l, const void *r)
  */
 static int _check_contiguous(struct lv_segment *prev_lvseg,
			     struct physical_volume *pv, struct pv_area *pva,
-			     struct pv_area **areas)
+			     struct pv_area **areas, uint32_t areas_size)
 {
 	struct pv_segment *prev_pvseg;
+	struct lv_segment *lastseg;
 	uint32_t s;
 
-	for (s = 0; s < prev_lvseg->area_count; s++) {
-		if (seg_type(prev_lvseg, s) != AREA_PV)
-			continue;	/* FIXME Broken */
+	for (s = 0; s < prev_lvseg->area_count && s < areas_size; s++) {
+		if (seg_type(prev_lvseg, s) == AREA_LV) {
+			lastseg = list_item(list_last(&seg_lv(prev_lvseg, s)->segments), struct lv_segment);
+			/* FIXME For more areas supply flattened prev_lvseg to ensure consistency */
+			if (lastseg->area_count == 1 &&
+			    _check_contiguous(lastseg, pv, pva, &areas[s], 1))
+				return 1;
+			continue;
+		}
 
 		if (!(prev_pvseg = seg_pvseg(prev_lvseg, s)))
 			continue;	/* FIXME Broken */
@@ -796,7 +803,8 @@ static int _find_parallel_space(struct alloc_handle *ah, alloc_policy_t alloc,
 			if (prev_lvseg &&
			    _check_contiguous(prev_lvseg,
					      pvm->pv,
-					      pva, areas)) {
+					      pva, areas,
+					      areas_size)) {
 				contiguous_count++;
 				goto next_pv;
 			}
@@ -1374,11 +1382,15 @@ struct logical_volume *lv_create_empty(struct format_instance *fi,
 	return lv;
 }
 
-/* Recursively process each PV used by part of an LV */
+/*
+ * Call fn for each AREA_PV used by the LV segment at lv:le of length *max_seg_len.
+ * If any constituent area contains more than one segment, max_seg_len is
+ * reduced to cover only the first.
+ */
 static int _for_each_pv(struct cmd_context *cmd, struct logical_volume *lv,
-			uint32_t le, uint32_t len,
-			int (*fn)(struct cmd_context *cmd, struct pv_segment *peg, struct seg_pvs *spvs),
-			struct seg_pvs *spvs)
+			uint32_t le, uint32_t len, uint32_t *max_seg_len,
+			int (*fn)(struct cmd_context *cmd, struct pv_segment *peg, void *data),
+			void *data)
 {
 	struct lv_segment *seg;
 	uint32_t s;
@@ -1396,36 +1408,41 @@ static int _for_each_pv(struct cmd_context *cmd, struct logical_volume *lv,
 	if (remaining_seg_len > len)
 		remaining_seg_len = len;
 
-	if (spvs->len > remaining_seg_len)
-		spvs->len = remaining_seg_len;
+	if (max_seg_len && *max_seg_len > remaining_seg_len)
+		*max_seg_len = remaining_seg_len;
 
 	area_multiple = segtype_is_striped(seg->segtype) ? seg->area_count : 1;
-	area_len = remaining_seg_len / area_multiple;
+	area_len = remaining_seg_len / area_multiple ? : 1;
 
-	for (s = 0; s < seg->area_count; s++) {
+	for (s = 0; s < seg->area_count; s++)
 		if (seg_type(seg, s) == AREA_LV) {
 			if (!_for_each_pv(cmd, seg_lv(seg, s),
					  seg_le(seg, s) + (le - seg->le) / area_multiple,
-					  area_len, fn, spvs)) {
-				stack;
-				return 0;
-			}
-		} else if (seg_type(seg, s) == AREA_PV) {
-			if (!fn(cmd, seg_pvseg(seg, s), spvs)) {
-				stack;
-				return 0;
-			}
-		}
-	}
+					  area_len, max_seg_len, fn, data))
+				return_0;
+		} else if ((seg_type(seg, s) == AREA_PV) &&
+			   !fn(cmd, seg_pvseg(seg, s), data))
+			return_0;
+
+	if (seg_is_mirrored(seg) &&
+	    !_for_each_pv(cmd, seg->log_lv, 0, MIRROR_LOG_SIZE,
+			  NULL, fn, data))
+		return_0;
 
 	/* FIXME Add snapshot cow LVs etc. */
 
 	return 1;
 }
 
-static int _add_pvs(struct cmd_context *cmd, struct pv_segment *peg, struct seg_pvs *spvs)
+static int _add_pvs(struct cmd_context *cmd, struct pv_segment *peg, void *data)
 {
+	struct seg_pvs *spvs = (struct seg_pvs *) data;
 	struct pv_list *pvl;
 
-	/* FIXME Don't add again if it's already on the list! */
+	/* Don't add again if it's already on list. */
+	list_iterate_items(pvl, &spvs->pvs)
+		if (pvl->pv == peg->pv)
+			return 1;
+
 	if (!(pvl = dm_pool_alloc(cmd->mem, sizeof(*pvl)))) {
 		log_error("pv_list allocation failed");
@@ -1434,11 +1451,8 @@ static int _add_pvs(struct cmd_context *cmd, struct pv_segment *peg, struct seg_
 	pvl->pv = peg->pv;
 
 	/* FIXME Use ordered list to facilitate comparison */
 	list_add(&spvs->pvs, &pvl->list);
 
-	/* FIXME Add mirror logs, snapshot cow LVs etc. */
-
 	return 1;
 }
@@ -1474,7 +1488,7 @@ struct list *build_parallel_areas_from_lv(struct cmd_context *cmd,
 		/* Find next segment end */
 		/* FIXME Unnecessary nesting! */
-		if (!_for_each_pv(cmd, lv, current_le, lv->le_count - current_le, _add_pvs, spvs)) {
+		if (!_for_each_pv(cmd, lv, current_le, spvs->len, &spvs->len, _add_pvs, (void *) spvs)) {
 			stack;
 			return NULL;
 		}
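A smaller change in the same file adds an optional out-parameter, uint32_t *max_seg_len, which _for_each_pv() clamps down to the length it actually covers; callers that do not care pass NULL. A standalone sketch of that pattern, with illustrative names only (visit_region, max_len), assuming nothing beyond what the diff shows:

#include <stdint.h>
#include <stdio.h>

/* Clamp the caller's running maximum, but only if one was supplied. */
static void visit_region(uint32_t region_len, uint32_t *max_seg_len)
{
	if (max_seg_len && *max_seg_len > region_len)
		*max_seg_len = region_len;
}

int main(void)
{
	uint32_t max_len = 100;

	visit_region(40, &max_len);	/* clamps 100 down to 40 */
	visit_region(75, &max_len);	/* 40 is already smaller: unchanged */
	visit_region(10, NULL);		/* caller not interested: nothing clamped */

	printf("max segment length: %u\n", max_len);	/* prints 40 */
	return 0;
}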


@@ -34,6 +34,7 @@
 #define STRIPE_SIZE_LIMIT ((UINT_MAX >> 2) + 1)
 #define PV_MIN_SIZE ( 512L * 1024L >> SECTOR_SHIFT)	/* 512 KB in sectors */
 #define MAX_RESTRICTED_LVS 255	/* Used by FMT_RESTRICTED_LVIDS */
+#define MIRROR_LOG_SIZE 1	/* Extents */
 
 /* Various flags */
 /* Note that the bits no longer necessarily correspond to LVM1 disk format */
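Finally, _add_pvs() above now scans the existing list before appending, so a PV used by several areas is only recorded once. The same idea in a standalone sketch; the plain singly-linked list and the names (pv_node, add_pv_once) are illustrative stand-ins for LVM2's dm list machinery:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pv_node {
	char name[16];
	struct pv_node *next;
};

/* Add 'name' unless it is already on the list; returns 0 only on allocation failure. */
static int add_pv_once(struct pv_node **head, const char *name)
{
	struct pv_node *n;

	/* Don't add again if it's already on the list. */
	for (n = *head; n; n = n->next)
		if (!strcmp(n->name, name))
			return 1;

	if (!(n = malloc(sizeof(*n))))
		return 0;

	strncpy(n->name, name, sizeof(n->name) - 1);
	n->name[sizeof(n->name) - 1] = '\0';
	n->next = *head;
	*head = n;

	return 1;
}

int main(void)
{
	struct pv_node *pvs = NULL, *n;

	add_pv_once(&pvs, "pva");
	add_pv_once(&pvs, "pvb");
	add_pv_once(&pvs, "pva");	/* duplicate: list stays at two entries */

	for (n = pvs; n; n = n->next)
		printf("%s\n", n->name);

	return 0;
}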