
Extend core allocation code in preparation for mirrored log areas.

Alasdair Kergon 2010-03-01 20:00:20 +00:00
parent 630e13edd8
commit 16d9293bd7
4 changed files with 131 additions and 141 deletions

WHATS_NEW

@@ -1,5 +1,6 @@
 Version 2.02.62 -
 ====================================
+  Extend core allocation code in preparation for mirrored log areas.
   Rewrite clvmd init script.
   Remove lvs_in_vg_activated_by_uuid_only call.
   Run device info query device by uuid only.

lib/metadata/lv_alloc.h

@@ -58,8 +58,7 @@ int lv_add_segment(struct alloc_handle *ah,
 		   const struct segment_type *segtype,
 		   uint32_t stripe_size,
 		   uint64_t status,
-		   uint32_t region_size,
-		   struct logical_volume *log_lv);
+		   uint32_t region_size);
 
 int lv_add_mirror_areas(struct alloc_handle *ah,
 			struct logical_volume *lv, uint32_t le,

lib/metadata/lv_manip.c

@@ -507,16 +507,21 @@ struct alloc_handle {
 	struct dm_pool *mem;
 	alloc_policy_t alloc;		/* Overall policy */
+	uint32_t new_extents;		/* Number of new extents required */
 	uint32_t area_count;		/* Number of parallel areas */
 	uint32_t area_multiple;		/* seg->len = area_len * area_multiple */
-	uint32_t log_count;		/* Number of parallel 1-extent logs */
-	uint32_t log_region_size;	/* region size for log device */
+	uint32_t log_area_count;	/* Number of parallel logs */
+	uint32_t log_len;		/* Length of log */
+	uint32_t region_size;		/* Mirror region size */
 	uint32_t total_area_len;	/* Total number of parallel extents */
 
 	struct dm_list *parallel_areas;	/* PVs to avoid */
 
-	struct alloced_area log_area;	/* Extent used for log */
-
-	struct dm_list alloced_areas[0]; /* Lists of areas in each stripe */
+	/*
+	 * Contains area_count lists of areas allocated to data stripes
+	 * followed by log_area_count lists of areas allocated to log stripes.
+	 */
+	struct dm_list alloced_areas[0];
 };
 
 static uint32_t calc_area_multiple(const struct segment_type *segtype,
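
The flexible array now replaces the single log_area member: the first area_count lists hold data-stripe areas, and the next log_area_count lists hold log areas. A minimal sketch of that indexing convention follows; the counts are hypothetical, only the data-lists-then-log-lists layout comes from this hunk.

#include <stdio.h>

/* Sketch of the alloced_areas indexing introduced above.
 * Counts are hypothetical. */
int main(void)
{
	unsigned area_count = 3;	/* e.g. 3 mirror images */
	unsigned log_area_count = 2;	/* e.g. a mirrored log with 2 sides */
	unsigned s;

	for (s = 0; s < area_count + log_area_count; s++)
		printf("alloced_areas[%u]: %s list\n", s,
		       s < area_count ? "data stripe" : "log");

	return 0;
}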
@@ -528,6 +533,28 @@ static uint32_t calc_area_multiple(const struct segment_type *segtype,
 	return area_count;
 }
 
+/*
+ * Returns log device size in extents, algorithm from kernel code
+ */
+#define BYTE_SHIFT 3
+static uint32_t mirror_log_extents(uint32_t region_size, uint32_t pe_size, uint32_t area_len)
+{
+	size_t area_size, bitset_size, log_size, region_count;
+
+	area_size = area_len * pe_size;
+	region_count = dm_div_up(area_size, region_size);
+
+	/* Work out how many "unsigned long"s we need to hold the bitset. */
+	bitset_size = dm_round_up(region_count, sizeof(uint32_t) << BYTE_SHIFT);
+	bitset_size >>= BYTE_SHIFT;
+
+	/* Log device holds both header and bitset. */
+	log_size = dm_round_up((MIRROR_LOG_OFFSET << SECTOR_SHIFT) + bitset_size, 1 << SECTOR_SHIFT);
+	log_size >>= SECTOR_SHIFT;
+
+	return dm_div_up(log_size, pe_size);
+}
+
 /*
  * Preparation for a specific allocation attempt
  */
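
To make the arithmetic concrete, here is a standalone walk-through of the same calculation with local div_up()/round_up() helpers standing in for libdevmapper's dm_div_up()/dm_round_up() (ceiling division and rounding up to a multiple). SECTOR_SHIFT of 9 and MIRROR_LOG_OFFSET of 2 sectors are assumed, and the example sizes are made up.

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9		/* 512-byte sectors */
#define MIRROR_LOG_OFFSET 2	/* assumed log header offset, in sectors */
#define BYTE_SHIFT 3

/* Stand-ins for libdevmapper's dm_div_up()/dm_round_up(). */
static size_t div_up(size_t n, size_t d) { return (n + d - 1) / d; }
static size_t round_up(size_t n, size_t m) { return div_up(n, m) * m; }

int main(void)
{
	/* Hypothetical case: 4MiB extents, 512KiB regions, 10GiB mirror leg. */
	uint32_t pe_size = 8192;	/* extent size in sectors */
	uint32_t region_size = 1024;	/* region size in sectors */
	uint32_t area_len = 2560;	/* leg length in extents */

	size_t area_size = (size_t) area_len * pe_size;		/* 20971520 sectors */
	size_t region_count = div_up(area_size, region_size);	/* 20480 regions */

	/* One bit per region, rounded up to whole 32-bit words, then to bytes. */
	size_t bitset_size = round_up(region_count, sizeof(uint32_t) << BYTE_SHIFT) >> BYTE_SHIFT;

	/* Header plus bitset, rounded up to a whole sector, back to sectors. */
	size_t log_size = round_up((MIRROR_LOG_OFFSET << SECTOR_SHIFT) + bitset_size,
				   1 << SECTOR_SHIFT) >> SECTOR_SHIFT;

	printf("log needs %zu sector(s) -> %zu extent(s)\n",
	       log_size, div_up(log_size, pe_size));	/* 7 sectors -> 1 extent */
	return 0;
}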
@@ -535,10 +562,12 @@ static struct alloc_handle *_alloc_init(struct cmd_context *cmd,
 					struct dm_pool *mem,
 					const struct segment_type *segtype,
 					alloc_policy_t alloc,
+					uint32_t new_extents,
 					uint32_t mirrors,
 					uint32_t stripes,
-					uint32_t log_count,
-					uint32_t log_region_size,
+					uint32_t log_area_count,
+					uint32_t extent_size,
+					uint32_t region_size,
 					struct dm_list *parallel_areas)
 {
 	struct alloc_handle *ah;
@@ -549,7 +578,7 @@ static struct alloc_handle *_alloc_init(struct cmd_context *cmd,
 		return NULL;
 	}
 
-	if (log_count && stripes > 1) {
+	if (log_area_count && stripes > 1) {
 		log_error("Can't mix striping with a mirror log yet.");
 		return NULL;
 	}
@@ -561,7 +590,7 @@ static struct alloc_handle *_alloc_init(struct cmd_context *cmd,
 	else
 		area_count = stripes;
 
-	if (!(ah = dm_pool_zalloc(mem, sizeof(*ah) + sizeof(ah->alloced_areas[0]) * area_count))) {
+	if (!(ah = dm_pool_zalloc(mem, sizeof(*ah) + sizeof(ah->alloced_areas[0]) * (area_count + log_area_count)))) {
 		log_error("allocation handle allocation failed");
 		return NULL;
 	}
@@ -576,13 +605,16 @@ static struct alloc_handle *_alloc_init(struct cmd_context *cmd,
 		return NULL;
 	}
 
+	ah->new_extents = new_extents;
 	ah->area_count = area_count;
-	ah->log_count = log_count;
-	ah->log_region_size = log_region_size;
+	ah->log_area_count = log_area_count;
+	ah->region_size = region_size;
 	ah->alloc = alloc;
 	ah->area_multiple = calc_area_multiple(segtype, area_count);
+	ah->log_len = log_area_count ? mirror_log_extents(ah->region_size, extent_size, ah->new_extents / ah->area_multiple) : 0;
 
-	for (s = 0; s < ah->area_count; s++)
+	for (s = 0; s < ah->area_count + ah->log_area_count; s++)
 		dm_list_init(&ah->alloced_areas[s]);
 
 	ah->parallel_areas = parallel_areas;
@@ -644,15 +676,13 @@ static int _setup_alloced_segment(struct logical_volume *lv, uint64_t status,
 				  uint32_t stripe_size,
 				  const struct segment_type *segtype,
 				  struct alloced_area *aa,
-				  uint32_t region_size,
-				  struct logical_volume *log_lv __attribute((unused)))
+				  uint32_t region_size)
 {
 	uint32_t s, extents, area_multiple;
 	struct lv_segment *seg;
 
 	area_multiple = calc_area_multiple(segtype, area_count);
 
-	/* log_lv gets set up elsehere */
 	if (!(seg = alloc_lv_segment(lv->vg->cmd->mem, segtype, lv,
 				     lv->le_count,
 				     aa[0].len * area_multiple,
@@ -685,58 +715,36 @@ static int _setup_alloced_segments(struct logical_volume *lv,
 				   uint64_t status,
 				   uint32_t stripe_size,
 				   const struct segment_type *segtype,
-				   uint32_t region_size,
-				   struct logical_volume *log_lv)
+				   uint32_t region_size)
 {
 	struct alloced_area *aa;
 
 	dm_list_iterate_items(aa, &alloced_areas[0]) {
 		if (!_setup_alloced_segment(lv, status, area_count,
 					    stripe_size, segtype, aa,
-					    region_size, log_lv))
+					    region_size))
 			return_0;
 	}
 
 	return 1;
 }
 
-/*
- * Returns log device size in extents, algorithm from kernel code
- */
-#define BYTE_SHIFT 3
-static uint32_t mirror_log_extents(uint32_t region_size, uint32_t pe_size, uint32_t area_len)
-{
-	size_t area_size, bitset_size, log_size, region_count;
-
-	area_size = area_len * pe_size;
-	region_count = dm_div_up(area_size, region_size);
-
-	/* Work out how many "unsigned long"s we need to hold the bitset. */
-	bitset_size = dm_round_up(region_count, sizeof(uint32_t) << BYTE_SHIFT);
-	bitset_size >>= BYTE_SHIFT;
-
-	/* Log device holds both header and bitset. */
-	log_size = dm_round_up((MIRROR_LOG_OFFSET << SECTOR_SHIFT) + bitset_size, 1 << SECTOR_SHIFT);
-	log_size >>= SECTOR_SHIFT;
-
-	return dm_div_up(log_size, pe_size);
-}
-
 /*
  * This function takes a list of pv_areas and adds them to allocated_areas.
  * If the complete area is not needed then it gets split.
 * The part used is removed from the pv_map so it can't be allocated twice.
 */
 static int _alloc_parallel_area(struct alloc_handle *ah, uint32_t needed,
-				struct pv_area **areas,
-				uint32_t *ix, struct pv_area *log_area,
-				uint32_t log_len)
+				struct pv_area **areas, uint32_t *allocated,
+				unsigned log_needs_allocating, uint32_t ix_log_offset)
 {
-	uint32_t area_len, remaining;
+	uint32_t area_len, len, remaining;
 	uint32_t s;
+	uint32_t ix_log_skip = 0; /* How many areas to skip in middle of array to reach log areas */
+	uint32_t total_area_count = ah->area_count + (log_needs_allocating ? ah->log_area_count : 0);
 	struct alloced_area *aa;
 
-	remaining = needed - *ix;
+	remaining = needed - *allocated;
 	area_len = remaining / ah->area_multiple;
 
 	/* Reduce area_len to the smallest of the areas */
@@ -744,32 +752,35 @@ static int _alloc_parallel_area(struct alloc_handle *ah, uint32_t needed,
 		if (area_len > areas[s]->count)
 			area_len = areas[s]->count;
 
-	if (!(aa = dm_pool_alloc(ah->mem, sizeof(*aa) *
-				 (ah->area_count + (log_area ? 1 : 0))))) {
+	if (!(aa = dm_pool_alloc(ah->mem, sizeof(*aa) * total_area_count))) {
 		log_error("alloced_area allocation failed");
 		return 0;
 	}
 
-	for (s = 0; s < ah->area_count; s++) {
-		aa[s].pv = areas[s]->map->pv;
-		aa[s].pe = areas[s]->start;
-		aa[s].len = area_len;
+	/*
+	 * Areas consists of area_count areas for data stripes, then
+	 * ix_log_skip areas to skip, then log_area_count areas to use for the
+	 * log, then some areas too small for the log.
+	 */
+	len = area_len;
+	for (s = 0; s < total_area_count; s++) {
+		if (s == ah->area_count) {
+			ix_log_skip = ix_log_offset - ah->area_count;
+			len = ah->log_len;
+		}
+		aa[s].pv = areas[s + ix_log_skip]->map->pv;
+		aa[s].pe = areas[s + ix_log_skip]->start;
+		aa[s].len = len;
+		consume_pv_area(areas[s + ix_log_skip], len);
 		dm_list_add(&ah->alloced_areas[s], &aa[s].list);
 	}
 
 	ah->total_area_len += area_len;
 
-	for (s = 0; s < ah->area_count; s++)
-		consume_pv_area(areas[s], area_len);
-
-	if (log_area) {
-		ah->log_area.pv = log_area->map->pv;
-		ah->log_area.pe = log_area->start;
-		ah->log_area.len = log_len;
-		consume_pv_area(log_area, ah->log_area.len);
-	}
-
-	*ix += area_len * ah->area_multiple;
+	*allocated += area_len * ah->area_multiple;
 
 	return 1;
 }
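
The comment block above describes the layout of the areas[] array this loop walks. A sketch of the index arithmetic with hypothetical counts follows; ix_log_offset is assumed to have been computed by the caller (_find_parallel_space) before the call.

#include <stdio.h>

/* Sketch of the new _alloc_parallel_area() indexing on made-up counts. */
int main(void)
{
	unsigned area_count = 2;	/* data stripes */
	unsigned log_area_count = 1;	/* one log area to place */
	unsigned ix_log_offset = 4;	/* caller: first area big enough for the log */
	unsigned total_area_count = area_count + log_area_count;
	unsigned ix_log_skip = 0, s;

	for (s = 0; s < total_area_count; s++) {
		const char *use = "data";
		if (s == area_count) {
			ix_log_skip = ix_log_offset - area_count;
			use = "log";
		}
		printf("aa[%u] <- areas[%u] (%s)\n", s, s + ix_log_skip, use);
	}
	/* aa[0] <- areas[0] (data), aa[1] <- areas[1] (data), aa[2] <- areas[4] (log) */
	return 0;
}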
@@ -990,15 +1001,16 @@ static int _find_parallel_space(struct alloc_handle *ah, alloc_policy_t alloc,
 	unsigned contiguous = 0, cling = 0, preferred_count = 0;
 	unsigned ix;
 	unsigned ix_offset = 0;	/* Offset for non-preferred allocations */
+	unsigned ix_log_offset; /* Offset to start of areas to use for log */
 	unsigned too_small_for_log_count; /* How many too small for log? */
 	uint32_t max_parallel;	/* Maximum extents to allocate */
 	uint32_t next_le;
 	struct seg_pvs *spvs;
 	struct dm_list *parallel_pvs;
 	uint32_t free_pes;
-	uint32_t log_len;
-	struct pv_area *log_area;
 	unsigned log_needs_allocating;
+	struct alloced_area *aa;
+	uint32_t s;
 
 	/* Is there enough total space? */
 	free_pes = pv_maps_size(pvms);
@@ -1062,9 +1074,11 @@ static int _find_parallel_space(struct alloc_handle *ah, alloc_policy_t alloc,
 
 		if (alloc != ALLOC_ANYWHERE) {
 			/* Don't allocate onto the log pv */
-			if (ah->log_count &&
-			    pvm->pv == ah->log_area.pv)
-				continue;	/* Next PV */
+			if (ah->log_area_count)
+				dm_list_iterate_items(aa, &ah->alloced_areas[ah->area_count])
+					for (s = 0; s < ah->log_area_count; s++)
+						if (!aa[s].pv)
+							goto next_pv;
 
 			/* Avoid PVs used by existing parallel areas */
 			if (parallel_pvs)
@@ -1102,7 +1116,7 @@ static int _find_parallel_space(struct alloc_handle *ah, alloc_policy_t alloc,
 				/* Is it big enough on its own? */
 				if (pva->count * ah->area_multiple <
 				    max_parallel - *allocated &&
-				    ((!can_split && !ah->log_count) ||
+				    ((!can_split && !ah->log_area_count) ||
 				     (already_found_one &&
 				      !(alloc == ALLOC_ANYWHERE))))
 					goto next_pv;
@@ -1123,11 +1137,11 @@ static int _find_parallel_space(struct alloc_handle *ah, alloc_policy_t alloc,
 		if ((contiguous || cling) && (preferred_count < ix_offset))
 			break;
 
-		log_needs_allocating = (ah->log_count && !ah->log_area.len) ?
-				       1 : 0;
+		log_needs_allocating = (ah->log_area_count &&
+					dm_list_empty(&ah->alloced_areas[ah->area_count])) ? 1 : 0;
 
 		if (ix + ix_offset < ah->area_count +
-		    (log_needs_allocating ? ah->log_count : 0))
+		    (log_needs_allocating ? ah->log_area_count : 0))
 			break;
 
 		/* sort the areas so we allocate from the biggest */
@@ -1138,38 +1152,28 @@ static int _find_parallel_space(struct alloc_handle *ah, alloc_policy_t alloc,
 		/*
 		 * First time around, if there's a log, allocate it on the
 		 * smallest device that has space for it.
-		 *
-		 * FIXME decide which PV to use at top of function instead
 		 */
 
 		too_small_for_log_count = 0;
+		ix_log_offset = 0;
 
-		if (!log_needs_allocating) {
-			log_len = 0;
-			log_area = NULL;
-		} else {
-			log_len = mirror_log_extents(ah->log_region_size,
-						     pv_pe_size((*areas)->map->pv),
-						     (max_parallel - *allocated) / ah->area_multiple);
-
+		/* FIXME This logic is due to its heritage and can be simplified! */
+		if (log_needs_allocating) {
 			/* How many areas are too small for the log? */
 			while (too_small_for_log_count < ix_offset + ix &&
 			       (*(areas + ix_offset + ix - 1 -
-				  too_small_for_log_count))->count < log_len)
+				  too_small_for_log_count))->count < ah->log_len)
 				too_small_for_log_count++;
-
-			log_area = *(areas + ix_offset + ix - 1 -
-				     too_small_for_log_count);
+			ix_log_offset = ix_offset + ix - too_small_for_log_count - ah->log_area_count;
 		}
 
 		if (ix + ix_offset < ah->area_count +
-		    (log_needs_allocating ? ah->log_count +
+		    (log_needs_allocating ? ah->log_area_count +
 					    too_small_for_log_count : 0))
 			/* FIXME With ALLOC_ANYWHERE, need to split areas */
 			break;
 
 		if (!_alloc_parallel_area(ah, max_parallel, areas, allocated,
-					  log_area, log_len))
+					  log_needs_allocating, ix_log_offset))
 			return_0;
 
 	} while (!contiguous && *allocated != needed && can_split);
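
The too_small_for_log_count scan above can be traced on made-up numbers. In this sketch the areas[] sizes and counts are invented; only the scan and the offset computation mirror the hunk. The candidate areas are assumed sorted biggest-first, as _find_parallel_space() arranges just before this step.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t counts[] = { 500, 400, 300, 200, 1 }; /* areas[s]->count, sorted */
	unsigned ix_offset = 0, ix = 5;	/* 5 usable candidate areas */
	uint32_t log_len = 2;		/* ah->log_len */
	unsigned log_area_count = 1;	/* ah->log_area_count */
	unsigned too_small_for_log_count = 0;

	/* Scan from the smallest end for areas that can't hold the log. */
	while (too_small_for_log_count < ix_offset + ix &&
	       counts[ix_offset + ix - 1 - too_small_for_log_count] < log_len)
		too_small_for_log_count++;

	/* The log lands on the smallest area(s) still big enough for it. */
	printf("ix_log_offset = %u\n",
	       ix_offset + ix - too_small_for_log_count - log_area_count); /* 3 */
	return 0;
}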
@@ -1185,7 +1189,6 @@ static int _find_parallel_space(struct alloc_handle *ah, alloc_policy_t alloc,
 static int _allocate(struct alloc_handle *ah,
 		     struct volume_group *vg,
 		     struct logical_volume *lv,
-		     uint32_t new_extents,
 		     unsigned can_split,
 		     struct dm_list *allocatable_pvs)
 {
@@ -1197,8 +1200,9 @@ static int _allocate(struct alloc_handle *ah,
 	struct dm_list *pvms;
 	uint32_t areas_size;
 	alloc_policy_t alloc;
+	struct alloced_area *aa;
 
-	if (allocated >= new_extents && !ah->log_count) {
+	if (allocated >= ah->new_extents && !ah->log_area_count) {
 		log_error("_allocate called with no work to do!");
 		return 1;
 	}
@@ -1219,14 +1223,14 @@ static int _allocate(struct alloc_handle *ah,
 		stack;
 
 	areas_size = dm_list_size(pvms);
-	if (areas_size && areas_size < (ah->area_count + ah->log_count)) {
+	if (areas_size && areas_size < (ah->area_count + ah->log_area_count)) {
 		if (ah->alloc != ALLOC_ANYWHERE) {
 			log_error("Not enough PVs with free space available "
 				  "for parallel allocation.");
 			log_error("Consider --alloc anywhere if desperate.");
 			return 0;
 		}
-		areas_size = ah->area_count + ah->log_count;
+		areas_size = ah->area_count + ah->log_area_count;
 	}
 
 	/* Upper bound if none of the PVs in prev_lvseg is in pvms */
@@ -1245,29 +1249,31 @@ static int _allocate(struct alloc_handle *ah,
 		old_allocated = allocated;
 		if (!_find_parallel_space(ah, alloc, pvms, areas,
 					  areas_size, can_split,
-					  prev_lvseg, &allocated, new_extents))
+					  prev_lvseg, &allocated, ah->new_extents))
 			goto_out;
-		if ((allocated == new_extents) || (ah->alloc == alloc) ||
+		if ((allocated == ah->new_extents) || (ah->alloc == alloc) ||
 		    (!can_split && (allocated != old_allocated)))
 			break;
 	}
 
-	if (allocated != new_extents) {
+	if (allocated != ah->new_extents) {
 		log_error("Insufficient suitable %sallocatable extents "
 			  "for logical volume %s: %u more required",
 			  can_split ? "" : "contiguous ",
 			  lv ? lv->name : "",
-			  (new_extents - allocated) * ah->area_count
+			  (ah->new_extents - allocated) * ah->area_count
 			  / ah->area_multiple);
 		goto out;
 	}
 
-	if (ah->log_count && !ah->log_area.len) {
-		log_error("Insufficient extents for log allocation "
-			  "for logical volume %s.",
-			  lv ? lv->name : "");
-		goto out;
-	}
+	if (ah->log_area_count)
+		dm_list_iterate_items(aa, &ah->alloced_areas[ah->area_count])
+			if (!aa[0].pv) {
+				log_error("Insufficient extents for log allocation "
+					  "for logical volume %s.",
+					  lv ? lv->name : "");
+				goto out;
+			}
 
 	r = 1;
@@ -1306,12 +1312,13 @@ struct alloc_handle *allocate_extents(struct volume_group *vg,
 				      const struct segment_type *segtype,
 				      uint32_t stripes,
 				      uint32_t mirrors, uint32_t log_count,
-				      uint32_t log_region_size, uint32_t extents,
+				      uint32_t region_size, uint32_t extents,
 				      struct dm_list *allocatable_pvs,
 				      alloc_policy_t alloc,
 				      struct dm_list *parallel_areas)
 {
 	struct alloc_handle *ah;
+	uint32_t new_extents;
 
 	if (segtype_is_virtual(segtype)) {
 		log_error("allocate_extents does not handle virtual segments");
@@ -1331,13 +1338,15 @@ struct alloc_handle *allocate_extents(struct volume_group *vg,
 	if (alloc == ALLOC_INHERIT)
 		alloc = vg->alloc;
 
-	if (!(ah = _alloc_init(vg->cmd, vg->cmd->mem, segtype, alloc, mirrors,
-			       stripes, log_count, log_region_size, parallel_areas)))
+	new_extents = (lv ? lv->le_count : 0) + extents;
+	if (!(ah = _alloc_init(vg->cmd, vg->cmd->mem, segtype, alloc,
+			       new_extents, mirrors, stripes, log_count,
+			       vg->extent_size, region_size,
+			       parallel_areas)))
 		return_NULL;
 
 	if (!segtype_is_virtual(segtype) &&
-	    !_allocate(ah, vg, lv, (lv ? lv->le_count : 0) + extents,
-		       1, allocatable_pvs)) {
+	    !_allocate(ah, vg, lv, 1, allocatable_pvs)) {
 		alloc_destroy(ah);
 		return_NULL;
 	}
@@ -1354,8 +1363,7 @@ int lv_add_segment(struct alloc_handle *ah,
 		   const struct segment_type *segtype,
 		   uint32_t stripe_size,
 		   uint64_t status,
-		   uint32_t region_size,
-		   struct logical_volume *log_lv)
+		   uint32_t region_size)
 {
 	if (!segtype) {
 		log_error("Missing segtype in lv_add_segment().");
@@ -1367,10 +1375,15 @@ int lv_add_segment(struct alloc_handle *ah,
 		return 0;
 	}
 
+	if ((status & MIRROR_LOG) && dm_list_size(&lv->segments)) {
+		log_error("Log segments can only be added to an empty LV");
+		return 0;
+	}
+
 	if (!_setup_alloced_segments(lv, &ah->alloced_areas[first_area],
 				     num_areas, status,
 				     stripe_size, segtype,
-				     region_size, log_lv))
+				     region_size))
 		return_0;
 
 	if ((segtype->flags & SEG_CAN_SPLIT) && !lv_merge_segments(lv)) {
@@ -1546,34 +1559,11 @@ int lv_add_mirror_lvs(struct logical_volume *lv,
 */
 int lv_add_log_segment(struct alloc_handle *ah, struct logical_volume *log_lv)
 {
-	struct lv_segment *seg;
-
-	if (dm_list_size(&log_lv->segments)) {
-		log_error("Log segments can only be added to an empty LV");
-		return 0;
-	}
-
-	if (!(seg = alloc_lv_segment(log_lv->vg->cmd->mem,
-				     get_segtype_from_string(log_lv->vg->cmd,
-							     "striped"),
-				     log_lv, 0, ah->log_area.len, MIRROR_LOG,
-				     0, NULL, 1, ah->log_area.len, 0, 0, 0))) {
-		log_error("Couldn't allocate new mirror log segment.");
-		return 0;
-	}
-
-	if (!set_lv_segment_area_pv(seg, 0, ah->log_area.pv, ah->log_area.pe))
-		return_0;
-
-	dm_list_add(&log_lv->segments, &seg->list);
-	log_lv->le_count += ah->log_area.len;
-	log_lv->size += (uint64_t) log_lv->le_count * log_lv->vg->extent_size;
-
-	if (log_lv->vg->fid->fmt->ops->lv_setup &&
-	    !log_lv->vg->fid->fmt->ops->lv_setup(log_lv->vg->fid, log_lv))
-		return_0;
-
-	return 1;
+	const char *segtype_name = ah->log_area_count > 1 ? "mirror" : "striped";
+
+	return lv_add_segment(ah, ah->area_count, ah->log_area_count, log_lv,
+			      get_segtype_from_string(log_lv->vg->cmd, segtype_name),
+			      0, MIRROR_LOG, 0);
 }
 
 static int _lv_extend_mirror(struct alloc_handle *ah,
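
lv_add_log_segment() now simply delegates to lv_add_segment() over the log's window of alloced_areas, picking a "mirror" segment type once the log itself spans more than one area. A small sketch of that window convention, with hypothetical counts (the first_area/num_areas indexing is what the call above relies on):

#include <stdio.h>

/* lv_add_segment(ah, first_area, num_areas, ...) consumes the lists
 * alloced_areas[first_area .. first_area + num_areas - 1]. */
int main(void)
{
	unsigned area_count = 2, log_area_count = 2;	/* hypothetical */

	/* Data images use the window [0, area_count). */
	printf("mimage segments: alloced_areas[0..%u]\n", area_count - 1);
	/* The log uses [area_count, area_count + log_area_count). */
	printf("log segment (%s): alloced_areas[%u..%u]\n",
	       log_area_count > 1 ? "mirror" : "striped",
	       area_count, area_count + log_area_count - 1);
	return 0;
}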
@@ -1595,7 +1585,7 @@ static int _lv_extend_mirror(struct alloc_handle *ah,
 		if (!lv_add_segment(ah, m++, 1, seg_lv(seg, s),
 				    get_segtype_from_string(lv->vg->cmd,
							    "striped"),
-				    0, 0, 0, NULL)) {
+				    0, 0, 0)) {
 			log_error("Aborting. Failed to extend %s.",
 				  seg_lv(seg, s)->name);
 			return 0;
@@ -1633,7 +1623,7 @@ int lv_extend(struct logical_volume *lv,
 	if (mirrors < 2)
 		r = lv_add_segment(ah, 0, ah->area_count, lv, segtype,
-				   stripe_size, status, 0, NULL);
+				   stripe_size, status, 0);
 	else
 		r = _lv_extend_mirror(ah, lv, extents, 0);

lib/metadata/mirror.c

@@ -1202,7 +1202,7 @@ static int _create_mimage_lvs(struct alloc_handle *ah,
 		if (!lv_add_segment(ah, m, 1, img_lvs[m],
 				    get_segtype_from_string(lv->vg->cmd,
							    "striped"),
-				    0, 0, 0, NULL)) {
+				    0, 0, 0)) {
 			log_error("Aborting. Failed to add mirror image segment "
 				  "to %s. Remove new LV and retry.",
 				  img_lvs[m]->name);