
_ for static fns

Alasdair Kergon 2011-08-19 15:59:15 +00:00
parent 97db301ad4
commit 3250b38583
3 changed files with 63 additions and 67 deletions
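
The change itself is mechanical: file-scope helpers in the RAID code gain a leading underscore, the usual LVM2 cue that a function is static to one source file, and every in-file caller is updated to match. A minimal sketch of the pattern, using one helper visible in the diff below (declarations abbreviated, not the full raid_manip.c code):

    /* before: an internal helper named like exported API */
    static int raid_in_sync(struct logical_volume *lv);

    /* after: the leading underscore marks it as file-local */
    static int _raid_in_sync(struct logical_volume *lv);

    /* call sites in the same file are renamed in the same commit, e.g. */
    if (!_raid_in_sync(lv)) {
            log_error("Unable to split %s/%s while it is not in-sync.",
                      lv->vg->name, lv->name);
            return 0;
    }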


@@ -1,10 +1,10 @@
 Version 2.02.88 -
 ==================================
-Add --merge support for RAID1 images that were split with --trackchanges
-Add support for m-way to n-way up-convert in RAID1 (no linear to n-way yet)
-Add --trackchanges support to --splitmirrors option for RAID1
-Add --splitmirrors support for RAID1 (1 image only)
-When down-converting RAID1, don't activate sub-lvs between suspend/resume
+Add lvconvert --merge support for raid1 devices split with --trackchanges.
+Support lvconvert of -m1 raid1 devices to a higher number.
+Add --trackchanges support to lvconvert --splitmirrors option for raid1.
+Support splitting off a single raid1 rimage in lvconvert --splitmirrors.
+Use sync_local_dev_names when reducing number of raid rimages in lvconvert.
 Add -V as short form of --virtualsize in lvcreate.
 Fix make clean not to remove Makefile. (2.02.87)


@@ -51,7 +51,7 @@ static int _activate_sublv_preserving_excl(struct logical_volume *top_lv,
 }
 /*
- * lv_is_on_pv
+ * _lv_is_on_pv
  * @lv:
  * @pv:
  *
@@ -65,7 +65,7 @@ static int _activate_sublv_preserving_excl(struct logical_volume *top_lv,
  * and be put in lv_manip.c. 'for_each_sub_lv' does not yet allow us to
  * short-circuit execution or pass back the values we need yet though...
  */
-static int lv_is_on_pv(struct logical_volume *lv, struct physical_volume *pv)
+static int _lv_is_on_pv(struct logical_volume *lv, struct physical_volume *pv)
 {
         uint32_t s;
         struct physical_volume *pv2;
@@ -79,7 +79,7 @@ static int lv_is_on_pv(struct logical_volume *lv, struct physical_volume *pv)
                 return 0;
         /* Check mirror log */
-        if (lv_is_on_pv(seg->log_lv, pv))
+        if (_lv_is_on_pv(seg->log_lv, pv))
                 return 1;
         /* Check stack of LVs */
@@ -95,14 +95,14 @@ static int lv_is_on_pv(struct logical_volume *lv, struct physical_volume *pv)
                         }
                         if ((seg_type(seg, s) == AREA_LV) &&
-                            lv_is_on_pv(seg_lv(seg, s), pv))
+                            _lv_is_on_pv(seg_lv(seg, s), pv))
                                 return 1;
                         if (!seg_is_raid(seg))
                                 continue;
                         /* This is RAID, so we know the meta_area is AREA_LV */
-                        if (lv_is_on_pv(seg_metalv(seg, s), pv))
+                        if (_lv_is_on_pv(seg_metalv(seg, s), pv))
                                 return 1;
                 }
         }
@@ -110,12 +110,12 @@ static int lv_is_on_pv(struct logical_volume *lv, struct physical_volume *pv)
         return 0;
 }
-static int lv_is_on_pvs(struct logical_volume *lv, struct dm_list *pvs)
+static int _lv_is_on_pvs(struct logical_volume *lv, struct dm_list *pvs)
 {
         struct pv_list *pvl;
         dm_list_iterate_items(pvl, pvs)
-                if (lv_is_on_pv(lv, pvl->pv)) {
+                if (_lv_is_on_pv(lv, pvl->pv)) {
                         log_debug("%s is on %s", lv->name,
                                   pv_dev_name(pvl->pv));
                         return 1;
@@ -125,7 +125,7 @@ static int lv_is_on_pvs(struct logical_volume *lv, struct dm_list *pvs)
         return 0;
 }
-static int raid_in_sync(struct logical_volume *lv)
+static int _raid_in_sync(struct logical_volume *lv)
 {
         percent_t sync_percent;
@@ -139,7 +139,7 @@ static int raid_in_sync(struct logical_volume *lv)
 }
 /*
- * raid_remove_top_layer
+ * _raid_remove_top_layer
  * @lv
  * @removal_list
  *
@@ -149,7 +149,7 @@ static int raid_in_sync(struct logical_volume *lv)
  *
  * Returns: 1 on succes, 0 on failure
  */
-static int raid_remove_top_layer(struct logical_volume *lv,
+static int _raid_remove_top_layer(struct logical_volume *lv,
                                   struct dm_list *removal_list)
 {
         struct lv_list *lvl_array, *lvl;
@@ -196,7 +196,7 @@ static int raid_remove_top_layer(struct logical_volume *lv,
 }
 /*
- * clear_lv
+ * _clear_lv
  * @lv
  *
  * If LV is active:
@@ -206,7 +206,7 @@ static int raid_remove_top_layer(struct logical_volume *lv,
  *
  * Returns: 1 on success, 0 on failure
  */
-static int clear_lv(struct logical_volume *lv)
+static int _clear_lv(struct logical_volume *lv)
 {
         int was_active = lv_is_active(lv);
@@ -237,7 +237,7 @@ static int clear_lv(struct logical_volume *lv)
 }
 /* Makes on-disk metadata changes */
-static int clear_lvs(struct dm_list *lv_list)
+static int _clear_lvs(struct dm_list *lv_list)
 {
         struct lv_list *lvl;
         struct volume_group *vg = NULL;
@@ -264,7 +264,7 @@ static int clear_lvs(struct dm_list *lv_list)
                 return_0;
         dm_list_iterate_items(lvl, lv_list)
-                if (!clear_lv(lvl->lv))
+                if (!_clear_lv(lvl->lv))
                         return 0;
         return 1;
@@ -452,7 +452,7 @@ static int _alloc_image_components(struct logical_volume *lv,
         return 1;
 }
-static int raid_add_images(struct logical_volume *lv,
+static int _raid_add_images(struct logical_volume *lv,
                             uint32_t new_count, struct dm_list *pvs)
 {
         uint32_t s;
@@ -479,7 +479,7 @@ static int raid_add_images(struct logical_volume *lv,
         }
         /* Metadata LVs must be cleared before being added to the array */
-        if (!clear_lvs(&meta_lvs))
+        if (!_clear_lvs(&meta_lvs))
                 goto fail;
         /*
@@ -650,7 +650,7 @@ static int _extract_image_components(struct lv_segment *seg, uint32_t idx,
 }
 /*
- * raid_extract_images
+ * _raid_extract_images
  * @lv
  * @new_count: The absolute count of images (e.g. '2' for a 2-way mirror)
  * @target_pvs: The list of PVs that are candidates for removal
@@ -666,7 +666,7 @@ static int _extract_image_components(struct lv_segment *seg, uint32_t idx,
  *
  * Returns: 1 on success, 0 on failure
  */
-static int raid_extract_images(struct logical_volume *lv, uint32_t new_count,
+static int _raid_extract_images(struct logical_volume *lv, uint32_t new_count,
                                 struct dm_list *target_pvs, int shift,
                                 struct dm_list *extracted_meta_lvs,
                                 struct dm_list *extracted_data_lvs)
@@ -687,10 +687,10 @@ static int raid_extract_images(struct logical_volume *lv, uint32_t new_count,
                 return_0;
         for (s = seg->area_count - 1; (s >= 0) && extract; s--) {
-                if (!lv_is_on_pvs(seg_lv(seg, s), target_pvs) ||
-                    !lv_is_on_pvs(seg_metalv(seg, s), target_pvs))
+                if (!_lv_is_on_pvs(seg_lv(seg, s), target_pvs) ||
+                    !_lv_is_on_pvs(seg_metalv(seg, s), target_pvs))
                         continue;
-                if (!raid_in_sync(lv) &&
+                if (!_raid_in_sync(lv) &&
                     (!seg_is_mirrored(seg) || (s == 0))) {
                         log_error("Unable to extract %sRAID image"
                                   " while RAID array is not in-sync",
@@ -724,7 +724,7 @@ static int raid_extract_images(struct logical_volume *lv, uint32_t new_count,
         return 1;
 }
-static int raid_remove_images(struct logical_volume *lv,
+static int _raid_remove_images(struct logical_volume *lv,
                                uint32_t new_count, struct dm_list *pvs)
 {
         struct dm_list removal_list;
@@ -732,7 +732,7 @@ static int raid_remove_images(struct logical_volume *lv,
         dm_list_init(&removal_list);
-        if (!raid_extract_images(lv, new_count, pvs, 1,
+        if (!_raid_extract_images(lv, new_count, pvs, 1,
                                  &removal_list, &removal_list)) {
                 log_error("Failed to extract images from %s/%s",
                           lv->vg->name, lv->name);
@@ -740,7 +740,7 @@ static int raid_remove_images(struct logical_volume *lv,
         }
         /* Convert to linear? */
-        if ((new_count == 1) && !raid_remove_top_layer(lv, &removal_list)) {
+        if ((new_count == 1) && !_raid_remove_top_layer(lv, &removal_list)) {
                 log_error("Failed to remove RAID layer after linear conversion");
                 return 0;
         }
@@ -824,9 +824,9 @@ int lv_raid_change_image_count(struct logical_volume *lv,
         }
         if (old_count > new_count)
-                return raid_remove_images(lv, new_count, pvs);
+                return _raid_remove_images(lv, new_count, pvs);
-        return raid_add_images(lv, new_count, pvs);
+        return _raid_add_images(lv, new_count, pvs);
 }
 int lv_raid_split(struct logical_volume *lv, const char *split_name,
@@ -859,13 +859,13 @@ int lv_raid_split(struct logical_volume *lv, const char *split_name,
                 return 0;
         }
-        if (!raid_in_sync(lv)) {
+        if (!_raid_in_sync(lv)) {
                 log_error("Unable to split %s/%s while it is not in-sync.",
                           lv->vg->name, lv->name);
                 return 0;
         }
-        if (!raid_extract_images(lv, new_count, splittable_pvs, 1,
+        if (!_raid_extract_images(lv, new_count, splittable_pvs, 1,
                                  &removal_list, &data_list)) {
                 log_error("Failed to extract images from %s/%s",
                           lv->vg->name, lv->name);
@@ -873,7 +873,7 @@ int lv_raid_split(struct logical_volume *lv, const char *split_name,
         }
         /* Convert to linear? */
-        if ((new_count == 1) && !raid_remove_top_layer(lv, &removal_list)) {
+        if ((new_count == 1) && !_raid_remove_top_layer(lv, &removal_list)) {
                 log_error("Failed to remove RAID layer after linear conversion");
                 return 0;
         }
@@ -961,14 +961,14 @@ int lv_raid_split_and_track(struct logical_volume *lv,
                 return 0;
         }
-        if (!raid_in_sync(lv)) {
+        if (!_raid_in_sync(lv)) {
                 log_error("Unable to split image from %s/%s while not in-sync",
                           lv->vg->name, lv->name);
                 return 0;
         }
         for (s = seg->area_count - 1; s >= 0; s--) {
-                if (!lv_is_on_pvs(seg_lv(seg, s), splittable_pvs))
+                if (!_lv_is_on_pvs(seg_lv(seg, s), splittable_pvs))
                         continue;
                 lv_set_visible(seg_lv(seg, s));
                 seg_lv(seg, s)->status &= ~LVM_WRITE;


@@ -43,8 +43,8 @@ static int _raid_text_import_area_count(const struct config_node *sn,
         return 1;
 }
-static int
-_raid_text_import_areas(struct lv_segment *seg, const struct config_node *sn,
+static int _raid_text_import_areas(struct lv_segment *seg,
+                                   const struct config_node *sn,
                                    const struct config_node *cn)
 {
         unsigned int s;
@@ -100,8 +100,8 @@ _raid_text_import_areas(struct lv_segment *seg, const struct config_node *sn,
         return 1;
 }
-static int
-_raid_text_import(struct lv_segment *seg, const struct config_node *sn,
+static int _raid_text_import(struct lv_segment *seg,
+                             const struct config_node *sn,
                              struct dm_hash_table *pv_hash)
 {
         const struct config_node *cn;
@@ -139,8 +139,7 @@ _raid_text_import(struct lv_segment *seg, const struct config_node *sn,
         return 1;
 }
-static int
-_raid_text_export(const struct lv_segment *seg, struct formatter *f)
+static int _raid_text_export(const struct lv_segment *seg, struct formatter *f)
 {
         outf(f, "device_count = %u", seg->area_count);
         if (seg->region_size)
@@ -151,8 +150,7 @@ _raid_text_export(const struct lv_segment *seg, struct formatter *f)
         return out_areas(f, seg, "raid");
 }
-static int
-_raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
+static int _raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
                                  struct dm_pool *mem __attribute__((unused)),
                                  struct cmd_context *cmd __attribute__((unused)),
                                  void **target_state __attribute__((unused)),
@@ -245,8 +243,7 @@ static int _raid_target_percent(void **target_state,
 }
-static int
-_raid_target_present(struct cmd_context *cmd,
+static int _raid_target_present(struct cmd_context *cmd,
                                 const struct lv_segment *seg __attribute__((unused)),
                                 unsigned *attributes __attribute__((unused)))
 {
@@ -261,8 +258,7 @@ _raid_target_present(struct cmd_context *cmd,
         return _raid_present;
 }
-static int
-_raid_modules_needed(struct dm_pool *mem,
+static int _raid_modules_needed(struct dm_pool *mem,
                                 const struct lv_segment *seg __attribute__((unused)),
                                 struct dm_list *modules)
 {