Tiering: change the task status shown for remove-brick and rebalance

When we trigger a detach-tier start on a tiered volume,
the volume status task list shows it as "Remove brick" instead of "Detach tier".

Status of volume: vol1
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Hot Bricks:
Brick 10.70.42.171:/data/gluster/hbr1       49154     0          Y       25098
Cold Bricks:
Brick 10.70.42.171:/data/gluster/p1         49152     0          Y       25101
Brick 10.70.42.171:/data/gluster/p2         49153     0          Y       25112
NFS Server on localhost                     N/A       N/A        N       N/A

Task Status of Volume vol1
------------------------------------------------------------------------------
Task                 : Tier migrate
ID                   : e11d5a3d-b1ae-4c3f-8f95-b28993c60939
Status               : in progress

Status of volume: vol1
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Hot Bricks:
Brick 10.70.42.171:/data/gluster/hbr1       49154     0          Y       25098
Cold Bricks:
Brick 10.70.42.171:/data/gluster/p1         49152     0          Y       25101
Brick 10.70.42.171:/data/gluster/p2         49153     0          Y       25112
NFS Server on localhost                     N/A       N/A        N       N/A

Task Status of Volume vol1
------------------------------------------------------------------------------
Task                 : Detach tier
ID                   : 76d700b1-5bbd-43ed-95fd-1640b2b4af31
Status               : completed

Change-Id: I4bd3b340d4e700e8afed00e1478b8a8b54dfe2e2
BUG: 1261837
Signed-off-by: hari gowtham <hgowtham@redhat.com>
Signed-off-by: Hari Gowtham <hgowtham@redhat.com>
Reviewed-on: http://review.gluster.org/12149
Tested-by: NetBSD Build System <jenkins@build.gluster.org>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Dan Lambright <dlambrig@redhat.com>
Tested-by: Dan Lambright <dlambrig@redhat.com>
This commit is contained in:
hari gowtham 2015-09-09 19:17:17 +05:30 committed by Dan Lambright
parent bc11be7864
commit 2ebfc3d0a6
4 changed files with 28 additions and 12 deletions

View File

@ -2955,6 +2955,7 @@ _add_task_to_dict (dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
GF_ASSERT (this);
switch (op) {
case GD_OP_DETACH_TIER:
case GD_OP_REMOVE_BRICK:
snprintf (key, sizeof (key), "task%d", index);
ret = _add_remove_bricks_to_dict (dict, volinfo, key);
@ -2964,6 +2965,7 @@ _add_task_to_dict (dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
"Failed to add remove bricks to dict");
goto out;
}
case GD_OP_TIER_MIGRATE:
case GD_OP_REBALANCE:
uuid_str = gf_strdup (uuid_utoa (volinfo->rebal.rebalance_id));
status = volinfo->rebal.defrag_status;
@ -3027,8 +3029,19 @@ glusterd_aggregate_task_status (dict_t *rsp_dict, glusterd_volinfo_t *volinfo)
GF_ASSERT (this);
if (!gf_uuid_is_null (volinfo->rebal.rebalance_id)) {
ret = _add_task_to_dict (rsp_dict, volinfo, volinfo->rebal.op,
tasks);
if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
if (volinfo->rebal.op == GD_OP_REMOVE_BRICK)
ret = _add_task_to_dict (rsp_dict, volinfo,
GD_OP_DETACH_TIER,
tasks);
else if (volinfo->rebal.op == GD_OP_REBALANCE)
ret = _add_task_to_dict (rsp_dict, volinfo,
GD_OP_TIER_MIGRATE,
tasks);
} else
ret = _add_task_to_dict (rsp_dict, volinfo,
volinfo->rebal.op, tasks);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_DICT_SET_FAILED,
@ -4499,16 +4512,15 @@ glusterd_op_modify_op_ctx (glusterd_op_t op, void *ctx)
goto out;
for (i = 0; i <= brick_index_max; i++) {
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.rdma_port", i);
ret = dict_get_str (op_ctx, key, &port);
if (ret) {
ret = dict_set_str (op_ctx, key, "\0");
if (ret)
goto out;
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "brick%d.rdma_port", i);
ret = dict_get_str (op_ctx, key, &port);
if (ret) {
ret = dict_set_str (op_ctx, key, "\0");
if (ret)
goto out;
}
}
glusterd_volinfo_find (volname, &volinfo);
if (conf->op_version < GD_OP_VERSION_3_7_0 &&
volinfo->transport_type == GF_TRANSPORT_RDMA) {
@ -5425,7 +5437,6 @@ glusterd_op_stage_validate (glusterd_op_t op, dict_t *dict, char **op_errstr,
case GD_OP_RESET_VOLUME:
ret = glusterd_op_stage_reset_volume (dict, op_errstr);
break;
case GD_OP_REMOVE_BRICK:
ret = glusterd_op_stage_remove_brick (dict, op_errstr);
break;
@ -6899,7 +6910,6 @@ glusterd_op_bricks_select (glusterd_op_t op, dict_t *dict, char **op_errstr,
ret = glusterd_bricks_select_stop_volume (dict, op_errstr,
selected);
break;
case GD_OP_REMOVE_BRICK:
ret = glusterd_bricks_select_remove_brick (dict, op_errstr,
selected);

View File

@ -58,6 +58,7 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
ctx = op_ctx;
switch (op) {
case GD_OP_DETACH_TIER:
case GD_OP_REMOVE_BRICK:
{
if (ctx)
@ -70,6 +71,7 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
errstr = "Error while resetting options";
break;
}
case GD_OP_TIER_MIGRATE:
case GD_OP_REBALANCE:
case GD_OP_DEFRAG_BRICK_VOLUME:
{

View File

@ -101,6 +101,8 @@ const char *gd_op_list[GD_OP_MAX + 1] = {
[GD_OP_STOP_VOLUME] = "Stop",
[GD_OP_DEFRAG_VOLUME] = "Rebalance",
[GD_OP_ADD_BRICK] = "Add brick",
[GD_OP_DETACH_TIER] = "Detach tier",
[GD_OP_TIER_MIGRATE] = "Tier migration",
[GD_OP_REMOVE_BRICK] = "Remove brick",
[GD_OP_REPLACE_BRICK] = "Replace brick",
[GD_OP_SET_VOLUME] = "Set",

View File

@ -88,6 +88,8 @@ typedef enum glusterd_op_ {
GD_OP_DEFRAG_VOLUME,
GD_OP_ADD_BRICK,
GD_OP_REMOVE_BRICK,
GD_OP_DETACH_TIER,
GD_OP_TIER_MIGRATE,
GD_OP_REPLACE_BRICK,
GD_OP_SET_VOLUME,
GD_OP_RESET_VOLUME,