tier: separation of attach-tier from add-brick

PROBLEM: Both attach-tier and add-brick use the same RPC
and code path. This becomes a hurdle while trying to
implement add-brick on a tiered volume.

FIX: This patch separates add-brick from attach-tier,
giving each its own RPC.

Change-Id: Iec57e972be968a9ff00b15b507e56a4f6dc398a2
BUG: 1376326
Signed-off-by: hari gowtham <hgowtham@redhat.com>
Reviewed-on: https://review.gluster.org/15503
Smoke: Gluster Build System <jenkins@build.gluster.org>
Tested-by: hari gowtham <hari.gowtham005@gmail.com>
Reviewed-by: Samikshan Bairagya <samikshan@gmail.com>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
hari gowtham 2017-01-23 16:38:00 +05:30 committed by Atin Mukherjee
parent 3023c4bd76
commit e5db980504
10 changed files with 442 additions and 8 deletions
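
The mechanical core of the change is visible in the hunks below: a new
procedure number (GLUSTER_CLI_ADD_TIER_BRICK), a CLI-side proc-table
entry with its request/callback pair, and a glusterd-side actor plus
handler. As orientation, here is a standalone sketch of the
table-driven dispatch pattern both sides extend; the names and values
are hypothetical stand-ins, not gluster code:

#include <stdio.h>

/* Standalone sketch: each procedure number indexes a table entry that
 * names the operation and points at its handler, so giving an op its
 * own number is what gives it its own code path. */
typedef int (*handler_fn) (const char *volname);

static int attach_tier (const char *volname)
{
        printf ("attach-tier on %s\n", volname);
        return 0;
}

static int add_tier_brick (const char *volname)
{
        printf ("add-tier-brick on %s\n", volname);
        return 0;
}

enum { PROC_ATTACH_TIER, PROC_ADD_TIER_BRICK, PROC_MAX };

static struct {
        const char *name;
        handler_fn  fn;
} actors[PROC_MAX] = {
        [PROC_ATTACH_TIER]    = { "ATTACH_TIER",    attach_tier },
        [PROC_ADD_TIER_BRICK] = { "ADD_TIER_BRICK", add_tier_brick },
};

int main (void)
{
        /* a separate procedure id now selects a separate handler */
        return actors[PROC_ADD_TIER_BRICK].fn ("patchy");
}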


@@ -1169,7 +1169,7 @@ do_cli_cmd_volume_attach_tier (struct cli_state *state,
if (ret)
goto out;
-proc = &cli_rpc_prog->proctable[GLUSTER_CLI_ATTACH_TIER];
+proc = &cli_rpc_prog->proctable[GLUSTER_CLI_ADD_TIER_BRICK];
CLI_LOCAL_INIT (local, words, frame, options);


@@ -2366,6 +2366,58 @@ out:
return ret;
}
int
gf_cli_add_tier_brick_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
int ret = -1;
char msg[1024] = {0,};
GF_VALIDATE_OR_GOTO ("cli", myframe, out);
if (-1 == req->rpc_status) {
goto out;
}
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
"Failed to decode xdr response");
goto out;
}
gf_log ("cli", GF_LOG_INFO, "Received resp to attach tier");
if (rsp.op_ret && strcmp (rsp.op_errstr, ""))
snprintf (msg, sizeof (msg), "%s", rsp.op_errstr);
else
snprintf (msg, sizeof (msg), "Attach tier %s",
(rsp.op_ret) ? "unsuccessful" : "successful");
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_str ("volAttachTier", msg, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
if (ret)
gf_log ("cli", GF_LOG_ERROR,
"Error outputting to xml");
goto out;
}
if (rsp.op_ret)
cli_err ("volume attach-tier: failed: %s", msg);
else
cli_out ("volume attach-tier: success");
ret = rsp.op_ret;
out:
cli_cmd_broadcast_response (ret);
free (rsp.dict.dict_val);
free (rsp.op_errstr);
return ret;
}
int
gf_cli_attach_tier_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
@@ -2374,7 +2426,7 @@ gf_cli_attach_tier_cbk (struct rpc_req *req, struct iovec *iov,
int ret = -1;
char msg[1024] = {0,};
-GF_ASSERT (myframe);
+GF_VALIDATE_OR_GOTO ("cli", myframe, out);
if (-1 == req->rpc_status) {
goto out;
@@ -4885,6 +4937,39 @@ out:
return ret;
}
int32_t
gf_cli_add_tier_brick (call_frame_t *frame, xlator_t *this,
void *data)
{
gf_cli_req req = { {0,} };
int ret = 0;
dict_t *dict = NULL;
if (!frame || !this || !data) {
ret = -1;
goto out;
}
dict = data;
ret = cli_to_glusterd (&req, frame, gf_cli_add_tier_brick_cbk,
(xdrproc_t) xdr_gf_cli_req, dict,
GLUSTER_CLI_ADD_TIER_BRICK, this,
cli_rpc_prog, NULL);
if (ret) {
gf_log ("cli", GF_LOG_ERROR, "Failed to send request to "
"glusterd");
goto out;
}
out:
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
GF_FREE (req.dict.dict_val);
return ret;
}
int32_t
gf_cli_attach_tier (call_frame_t *frame, xlator_t *this,
void *data)
@@ -11961,7 +12046,8 @@ struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = {
[GLUSTER_CLI_TIER] = {"TIER", gf_cli_tier},
[GLUSTER_CLI_GET_STATE] = {"GET_STATE", gf_cli_get_state},
[GLUSTER_CLI_RESET_BRICK] = {"RESET_BRICK", gf_cli_reset_brick},
-[GLUSTER_CLI_REMOVE_TIER_BRICK] = {"DETACH_TIER", gf_cli_remove_tier_brick}
+[GLUSTER_CLI_REMOVE_TIER_BRICK] = {"DETACH_TIER", gf_cli_remove_tier_brick},
+[GLUSTER_CLI_ADD_TIER_BRICK] = {"ADD_TIER_BRICK", gf_cli_add_tier_brick}
};
struct rpc_clnt_program cli_prog = {


@@ -201,6 +201,7 @@ enum gluster_cli_procnum {
GLUSTER_CLI_GET_STATE,
GLUSTER_CLI_RESET_BRICK,
GLUSTER_CLI_REMOVE_TIER_BRICK,
GLUSTER_CLI_ADD_TIER_BRICK,
GLUSTER_CLI_MAXVALUE,
};
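
New procedure numbers are appended just before GLUSTER_CLI_MAXVALUE so
the ids of existing procedures stay stable on the wire across mixed
versions. A standalone sketch of why appending is safe (illustrative
values, not the real ones):

#include <stdio.h>

/* C enum members number sequentially from the last explicit value, so
 * a member appended before the sentinel takes the next free id and
 * leaves every existing id untouched. */
enum procnum {
        PROC_REMOVE_TIER_BRICK = 46,   /* illustrative value */
        PROC_ADD_TIER_BRICK,           /* new entry: next free slot */
        PROC_MAXVALUE,                 /* sentinel also sizes the tables */
};

int main (void)
{
        printf ("ADD_TIER_BRICK = %d, MAXVALUE = %d\n",
                PROC_ADD_TIER_BRICK, PROC_MAXVALUE);
        return 0;
}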


@@ -2593,6 +2593,86 @@ out:
return ret;
}
int
glusterd_op_add_tier_brick (dict_t *dict, char **op_errstr)
{
int ret = 0;
char *volname = NULL;
glusterd_conf_t *priv = NULL;
glusterd_volinfo_t *volinfo = NULL;
xlator_t *this = NULL;
char *bricks = NULL;
int32_t count = 0;
this = THIS;
GF_VALIDATE_OR_GOTO ("glusterd", this, out);
priv = this->private;
GF_VALIDATE_OR_GOTO (this->name, priv, out);
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
gf_msg ("glusterd", GF_LOG_ERROR, errno,
GD_MSG_DICT_GET_FAILED, "Unable to get volume name");
goto out;
}
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
GD_MSG_VOL_NOT_FOUND, "Volume not found");
goto out;
}
ret = dict_get_int32 (dict, "count", &count);
if (ret) {
gf_msg ("glusterd", GF_LOG_ERROR, errno,
GD_MSG_DICT_GET_FAILED, "Unable to get count");
goto out;
}
ret = dict_get_str (dict, "bricks", &bricks);
if (ret) {
gf_msg ("glusterd", GF_LOG_ERROR, errno,
GD_MSG_DICT_GET_FAILED, "Unable to get bricks");
goto out;
}
if (dict_get(dict, "attach-tier")) {
gf_msg_debug (THIS->name, 0, "Adding tier");
glusterd_op_perform_attach_tier (dict, volinfo, count, bricks);
}
ret = glusterd_op_perform_add_bricks (volinfo, count, bricks, dict);
if (ret) {
gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_BRICK_ADD_FAIL, "Unable to add bricks");
goto out;
}
if (priv->op_version <= GD_OP_VERSION_3_10_0) {
ret = glusterd_store_volinfo (volinfo,
GLUSTERD_VOLINFO_VER_AC_INCREMENT);
if (ret)
goto out;
} else {
/*
* The cluster is operating at version greater than
* gluster-3.10.0. So no need to store volfiles
* in commit phase, the same will be done
* in post validate phase with v3 framework.
*/
}
if (GLUSTERD_STATUS_STARTED == volinfo->status)
ret = glusterd_svcs_manager (volinfo);
out:
return ret;
}
void
glusterd_op_perform_detach_tier (glusterd_volinfo_t *volinfo)
{
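
Note the op-version gate in the commit path above: clusters at or below
GD_OP_VERSION_3_10_0 store the volinfo during commit, while newer
clusters defer that to the post-validate phase of the v3 framework. A
standalone sketch of the gating pattern (illustrative encoding, not
gluster code):

#include <stdio.h>

#define OP_VERSION_3_10_0 31000   /* illustrative encoding */

static int store_volinfo (void)
{
        printf ("storing volinfo in commit phase\n");
        return 0;
}

int main (void)
{
        int cluster_op_version = 30900;   /* pretend: an older cluster */

        if (cluster_op_version <= OP_VERSION_3_10_0)
                return store_volinfo ();
        /* newer clusters do this in post-validate instead */
        return 0;
}
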
@@ -3110,6 +3190,197 @@ out:
return ret;
}
int
__glusterd_handle_add_tier_brick (rpcsvc_request_t *req)
{
int32_t ret = -1;
gf_cli_req cli_req = {{0,} };
dict_t *dict = NULL;
char *bricks = NULL;
char *volname = NULL;
int brick_count = 0;
void *cli_rsp = NULL;
char err_str[2048] = {0,};
gf_cli_rsp rsp = {0,};
glusterd_volinfo_t *volinfo = NULL;
xlator_t *this = NULL;
int32_t replica_count = 0;
int32_t arbiter_count = 0;
int type = 0;
this = THIS;
GF_VALIDATE_OR_GOTO ("glusterd", this, out);
GF_VALIDATE_OR_GOTO (this->name, req, out);
ret = xdr_to_generic (req->msg[0], &cli_req,
(xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
/*failed to decode msg*/
req->rpc_err = GARBAGE_ARGS;
snprintf (err_str, sizeof (err_str), "Garbage args received");
gf_msg (this->name, GF_LOG_ERROR, errno,
GD_MSG_GARBAGE_ARGS, "%s", err_str);
goto out;
}
gf_msg (this->name, GF_LOG_INFO, 0,
GD_MSG_ADD_BRICK_REQ_RECVD, "Received add brick req");
if (cli_req.dict.dict_len) {
/* Unserialize the dictionary */
dict = dict_new ();
ret = dict_unserialize (cli_req.dict.dict_val,
cli_req.dict.dict_len,
&dict);
if (ret < 0) {
gf_msg (this->name, GF_LOG_ERROR, errno,
GD_MSG_DICT_UNSERIALIZE_FAIL,
"failed to "
"unserialize req-buffer to dictionary");
snprintf (err_str, sizeof (err_str), "Unable to decode "
"the command");
goto out;
}
}
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
snprintf (err_str, sizeof (err_str), "Unable to get volume "
"name");
gf_msg (this->name, GF_LOG_ERROR, errno,
GD_MSG_DICT_GET_FAILED, "%s", err_str);
goto out;
}
if (!glusterd_check_volume_exists (volname)) {
snprintf (err_str, sizeof (err_str), "Volume %s does not exist",
volname);
gf_msg (this->name, GF_LOG_ERROR, EINVAL,
GD_MSG_VOL_NOT_FOUND, "%s", err_str);
ret = -1;
goto out;
}
ret = dict_get_int32 (dict, "count", &brick_count);
if (ret) {
snprintf (err_str, sizeof (err_str), "Unable to get volume "
"brick count");
gf_msg (this->name, GF_LOG_ERROR, errno,
GD_MSG_DICT_GET_FAILED, "%s", err_str);
goto out;
}
ret = dict_get_int32 (dict, "replica-count", &replica_count);
if (!ret) {
gf_msg (this->name, GF_LOG_INFO, errno,
GD_MSG_DICT_GET_SUCCESS, "replica-count is %d",
replica_count);
}
ret = dict_get_int32 (dict, "arbiter-count", &arbiter_count);
if (!ret) {
gf_msg (this->name, GF_LOG_INFO, errno,
GD_MSG_DICT_GET_SUCCESS, "arbiter-count is %d",
arbiter_count);
}
if (!dict_get (dict, "force")) {
gf_msg (this->name, GF_LOG_ERROR, errno,
GD_MSG_DICT_GET_FAILED, "Failed to get flag");
ret = -1;
goto out;
}
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
snprintf (err_str, sizeof (err_str), "Unable to get volinfo "
"for volume name %s", volname);
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_VOLINFO_GET_FAIL, "%s", err_str);
goto out;
}
if (glusterd_is_tiering_supported(err_str) == _gf_false) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_VERSION_UNSUPPORTED,
"Tiering not supported at this version");
ret = -1;
goto out;
}
if (dict_get (dict, "attach-tier")) {
if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
snprintf (err_str, sizeof (err_str),
"Volume %s is already a tier.", volname);
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_VOL_ALREADY_TIER, "%s", err_str);
ret = -1;
goto out;
}
ret = dict_get_int32 (dict, "hot-type", &type);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, errno,
GD_MSG_DICT_GET_FAILED,
"failed to get type from dictionary");
goto out;
}
}
ret = dict_get_str (dict, "bricks", &bricks);
if (ret) {
snprintf (err_str, sizeof (err_str), "Unable to get volume "
"bricks");
gf_msg (this->name, GF_LOG_ERROR, errno,
GD_MSG_DICT_GET_FAILED, "%s", err_str);
goto out;
}
if (type != volinfo->type) {
ret = dict_set_int32 (dict, "type", type);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, errno,
GD_MSG_DICT_SET_FAILED,
"failed to set the new type in dict");
goto out;
}
}
ret = glusterd_mgmt_v3_initiate_all_phases (req,
GD_OP_ADD_TIER_BRICK,
dict);
out:
if (ret) {
rsp.op_ret = -1;
rsp.op_errno = 0;
if (err_str[0] == '\0')
snprintf (err_str, sizeof (err_str),
"Operation failed");
rsp.op_errstr = err_str;
cli_rsp = &rsp;
glusterd_to_cli (req, cli_rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gf_cli_rsp, dict);
ret = 0; /*sent error to cli, prevent second reply*/
}
free (cli_req.dict.dict_val); /* it's malloc'ed by xdr */
return ret;
}
int
glusterd_handle_add_tier_brick (rpcsvc_request_t *req)
{
return glusterd_big_locked_handler (req,
__glusterd_handle_add_tier_brick);
}
int
glusterd_handle_attach_tier (rpcsvc_request_t *req)
{
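
glusterd_handle_add_tier_brick follows the usual glusterd convention of
wrapping the real worker in glusterd_big_locked_handler, which, as the
name suggests, runs CLI request handlers serialized under glusterd's
big lock. A minimal standalone sketch of that wrapper shape, with a
plain pthread mutex standing in for the real lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static int __handle_add_tier_brick (void *req)
{
        printf ("handling request %p under the big lock\n", req);
        return 0;
}

/* the wrapper serializes every actor behind one lock */
static int big_locked_handler (void *req, int (*actor) (void *))
{
        int ret;

        pthread_mutex_lock (&big_lock);
        ret = actor (req);
        pthread_mutex_unlock (&big_lock);
        return ret;
}

int main (void)
{
        int dummy_req = 0;

        return big_locked_handler (&dummy_req, __handle_add_tier_brick);
}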


@@ -6257,6 +6257,7 @@ rpcsvc_actor_t gd_svc_cli_actors[GLUSTER_CLI_MAXVALUE] = {
[GLUSTER_CLI_RESET_BRICK] = {"RESET_BRICK", GLUSTER_CLI_RESET_BRICK, glusterd_handle_reset_brick, NULL, 0, DRC_NA},
[GLUSTER_CLI_TIER] = {"TIER", GLUSTER_CLI_TIER, glusterd_handle_tier, NULL, 0, DRC_NA},
[GLUSTER_CLI_REMOVE_TIER_BRICK] = {"REMOVE_TIER_BRICK", GLUSTER_CLI_REMOVE_TIER_BRICK, glusterd_handle_tier, NULL, 0, DRC_NA},
[GLUSTER_CLI_ADD_TIER_BRICK] = {"ADD_TIER_BRICK", GLUSTER_CLI_ADD_TIER_BRICK, glusterd_handle_add_tier_brick, NULL, 0, DRC_NA},
};
struct rpcsvc_program gd_svc_cli_prog = {


@@ -41,7 +41,7 @@
#define GLUSTERD_COMP_BASE GLFS_MSGID_GLUSTERD
-#define GLFS_NUM_MESSAGES 606
+#define GLFS_NUM_MESSAGES 608
#define GLFS_MSGID_END (GLUSTERD_COMP_BASE + GLFS_NUM_MESSAGES + 1)
/* Messaged with message IDs */
@@ -4815,9 +4815,6 @@
* @recommendedaction
*
*/
/*------------*/
#define GD_MSG_BRICK_MX_SET_FAIL (GLUSTERD_COMP_BASE + 596)
/*!
* @messageid
@@ -4835,7 +4832,6 @@
*/
#define GD_MSG_TIER_WATERMARK_RESET_FAIL (GLUSTERD_COMP_BASE + 598)
/*!
* @messageid
* @diagnosis
@@ -4909,6 +4905,14 @@
*/
#define GD_MSG_STATVFS_FAILED (GLUSTERD_COMP_BASE + 607)
/*!
* @messageid
* @diagnosis
* @recommendedaction
*
*/
#define GD_MSG_GARBAGE_ARGS (GLUSTERD_COMP_BASE + 608)
/*------------*/
#define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages"


@@ -169,6 +169,7 @@ gd_mgmt_v3_pre_validate_fn (glusterd_op_t op, dict_t *dict,
goto out;
}
break;
case GD_OP_ADD_TIER_BRICK:
case GD_OP_ADD_BRICK:
ret = glusterd_op_stage_add_brick (dict, op_errstr, rsp_dict);
if (ret) {
@@ -391,6 +392,19 @@ gd_mgmt_v3_commit_fn (glusterd_op_t op, dict_t *dict,
"tier status commit failed");
goto out;
}
break;
}
case GD_OP_ADD_TIER_BRICK:
{
ret = glusterd_op_add_tier_brick (dict, op_errstr);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_COMMIT_OP_FAIL,
"tier add-brick commit failed.");
goto out;
}
break;
}
default:
@@ -492,6 +506,54 @@ gd_mgmt_v3_post_validate_fn (glusterd_op_t op, int32_t op_ret, dict_t *dict,
}
break;
}
case GD_OP_ADD_TIER_BRICK:
{
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_GET_FAILED, "Unable to get"
" volume name");
goto out;
}
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
GD_MSG_VOL_NOT_FOUND, "Unable to "
"allocate memory");
goto out;
}
ret = glusterd_create_volfiles_and_notify_services (
volinfo);
if (ret)
goto out;
ret = glusterd_store_volinfo (volinfo,
GLUSTERD_VOLINFO_VER_AC_INCREMENT);
if (ret)
goto out;
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
gf_msg ("glusterd", GF_LOG_ERROR, 0,
GD_MSG_DICT_GET_FAILED, "Unable to get"
" volume name");
goto out;
}
volinfo->is_tier_enabled = _gf_true;
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, errno,
GD_MSG_DICT_SET_FAILED, "dict set "
"failed");
goto out;
}
ret = -1;
svc = &(volinfo->tierd.svc);
ret = svc->manager (svc, volinfo,
PROC_START_NO_WAIT);
if (ret)
goto out;
break;
}
default:
break;
@@ -755,6 +817,7 @@ glusterd_pre_validate_aggr_rsp_dict (glusterd_op_t op,
break;
case GD_OP_START_VOLUME:
case GD_OP_ADD_BRICK:
case GD_OP_ADD_TIER_BRICK:
ret = glusterd_aggr_brick_mount_dirs (aggr, rsp);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
@@ -1085,6 +1148,7 @@ glusterd_mgmt_v3_build_payload (dict_t **req, char **op_errstr, dict_t *dict,
case GD_OP_ADD_BRICK:
case GD_OP_REPLACE_BRICK:
case GD_OP_RESET_BRICK:
case GD_OP_ADD_TIER_BRICK:
{
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
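
Taken together, these hunks route GD_OP_ADD_TIER_BRICK through every
mgmt-v3 phase: pre-validate reuses glusterd_op_stage_add_brick, commit
calls the new glusterd_op_add_tier_brick, and post-validate regenerates
volfiles and starts tierd. A compact standalone sketch of that phase
pipeline (stand-in functions, not the real signatures):

#include <stdio.h>

typedef int (*phase_fn) (const char *volname);

static int pre_validate (const char *v)
{
        printf ("pre-validate %s\n", v);
        return 0;
}

static int commit_op (const char *v)
{
        printf ("commit %s\n", v);
        return 0;
}

static int post_validate (const char *v)
{
        printf ("post-validate %s\n", v);
        return 0;
}

int main (void)
{
        phase_fn phases[] = { pre_validate, commit_op, post_validate };
        unsigned i;

        for (i = 0; i < sizeof (phases) / sizeof (phases[0]); i++)
                if (phases[i] ("patchy"))
                        return 1;   /* a failing phase aborts the op */
        return 0;
}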


@@ -151,6 +151,7 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
case GD_OP_TIER_START_STOP:
case GD_OP_DETACH_NOT_STARTED:
case GD_OP_GANESHA:
case GD_OP_ADD_TIER_BRICK:
{
/*nothing specific to be done*/


@@ -236,6 +236,7 @@ glusterd_syncop_aggr_rsp_dict (glusterd_op_t op, dict_t *aggr, dict_t *rsp)
case GD_OP_CREATE_VOLUME:
case GD_OP_ADD_BRICK:
case GD_OP_START_VOLUME:
case GD_OP_ADD_TIER_BRICK:
ret = glusterd_aggr_brick_mount_dirs (aggr, rsp);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,


@@ -130,6 +130,7 @@ typedef enum glusterd_op_ {
GD_OP_DETACH_TIER_STATUS,
GD_OP_DETACH_NOT_STARTED,
GD_OP_REMOVE_TIER_BRICK,
GD_OP_ADD_TIER_BRICK,
GD_OP_MAX,
} glusterd_op_t;
@@ -953,6 +954,9 @@ glusterd_handle_attach_tier (rpcsvc_request_t *req);
int
glusterd_handle_detach_tier (rpcsvc_request_t *req);
int
glusterd_handle_add_tier_brick (rpcsvc_request_t *req);
int
glusterd_handle_replace_brick (rpcsvc_request_t *req);
@@ -1142,6 +1146,7 @@ int glusterd_op_delete_volume (dict_t *dict);
int manage_export_config (char *volname, char *value, char **op_errstr);
int glusterd_op_add_brick (dict_t *dict, char **op_errstr);
int glusterd_op_add_tier_brick (dict_t *dict, char **op_errstr);
int glusterd_op_remove_brick (dict_t *dict, char **op_errstr);
int glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr,
dict_t *rsp_dict);