glusterd: log enhancements for volume start

* changed some of the log messages to give as much information as
  available in case of failure

* added logs to identify on which machine lock/stage/commit failed

* added macros to represent error strings to maintain uniformity
  among error messages for a given kind of error

* moved error logs wherever possible, from caller to callee to avoid
  code duplication

Change-Id: I0e98d5d3ba086c99240f2fbd642451f175f51942
BUG: 812356
Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
Reviewed-on: http://review.gluster.org/4353
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
Tested-by: Vijay Bellur <vbellur@redhat.com>
This commit is contained in:
Krutika Dhananjay 2012-12-06 13:08:11 +05:30 committed by Vijay Bellur
parent 90ee11d0c5
commit 129728f257
8 changed files with 188 additions and 78 deletions

View File

@@ -2343,7 +2343,7 @@ out:
}
}
gf_log ("", GF_LOG_DEBUG, "returning %d", ret);
gf_log (this->name, GF_LOG_DEBUG, "returning %d", ret);
return ret;
}

View File

@@ -2328,16 +2328,17 @@ glusterd_dict_set_volid (dict_t *dict, char *volname, char **op_errstr)
glusterd_volinfo_t *volinfo = NULL;
char *volid = NULL;
char msg[1024] = {0,};
xlator_t *this = NULL;
this = THIS;
GF_ASSERT (this);
if (!dict || !volname)
goto out;
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
snprintf (msg, sizeof (msg), "Volume %s does not exist",
volname);
gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
*op_errstr = gf_strdup (msg);
snprintf (msg, sizeof (msg), FMTSTR_CHECK_VOL_EXISTS, volname);
goto out;
}
volid = gf_strdup (uuid_utoa (volinfo->volume_id));
@@ -2347,11 +2348,15 @@ glusterd_dict_set_volid (dict_t *dict, char *volname, char **op_errstr)
}
ret = dict_set_dynstr (dict, "vol-id", volid);
if (ret) {
gf_log (THIS->name, GF_LOG_ERROR,
"Failed to set volume id in dictionary");
snprintf (msg, sizeof (msg), "Failed to set volume id of volume"
" %s", volname);
goto out;
}
out:
if (msg[0] != '\0') {
gf_log (this->name, GF_LOG_ERROR, "%s", msg);
*op_errstr = strdup (msg);
}
return ret;
}
@@ -2366,7 +2371,7 @@ glusterd_op_build_payload (dict_t **req, char **op_errstr, dict_t *op_ctx)
char *volname = NULL;
uint32_t status_cmd = GF_CLI_STATUS_NONE;
char *errstr = NULL;
xlator_t *this = THIS;
xlator_t *this = NULL;
GF_ASSERT (req);
@@ -2390,8 +2395,11 @@ glusterd_op_build_payload (dict_t **req, char **op_errstr, dict_t *op_ctx)
} else {
#define GD_SYNC_OPCODE_KEY "sync-mgmt-operation"
ret = dict_get_int32 (op_ctx, GD_SYNC_OPCODE_KEY, (int32_t*)&op);
if (ret)
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to get volume"
" operation");
goto out;
}
ctx = op_ctx;
#undef GD_SYNC_OPCODE_KEY
}
@@ -2403,8 +2411,12 @@ glusterd_op_build_payload (dict_t **req, char **op_errstr, dict_t *op_ctx)
++glusterfs_port;
ret = dict_set_int32 (dict, "port",
glusterfs_port);
if (ret)
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Failed to set port in "
"dictionary");
goto out;
}
dict_copy (dict, req_dict);
}
break;

View File

@@ -49,10 +49,11 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
int32_t status = 0;
int32_t count = 0;
gf_cli_rsp rsp = {0,};
xlator_t *this = NULL;
GF_ASSERT (THIS);
conf = THIS->private;
this = THIS;
GF_ASSERT (this);
conf = this->private;
GF_ASSERT (conf);
@@ -77,7 +78,7 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
if (ctx) {
ret = dict_get_int32 (ctx, "status", &status);
if (ret) {
gf_log (THIS->name, GF_LOG_TRACE,
gf_log (this->name, GF_LOG_TRACE,
"failed to get status");
}
}
@@ -106,7 +107,7 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
if (ctx && dict_get_int32 (ctx, "count", &count)) {
ret = dict_set_int32 (ctx, "count", 0);
if (ret) {
gf_log (THIS->name, GF_LOG_ERROR,
gf_log (this->name, GF_LOG_ERROR,
"failed to set count in dictionary");
}
}
@@ -115,13 +116,14 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
case GD_OP_START_BRICK:
case GD_OP_STOP_BRICK:
{
gf_log ("", GF_LOG_DEBUG, "not supported op %d", op);
gf_log (this->name, GF_LOG_DEBUG, "op '%s' not supported",
gd_op_list[op]);
break;
}
case GD_OP_NONE:
case GD_OP_MAX:
{
gf_log ("", GF_LOG_ERROR, "invalid operation %d", op);
gf_log (this->name, GF_LOG_ERROR, "invalid operation");
break;
}
case GD_OP_CREATE_VOLUME:
@@ -160,7 +162,7 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
ret = dict_allocate_and_serialize (ctx, &rsp.dict.dict_val,
&rsp.dict.dict_len);
if (ret < 0 )
gf_log (THIS->name, GF_LOG_ERROR, "failed to "
gf_log (this->name, GF_LOG_ERROR, "failed to "
"serialize buffer");
else
free_ptr = rsp.dict.dict_val;
@@ -178,7 +180,7 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
ret = 0;
GF_FREE (free_ptr);
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}

View File

@@ -484,12 +484,20 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
int32_t tmp_op = 0;
gf_boolean_t local_locked = _gf_false;
char *op_errstr = NULL;
xlator_t *this = NULL;
char *hostname = NULL;
conf = THIS->private;
this = THIS;
GF_ASSERT (this);
conf = this->private;
GF_ASSERT (conf);
ret = dict_get_int32 (op_ctx, GD_SYNC_OPCODE_KEY, &tmp_op);
if (ret)
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to get volume "
"operation");
goto out;
}
op = tmp_op;
@@ -501,7 +509,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
/* Lock everything */
ret = glusterd_lock (MY_UUID);
if (ret) {
gf_log (THIS->name, GF_LOG_ERROR, "Unable to acquire lock");
gf_log (this->name, GF_LOG_ERROR, "Unable to acquire lock");
gf_asprintf (&op_errstr, "Another transaction is in progress. "
"Please try again after sometime.");
goto out;
@@ -511,32 +519,51 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
INIT_LIST_HEAD (&conf->xaction_peers);
list_for_each_entry (peerinfo, &conf->peers, uuid_list) {
if (!peerinfo->connected)
continue;
if (peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
continue;
ret = gd_syncop_mgmt_lock (peerinfo->rpc,
MY_UUID, tmp_uuid);
if (ret == 0)
if (ret) {
gf_asprintf (&op_errstr, "Another transaction could be "
"in progress. Please try again after "
"sometime.");
gf_log (this->name, GF_LOG_ERROR, "Failed to acquire "
"lock on peer %s", peerinfo->hostname);
goto out;
} else {
list_add_tail (&peerinfo->op_peers_list,
&conf->xaction_peers);
}
}
ret = glusterd_op_build_payload (&req_dict, &op_errstr, op_ctx);
if (ret)
if (ret) {
gf_log (this->name, GF_LOG_ERROR, LOGSTR_BUILD_PAYLOAD,
gd_op_list[op]);
if (op_errstr == NULL)
gf_asprintf (&op_errstr, OPERRSTR_BUILD_PAYLOAD);
goto out;
}
/* stage op */
ret = glusterd_op_stage_validate (op, req_dict, &op_errstr, rsp_dict);
if (ret)
goto out;
if (ret) {
hostname = "localhost";
goto stage_done;
}
list_for_each_entry (peerinfo, &conf->xaction_peers, op_peers_list) {
ret = gd_syncop_mgmt_stage_op (peerinfo->rpc,
MY_UUID, tmp_uuid,
op, req_dict, &rsp_dict,
&op_errstr);
if (ret)
goto out;
if (ret) {
hostname = peerinfo->hostname;
goto stage_done;
}
if (op == GD_OP_REPLACE_BRICK)
(void) glusterd_syncop_aggr_rsp_dict (op, op_ctx,
@@ -547,23 +574,47 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
dict_unref (rsp_dict);
}
stage_done:
if (ret) {
gf_log (this->name, GF_LOG_ERROR, LOGSTR_STAGE_FAIL,
gd_op_list[op], hostname, (op_errstr) ? ":" : " ",
(op_errstr) ? op_errstr : " ");
if (op_errstr == NULL)
gf_asprintf (&op_errstr, OPERRSTR_STAGE_FAIL, hostname);
goto out;
}
/* commit op */
ret = glusterd_op_commit_perform (op, req_dict, &op_errstr, rsp_dict);
if (ret)
goto out;
if (ret) {
hostname = "localhost";
goto commit_done;
}
list_for_each_entry (peerinfo, &conf->xaction_peers, op_peers_list) {
ret = gd_syncop_mgmt_commit_op (peerinfo->rpc,
MY_UUID, tmp_uuid,
op, req_dict, &rsp_dict,
&op_errstr);
if (ret)
goto out;
if (ret) {
hostname = peerinfo->hostname;
goto commit_done;
}
(void) glusterd_syncop_aggr_rsp_dict (op, op_ctx, rsp_dict,
op_errstr);
if (rsp_dict)
dict_unref (rsp_dict);
}
commit_done:
if (ret) {
gf_log (this->name, GF_LOG_ERROR, LOGSTR_COMMIT_FAIL,
gd_op_list[op], hostname, (op_errstr) ? ":" : " ",
(op_errstr) ? op_errstr : " ");
if (op_errstr == NULL)
gf_asprintf (&op_errstr, OPERRSTR_COMMIT_FAIL,
hostname);
goto out;
}
ret = 0;
out:

View File

@@ -255,13 +255,17 @@ glusterd_lock (uuid_t uuid)
char new_owner_str[50];
char owner_str[50];
int ret = -1;
xlator_t *this = NULL;
this = THIS;
GF_ASSERT (this);
GF_ASSERT (uuid);
glusterd_get_lock_owner (&owner);
if (!uuid_is_null (owner)) {
gf_log ("glusterd", GF_LOG_ERROR, "Unable to get lock"
gf_log (this->name, GF_LOG_ERROR, "Unable to get lock"
" for uuid: %s, lock held by: %s",
uuid_utoa_r (uuid, new_owner_str),
uuid_utoa_r (owner, owner_str));
@@ -271,7 +275,7 @@ glusterd_lock (uuid_t uuid)
ret = glusterd_set_lock_owner (uuid);
if (!ret) {
gf_log ("glusterd", GF_LOG_INFO, "Cluster lock held by"
gf_log (this->name, GF_LOG_DEBUG, "Cluster lock held by"
" %s", uuid_utoa (uuid));
}
@@ -716,11 +720,15 @@ int32_t
glusterd_resolve_brick (glusterd_brickinfo_t *brickinfo)
{
int32_t ret = -1;
xlator_t *this = NULL;
this = THIS;
GF_ASSERT (this);
GF_ASSERT (brickinfo);
ret = glusterd_hostname_to_uuid (brickinfo->hostname, brickinfo->uuid);
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
@@ -978,6 +986,7 @@ glusterd_volinfo_find (char *volname, glusterd_volinfo_t **volinfo)
GF_ASSERT (this);
priv = this->private;
GF_ASSERT (priv);
list_for_each_entry (tmp_volinfo, &priv->volumes, vol_list) {
if (!strcmp (tmp_volinfo->volname, volname)) {
@@ -1193,14 +1202,15 @@ glusterd_volume_start_glusterfs (glusterd_volinfo_t *volinfo,
GF_ASSERT (this);
priv = this->private;
GF_ASSERT (priv);
GLUSTERD_GET_VOLUME_DIR (path, volinfo, priv);
snprintf (rundir, PATH_MAX, "%s/run", path);
ret = mkdir (rundir, 0777);
if ((ret == -1) && (EEXIST != errno)) {
gf_log ("", GF_LOG_ERROR, "Unable to create rundir %s",
rundir);
gf_log (this->name, GF_LOG_ERROR, "Unable to create rundir %s."
"Reason : %s", rundir, strerror (errno));
goto out;
}
@@ -1214,7 +1224,7 @@ glusterd_volume_start_glusterfs (glusterd_volinfo_t *volinfo,
ret = lockf (fileno (file), F_TLOCK, 0);
if (ret && ((EAGAIN == errno) || (EACCES == errno))) {
ret = 0;
gf_log ("", GF_LOG_INFO, "brick %s:%s "
gf_log (this->name, GF_LOG_DEBUG, "brick %s:%s "
"already started", brickinfo->hostname,
brickinfo->path);
goto connect;
@@ -1230,7 +1240,7 @@ glusterd_volume_start_glusterfs (glusterd_volinfo_t *volinfo,
ret = lockf (fileno (file), F_TLOCK, 0);
if (ret && ((EAGAIN == errno) || (EACCES == errno))) {
ret = 0;
gf_log ("", GF_LOG_INFO, "brick %s:%s "
gf_log (this->name, GF_LOG_DEBUG, "brick %s:%s "
"already started", brickinfo->hostname,
brickinfo->path);
goto connect;
@@ -1244,7 +1254,7 @@ glusterd_volume_start_glusterfs (glusterd_volinfo_t *volinfo,
}
unlink (pidfile);
gf_log ("", GF_LOG_INFO, "About to start glusterfs"
gf_log (this->name, GF_LOG_DEBUG, "About to start glusterfs"
" for brick %s:%s", brickinfo->hostname,
brickinfo->path);
GLUSTERD_REMOVE_SLASH_FROM_PATH (brickinfo->path, exp_path);
@@ -1335,8 +1345,9 @@ connect:
out:
if (is_locked && file)
if (lockf (fileno (file), F_ULOCK, 0) < 0)
gf_log ("", GF_LOG_WARNING, "Cannot unlock pidfile: %s"
" reason: %s", pidfile, strerror(errno));
gf_log (this->name, GF_LOG_WARNING, "Cannot unlock "
"pidfile: %s reason: %s", pidfile,
strerror(errno));
if (file)
fclose (file);
return ret;
@@ -3930,8 +3941,7 @@ glusterd_brick_start (glusterd_volinfo_t *volinfo,
if (uuid_is_null (brickinfo->uuid)) {
ret = glusterd_resolve_brick (brickinfo);
if (ret) {
gf_log ("glusterd", GF_LOG_ERROR,
"cannot resolve brick: %s:%s",
gf_log (this->name, GF_LOG_ERROR, FMTSTR_RESOLVE_BRICK,
brickinfo->hostname, brickinfo->path);
goto out;
}
@@ -3943,13 +3953,13 @@ glusterd_brick_start (glusterd_volinfo_t *volinfo,
}
ret = glusterd_volume_start_glusterfs (volinfo, brickinfo, wait);
if (ret) {
gf_log ("", GF_LOG_ERROR, "Unable to start "
"glusterfs, ret: %d", ret);
gf_log (this->name, GF_LOG_ERROR, "Unable to start brick %s:%s",
brickinfo->hostname, brickinfo->path);
goto out;
}
out:
gf_log ("", GF_LOG_DEBUG, "returning %d ", ret);
gf_log (this->name, GF_LOG_DEBUG, "returning %d ", ret);
return ret;
}
@@ -4716,7 +4726,7 @@ glusterd_hostname_to_uuid (char *hostname, uuid_t uuid)
}
out:
gf_log ("", GF_LOG_DEBUG, "returning %d", ret);
gf_log (this->name, GF_LOG_DEBUG, "returning %d", ret);
return ret;
}
@@ -6013,21 +6023,29 @@ glusterd_validate_volume_id (dict_t *op_dict, glusterd_volinfo_t *volinfo)
int ret = -1;
char *volid_str = NULL;
uuid_t vol_uid = {0, };
xlator_t *this = NULL;
this = THIS;
GF_ASSERT (this);
ret = dict_get_str (op_dict, "vol-id", &volid_str);
if (ret) {
gf_log (THIS->name, GF_LOG_ERROR, "Failed to get volume id");
gf_log (this->name, GF_LOG_ERROR, "Failed to get volume id for "
"volume %s", volinfo->volname);
goto out;
}
ret = uuid_parse (volid_str, vol_uid);
if (ret) {
gf_log (THIS->name, GF_LOG_ERROR, "Failed to parse uuid");
gf_log (this->name, GF_LOG_ERROR, "Failed to parse volume id "
"for volume %s", volinfo->volname);
goto out;
}
if (uuid_compare (vol_uid, volinfo->volume_id)) {
gf_log (THIS->name, GF_LOG_ERROR, "Volume ids are different. "
"Possibly a split brain among peers.");
gf_log (this->name, GF_LOG_ERROR, "Volume ids of volume %s - %s"
" and %s - are different. Possibly a split brain among "
"peers.", volinfo->volname, volid_str,
uuid_utoa (volinfo->volume_id));
ret = -1;
goto out;
}
@@ -6210,6 +6228,10 @@ glusterd_to_cli (rpcsvc_request_t *req, gf_cli_rsp *arg, struct iovec *payload,
int op_ret = 0;
char *op_errstr = NULL;
int op_errno = 0;
xlator_t *this = NULL;
this = THIS;
GF_ASSERT (this);
op_ret = arg->op_ret;
op_errstr = arg->op_errstr;
@@ -6217,13 +6239,14 @@ glusterd_to_cli (rpcsvc_request_t *req, gf_cli_rsp *arg, struct iovec *payload,
ret = dict_get_str (dict, "cmd-str", &cmd);
if (ret)
gf_log ("glusterd", GF_LOG_ERROR, "Failed to get command string");
gf_log (this->name, GF_LOG_ERROR, "Failed to get command "
"string");
if (cmd) {
if (op_ret)
gf_cmd_log ("", "%s : FAILED %s %s", cmd,
(op_errstr)? ":":" ",
(op_errstr)? op_errstr: " ");
(op_errstr)? ":" : " ",
(op_errstr)? op_errstr : " ");
else
gf_cmd_log ("", "%s : SUCCESS", cmd);
}

View File

@@ -239,9 +239,10 @@ glusterd_handle_cli_start_volume (rpcsvc_request_t *req)
ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
//failed to decode msg;
snprintf (errstr, sizeof (errstr), "Failed to decode message "
"received from cli");
req->rpc_err = GARBAGE_ARGS;
snprintf (errstr, sizeof (errstr), "Received garbage args");
gf_log (this->name, sizeof (errstr), "%s", errstr);
goto out;
}
@@ -269,7 +270,7 @@ glusterd_handle_cli_start_volume (rpcsvc_request_t *req)
goto out;
}
gf_log (this->name, GF_LOG_INFO, "Received start vol req"
gf_log (this->name, GF_LOG_DEBUG, "Received start vol req"
" for volume %s", volname);
ret = glusterd_op_begin_synctask (req, GD_OP_START_VOLUME, dict);
@@ -861,14 +862,12 @@ glusterd_op_stage_start_volume (dict_t *dict, char **op_errstr)
glusterd_brickinfo_t *brickinfo = NULL;
char msg[2048];
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
priv = THIS->private;
if (!priv) {
gf_log ("glusterd", GF_LOG_ERROR,
"priv is NULL");
ret = -1;
goto out;
}
this = THIS;
GF_ASSERT (this);
priv = this->private;
GF_ASSERT (priv);
ret = glusterd_op_start_volume_args_get (dict, &volname, &flags);
if (ret)
@@ -877,18 +876,19 @@ glusterd_op_stage_start_volume (dict_t *dict, char **op_errstr)
exists = glusterd_check_volume_exists (volname);
if (!exists) {
snprintf (msg, sizeof (msg), "Volume %s does not exist", volname);
gf_log ("", GF_LOG_ERROR, "%s",
msg);
snprintf (msg, sizeof (msg), FMTSTR_CHECK_VOL_EXISTS, volname);
gf_log (this->name, GF_LOG_ERROR, "%s", msg);
*op_errstr = gf_strdup (msg);
ret = -1;
} else {
ret = 0;
goto out;
}
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret)
if (ret) {
gf_log (this->name, GF_LOG_ERROR, FMTSTR_CHECK_VOL_EXISTS,
volname);
goto out;
}
ret = glusterd_validate_volume_id (dict, volinfo);
if (ret)
@@ -897,8 +897,7 @@ glusterd_op_stage_start_volume (dict_t *dict, char **op_errstr)
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
ret = glusterd_resolve_brick (brickinfo);
if (ret) {
gf_log ("", GF_LOG_ERROR,
"Unable to resolve brick %s:%s",
gf_log (this->name, GF_LOG_ERROR, FMTSTR_RESOLVE_BRICK,
brickinfo->hostname, brickinfo->path);
goto out;
}
@@ -907,7 +906,7 @@ glusterd_op_stage_start_volume (dict_t *dict, char **op_errstr)
if (glusterd_is_volume_started (volinfo)) {
snprintf (msg, sizeof (msg), "Volume %s already"
" started", volname);
gf_log ("glusterd", GF_LOG_ERROR, "%s", msg);
gf_log (this->name, GF_LOG_ERROR, "%s", msg);
*op_errstr = gf_strdup (msg);
ret = -1;
goto out;
@@ -917,7 +916,7 @@ glusterd_op_stage_start_volume (dict_t *dict, char **op_errstr)
ret = 0;
out:
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
@@ -1618,15 +1617,22 @@ glusterd_op_start_volume (dict_t *dict, char **op_errstr)
int flags = 0;
glusterd_volinfo_t *volinfo = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
xlator_t *this = NULL;
this = THIS;
GF_ASSERT (this);
ret = glusterd_op_start_volume_args_get (dict, &volname, &flags);
if (ret)
goto out;
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret)
if (ret) {
gf_log (this->name, GF_LOG_ERROR, FMTSTR_CHECK_VOL_EXISTS,
volname);
goto out;
}
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
ret = glusterd_brick_start (volinfo, brickinfo, _gf_true);
if (ret)
@@ -1642,7 +1648,7 @@ glusterd_op_start_volume (dict_t *dict, char **op_errstr)
ret = glusterd_nodesvcs_handle_graph_change (volinfo);
out:
gf_log ("", GF_LOG_DEBUG, "returning %d ", ret);
gf_log (this->name, GF_LOG_DEBUG, "returning %d ", ret);
return ret;
}

View File

@@ -71,6 +71,8 @@ int rpcsvc_programs_count = (sizeof (all_programs) / sizeof (all_programs[0]));
const char *gd_op_list[GD_OP_MAX + 1] = {
[GD_OP_NONE] = "Invalid op",
[GD_OP_CREATE_VOLUME] = "Create",
[GD_OP_START_BRICK] = "Start Brick",
[GD_OP_STOP_BRICK] = "Stop Brick",
[GD_OP_DELETE_VOLUME] = "Delete",
[GD_OP_START_VOLUME] = "Start",
[GD_OP_STOP_VOLUME] = "Stop",

View File

@@ -49,6 +49,20 @@
#define GLUSTERD_SERVER_QUORUM "server"
#define FMTSTR_CHECK_VOL_EXISTS "Volume %s does not exist"
#define FMTSTR_RESOLVE_BRICK "Could not find peer on which brick %s:%s resides"
#define LOGSTR_BUILD_PAYLOAD "Failed to build payload for operation 'Volume %s'"
#define LOGSTR_STAGE_FAIL "Staging of operation 'Volume %s' failed on %s %s %s"
#define LOGSTR_COMMIT_FAIL "Commit of operation 'Volume %s' failed on %s %s %s"
#define OPERRSTR_BUILD_PAYLOAD "Failed to build payload. Please check the log "\
"file for more details."
#define OPERRSTR_STAGE_FAIL "Staging failed on %s. Please check the log file " \
"for more details."
#define OPERRSTR_COMMIT_FAIL "Commit failed on %s. Please check the log file "\
"for more details."
struct glusterd_volinfo_;
typedef struct glusterd_volinfo_ glusterd_volinfo_t;