Revert "glusterd: add "volume label" command"

This reverts commit dad16a51ba7e6b1c57529423c57257dbce97ee93

Test script causing "silent" failures during execution.

Change-Id: I26dbb8ed22256071cb415cc3aff572ef8372600e
Reviewed-on: http://review.gluster.org/4268
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Anand Avati <avati@redhat.com>
Anand Avati 2012-12-04 16:00:36 -08:00
parent 698deb33d7
commit 6d1607becc
10 changed files with 7 additions and 410 deletions

View File

@@ -1790,62 +1790,6 @@ out:
return ret;
}
int
cli_cmd_volume_label_cbk (struct cli_state *state, struct cli_cmd_word *word,
const char **words, int wordcount)
{
int ret = -1;
rpc_clnt_procedure_t *proc = NULL;
call_frame_t *frame = NULL;
dict_t *options = NULL;
int sent = 0;
int parse_error = 0;
cli_local_t *local = NULL;
frame = create_frame (THIS, THIS->ctx->pool);
if (!frame)
goto out;
if (wordcount != 4) {
cli_usage_out (word->pattern);
parse_error = 1;
goto out;
}
options = dict_new();
if (!options) {
cli_out ("Could not allocate dict for label_volume");
goto out;
}
ret = dict_set_str (options, "volname", (char *)words[2]);
if (ret)
goto out;
ret = dict_set_str (options, "brick", (char *)words[3]);
if (ret)
goto out;
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_LABEL_VOLUME];
CLI_LOCAL_INIT (local, words, frame, options);
if (proc->fn) {
ret = proc->fn (frame, THIS, options);
}
out:
if (ret) {
cli_cmd_sent_status_get (&sent);
if ((sent == 0) && (parse_error == 0))
cli_out ("Volume label failed");
}
CLI_STACK_DESTROY (frame);
return ret;
}
struct cli_cmd volume_cmds[] = {
{ "volume info [all|<VOLNAME>]",
cli_cmd_volume_info_cbk,
@@ -1956,11 +1900,6 @@ struct cli_cmd volume_cmds[] = {
"Clear locks held on path"
},
{"volume label <VOLNAME> <BRICK>",
cli_cmd_volume_label_cbk,
"Add a volume label to an empty replacement brick"
},
{ NULL, NULL, NULL }
};
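
The table entry above was the command's only user-facing surface. A minimal usage sketch, based on that usage string and on the invocation in the bug-860297.t test later in this diff ("myvol", "server1", and the brick path are placeholders, not values from this commit):

# Sketch of how the reverted command was invoked; volume name, host,
# and brick path are placeholders matching "<VOLNAME> <BRICK>".
gluster volume label myvol server1:/data/bricks/myvol-1

# The callback above accepts exactly four words ("volume label
# <VOLNAME> <BRICK>"); anything else just prints the usage line.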

View File

@@ -6253,66 +6253,6 @@ out:
return ret;
}
int32_t
gf_cli_label_volume_cbk (struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
gf_cli_rsp rsp = {0,};
int ret = -1;
dict_t *dict = NULL;
if (-1 == req->rpc_status)
goto out;
ret = xdr_to_generic (*iov, &rsp,
(xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log ("cli", GF_LOG_ERROR, "XDR decoding failed");
goto out;
}
gf_log ("cli", GF_LOG_DEBUG, "Received response to label");
if (rsp.op_ret) {
cli_err ("Volume label unsuccessful");
cli_err ("%s", rsp.op_errstr);
} else {
cli_out ("Volume label successful");
}
ret = rsp.op_ret;
out:
if (dict)
dict_unref (dict);
cli_cmd_broadcast_response (ret);
return ret;
}
int32_t
gf_cli_label_volume (call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{0,}};
dict_t *options = NULL;
int ret = -1;
if (!frame || !this || !data)
goto out;
options = data;
ret = cli_to_glusterd (&req, frame, gf_cli_label_volume_cbk,
(xdrproc_t) xdr_gf_cli_req, options,
GLUSTER_CLI_LABEL_VOLUME, this, cli_rpc_prog,
NULL);
out:
gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);
GF_FREE (req.dict.dict_val);
return ret;
}
int
cli_to_glusterd (gf_cli_req *req, call_frame_t *frame,
fop_cbk_fn_t cbkfn, xdrproc_t xdrproc, dict_t *dict,
@@ -6424,7 +6364,6 @@ struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = {
#ifdef HAVE_BD_XLATOR
[GLUSTER_CLI_BD_OP] = {"BD_OP", gf_cli_bd_op},
#endif
[GLUSTER_CLI_LABEL_VOLUME] = {"LABEL_VOLUME", gf_cli_label_volume},
};
struct rpc_clnt_program cli_prog = {

View File

@@ -155,7 +155,6 @@ enum gluster_cli_procnum {
GLUSTER_CLI_CLRLOCKS_VOLUME,
GLUSTER_CLI_UUID_RESET,
GLUSTER_CLI_BD_OP,
GLUSTER_CLI_LABEL_VOLUME,
GLUSTER_CLI_MAXVALUE,
};

tests/bugs/bug-860297.t (88 changed lines) Executable file → Normal file
View File

@@ -1,87 +1,13 @@
#!/bin/bash
. $(dirname $0)/../include.rc
cleanup
function recreate {
# The rm is necessary so we don't get fooled by leftovers from old runs.
rm -rf $1 && mkdir -p $1
}
function count_bricks {
local count
local pid
count=0
for pid in /var/lib/glusterd/vols/${1}/run/*pid; do
if kill -0 $(cat $pid); then
count=$((count+1))
fi
done
echo $count
}
cleanup;
TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
## Start and create a volume
TEST recreate ${B0}/${V0}-0
TEST recreate ${B0}/${V0}-1
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
function volinfo_field()
{
local vol=$1;
local field=$2;
$CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
}
## Verify volume is created
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
## Start volume and verify that all bricks start.
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
EXPECT 2 count_bricks $V0
TEST $CLI volume stop $V0
# Nuke one of the bricks and make sure it *doesn't* start.
TEST recreate ${B0}/${V0}-1
# We can't do the usual TEST/startup thing here because of another bug. If
# a server fails to start a brick, it won't start any others either. Since
# all of our bricks in testing are on one server, that means no bricks start
# and so the volume doesn't start either. Changing the order etc. doesn't
# help, because the attempted startup order is non-deterministic. Instead,
# we just don't rely on whether or not the volume starts; the brick count is
# sufficient for our purposes.
$CLI volume start $V0;
EXPECT 1 count_bricks $V0
# If we can't depend on the volume starting, we can't depend on it stopping
# either.
$CLI volume stop $V0
# Label the recreated brick and make sure it starts now.
TEST $CLI volume label $V0 ${H0}:${B0}/${V0}-1
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
EXPECT 2 count_bricks $V0
# Make sure we can mount and use the volume.
TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
TEST dd if=/dev/zero of=$M0/block bs=4k count=1
if [ "$EXIT_EARLY" = "1" ]; then
exit 0;
fi
## Finish up
TEST umount $M0
TEST $CLI volume stop $V0;
EXPECT 'Stopped' volinfo_field $V0 'Status';
TEST $CLI volume delete $V0;
TEST ! $CLI volume info $V0;
TEST $CLI volume info
TEST $CLI volume create $V0 $H0:$B0/brick1
setfattr -x trusted.glusterfs.volume-id $B0/brick1
## If Extended attribute trusted.glusterfs.volume-id is not present
## then volume should not be able to start
TEST ! $CLI volume start $V0;
cleanup;
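
The surviving tail of this test (volume create, setfattr -x, failed start) checks that a brick stripped of its volume-id label refuses to start. A minimal sketch, assuming the standard getfattr tool, of how that label can be inspected on a brick (the path is a placeholder):

# Inspect the volume-id xattr that the "setfattr -x" line above removes;
# the brick path is a placeholder. On a labeled brick this prints the
# volume's UUID as a 32-digit hex value; on an unlabeled brick getfattr
# reports the attribute as missing.
getfattr --absolute-names -n trusted.glusterfs.volume-id -e hex /data/bricks/myvol-1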

View File

@@ -2934,69 +2934,6 @@ out:
return ret;
}
int
glusterd_handle_cli_label_volume (rpcsvc_request_t *req)
{
int32_t ret = -1;
gf_cli_req cli_req = {{0,}};
glusterd_op_t cli_op = GD_OP_LABEL_VOLUME;
char *volname = NULL;
dict_t *dict = NULL;
GF_ASSERT (req);
ret = -1;
if (!xdr_to_generic (req->msg[0], &cli_req,
(xdrproc_t)xdr_gf_cli_req)) {
req->rpc_err = GARBAGE_ARGS;
goto out;
}
if (cli_req.dict.dict_len) {
dict = dict_new ();
ret = dict_unserialize (cli_req.dict.dict_val,
cli_req.dict.dict_len,
&dict);
if (ret < 0) {
gf_log (THIS->name, GF_LOG_ERROR,
"failed to unserialize req-buffer to"
" dictionary");
goto out;
}
} else {
ret = -1;
gf_log (THIS->name, GF_LOG_ERROR, "Empty cli request.");
goto out;
}
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
gf_log (THIS->name, GF_LOG_ERROR, "failed to get volname");
goto out;
}
gf_log (THIS->name, GF_LOG_INFO, "Received label volume req "
"for volume %s", volname);
ret = glusterd_op_begin (req, cli_op, dict);
out:
glusterd_friend_sm ();
glusterd_op_sm ();
if (ret) {
ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
dict, "operation failed");
if (dict)
dict_unref (dict);
}
free (cli_req.dict.dict_val);
return ret;
}
int
glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event,
@@ -3302,9 +3239,6 @@ rpcsvc_actor_t gd_svc_cli_actors[] = {
#ifdef HAVE_BD_XLATOR
[GLUSTER_CLI_BD_OP] = {"BD_OP", GLUSTER_CLI_BD_OP, glusterd_handle_cli_bd_op, NULL, 0},
#endif
[GLUSTER_CLI_LABEL_VOLUME] = {"LABEL_VOLUME", GLUSTER_CLI_LABEL_VOLUME,
glusterd_handle_cli_label_volume, NULL,
0},
};
struct rpcsvc_program gd_svc_cli_prog = {

View File

@@ -59,7 +59,6 @@ char glusterd_hook_dirnames[GD_OP_MAX][256] =
[GD_OP_LIST_VOLUME] = EMPTY,
[GD_OP_CLEARLOCKS_VOLUME] = EMPTY,
[GD_OP_DEFRAG_BRICK_VOLUME] = EMPTY,
[GD_OP_LABEL_VOLUME] = EMPTY,
};
#undef EMPTY

View File

@@ -2373,7 +2373,6 @@ glusterd_op_build_payload (dict_t **req, char **op_errstr, dict_t *op_ctx)
#ifdef HAVE_BD_XLATOR
case GD_OP_BD_OP:
#endif
case GD_OP_LABEL_VOLUME:
{
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
@@ -3503,23 +3502,15 @@ glusterd_op_stage_validate (glusterd_op_t op, dict_t *dict, char **op_errstr,
ret = glusterd_op_stage_statedump_volume (dict,
op_errstr);
break;
case GD_OP_CLEARLOCKS_VOLUME:
ret = glusterd_op_stage_clearlocks_volume (dict,
op_errstr);
break;
#ifdef HAVE_BD_XLATOR
case GD_OP_BD_OP:
ret = glusterd_op_stage_bd (dict, op_errstr);
break;
#endif
case GD_OP_LABEL_VOLUME:
ret = glusterd_op_stage_label_volume (dict, op_errstr);
break;
default:
gf_log ("", GF_LOG_ERROR, "Unknown op %d",
op);
@@ -3615,17 +3606,11 @@ glusterd_op_commit_perform (glusterd_op_t op, dict_t *dict, char **op_errstr,
case GD_OP_CLEARLOCKS_VOLUME:
ret = glusterd_op_clearlocks_volume (dict, op_errstr);
break;
#ifdef HAVE_BD_XLATOR
case GD_OP_BD_OP:
ret = 0;
break;
#endif
case GD_OP_LABEL_VOLUME:
ret = glusterd_op_label_volume (dict, op_errstr);
break;
default:
gf_log ("", GF_LOG_ERROR, "Unknown op %d",
op);
@@ -5401,7 +5386,6 @@ glusterd_op_free_ctx (glusterd_op_t op, void *ctx)
#ifdef HAVE_BD_XLATOR
case GD_OP_BD_OP:
#endif
case GD_OP_LABEL_VOLUME:
dict_unref (ctx);
break;
default:

View File

@@ -140,7 +140,6 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
case GD_OP_CLEARLOCKS_VOLUME:
case GD_OP_HEAL_VOLUME:
case GD_OP_BD_OP:
case GD_OP_LABEL_VOLUME:
{
/*nothing specific to be done*/
break;

View File

@@ -1241,52 +1241,6 @@ out:
return ret;
}
int
glusterd_op_stage_label_volume (dict_t *dict, char **op_errstr)
{
int ret = -1;
char *volname = NULL;
glusterd_volinfo_t *volinfo = NULL;
gf_boolean_t exists = _gf_false;
char msg[2048] = {0};
char *brick = NULL;
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
gf_log ("", GF_LOG_ERROR, "Unable to get volume name");
goto out;
}
exists = glusterd_check_volume_exists (volname);
ret = glusterd_volinfo_find (volname, &volinfo);
if (!exists) {
snprintf (msg, sizeof (msg), "Volume %s does not exist",
volname);
gf_log ("", GF_LOG_ERROR, "%s", msg);
*op_errstr = gf_strdup (msg);
ret = -1;
goto out;
}
ret = dict_get_str (dict, "brick", &brick);
if (ret) {
goto out;
}
ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo, NULL);
if (ret) {
snprintf (msg, sizeof (msg), "Incorrect brick %s "
"for volume %s", brick, volname);
gf_log ("", GF_LOG_ERROR, "%s", msg);
*op_errstr = gf_strdup (msg);
goto out;
}
out:
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
#ifdef HAVE_BD_XLATOR
int
glusterd_op_stage_bd (dict_t *dict, char **op_errstr)
@@ -2099,73 +2053,3 @@ out:
return ret;
}
int
glusterd_op_label_volume (dict_t *dict, char **op_errstr)
{
int ret = -1;
glusterd_conf_t *priv = NULL;
glusterd_volinfo_t *volinfo = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
xlator_t *this = NULL;
char *volname = NULL;
char *brick = NULL;
glusterd_brickinfo_t *tmpbrkinfo = NULL;
this = THIS;
GF_ASSERT (this);
priv = this->private;
GF_ASSERT (priv);
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
gf_log ("", GF_LOG_ERROR, "volname not found");
goto out;
}
ret = dict_get_str (dict, "brick", &brick);
/* A brick must be specified for labeling; unlike log-rotate,
there is no "all bricks" form of this operation. */
if (ret) {
gf_log ("glusterd", GF_LOG_ERROR, "no brick specified");
goto out;
}
ret = glusterd_brickinfo_new_from_brick (brick, &tmpbrkinfo);
if (ret) {
gf_log ("glusterd", GF_LOG_ERROR,
"cannot get brickinfo from brick");
goto out;
}
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret)
goto out;
ret = -1;
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
if (uuid_compare (brickinfo->uuid, MY_UUID))
continue;
if ((strcmp (tmpbrkinfo->hostname, brickinfo->hostname) ||
strcmp (tmpbrkinfo->path,brickinfo->path)))
continue;
ret = sys_lsetxattr (brickinfo->path, GF_XATTR_VOL_ID_KEY,
volinfo->volume_id,
sizeof(volinfo->volume_id), XATTR_CREATE);
if (ret) {
gf_log ("glusterd", GF_LOG_ERROR,
"failed to set %s on %s: %s",
GF_XATTR_VOL_ID_KEY, brickinfo->path,
strerror(errno));
}
break;
}
out:
if (tmpbrkinfo)
glusterd_brickinfo_delete (tmpbrkinfo);
return ret;
}
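
On disk, the removed glusterd_op_label_volume() simply creates the volume-id xattr on the named brick with the volume's UUID. A hedged shell sketch of the manual equivalent, assuming GF_XATTR_VOL_ID_KEY is the trusted.glusterfs.volume-id attribute seen in the test script above (both brick paths are placeholders):

# Copy the volume-id label from a healthy brick to an empty replacement
# brick; paths are placeholders, not values from this commit.
GOOD_BRICK=/data/bricks/myvol-0      # surviving brick of the same volume
NEW_BRICK=/data/bricks/myvol-1       # empty replacement brick

# Read the volume UUID from the healthy brick as a 0x-prefixed hex string.
VOLID=$(getfattr --absolute-names -n trusted.glusterfs.volume-id -e hex \
        "$GOOD_BRICK" | grep '=' | cut -d= -f2)

# Write the same UUID onto the replacement brick. The C code above uses
# XATTR_CREATE, i.e. it refuses to overwrite an existing label, whereas
# setfattr here would overwrite silently; check first in real use.
setfattr -n trusted.glusterfs.volume-id -v "$VOLID" "$NEW_BRICK"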

View File

@@ -79,7 +79,6 @@ typedef enum glusterd_op_ {
GD_OP_CLEARLOCKS_VOLUME,
GD_OP_DEFRAG_BRICK_VOLUME,
GD_OP_BD_OP,
GD_OP_LABEL_VOLUME,
GD_OP_MAX,
} glusterd_op_t;
@@ -641,7 +640,6 @@ int32_t glusterd_op_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx);
int glusterd_handle_cli_statedump_volume (rpcsvc_request_t *req);
int glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req);
int glusterd_handle_cli_label_volume (rpcsvc_request_t *req);
int glusterd_handle_defrag_start (glusterd_volinfo_t *volinfo, char *op_errstr,
size_t len, int cmd, defrag_cbk_fn_t cbk);
@@ -687,10 +685,6 @@ int glusterd_op_statedump_volume (dict_t *dict, char **op_errstr);
int glusterd_op_stage_clearlocks_volume (dict_t *dict, char **op_errstr);
int glusterd_op_clearlocks_volume (dict_t *dict, char **op_errstr);
int glusterd_op_stage_label_volume (dict_t *dict, char **op_errstr);
int glusterd_op_label_volume (dict_t *dict, char **op_errstr);
int glusterd_op_stage_bd (dict_t *dict, char **op_errstr);
/* misc */