glusterd: gluster volume status should show status of bitrot and scrubber daemon

The command gluster volume status <VOLNAME> should show the status of the
bitrot and scrubber daemons, along with their PID information.

In addition to displaying bitrot and scrubber daemon information in the
gluster volume status output, there should also be commands to show the
status of each daemon individually. Those commands are the following.

To show only the bitrot daemon information:
gluster volume status <VOLNAME> bitd

To show only the scrubber daemon information:
gluster volume status <VOLNAME> scrub
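
For illustration, a hypothetical session against a volume named testvol might
look like the following; the hostnames, PIDs and exact column layout shown in
the comments are placeholders and can vary between releases.

gluster volume status testvol bitd
## Expected (illustrative) output lines, one per node:
##   Bitrot Daemon on localhost     N/A    N/A    Y    12345
##   Bitrot Daemon on server2       N/A    N/A    Y    12346

gluster volume status testvol scrub
## Expected (illustrative) output lines, one per node:
##   Scrubber Daemon on localhost   N/A    N/A    Y    12350
##   Scrubber Daemon on server2     N/A    N/A    Y    12351

Note that there is no separate command to enable the scrubber; it is started
together with the bitrot daemon once bitrot is enabled on the volume.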

Change-Id: Id86aae1156c8c599347c98e2a538f294d37376e4
BUG: 1209752
Signed-off-by: Gaurav Kumar Garg <ggarg@redhat.com>
Reviewed-on: http://review.gluster.org/10175
Reviewed-by: Kaushal M <kaushal@redhat.com>
Tested-by: Kaushal M <kaushal@redhat.com>
Gaurav Kumar Garg 2015-04-09 15:36:20 +05:30 committed by Kaushal M
parent 7759748915
commit da1416051d
9 changed files with 230 additions and 18 deletions

View File

@ -2940,6 +2940,10 @@ cli_cmd_volume_status_parse (const char **words, int wordcount,
cmd |= GF_CLI_STATUS_QUOTAD;
} else if (!strcmp (words[3], "snapd")) {
cmd |= GF_CLI_STATUS_SNAPD;
} else if (!strcmp (words[3], "bitd")) {
cmd |= GF_CLI_STATUS_BITD;
} else if (!strcmp (words[3], "scrub")) {
cmd |= GF_CLI_STATUS_SCRUB;
} else {
cmd = GF_CLI_STATUS_BRICK;
ret = dict_set_str (dict, "brick",

View File

@ -7007,7 +7007,8 @@ gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov,
}
if ((cmd & GF_CLI_STATUS_NFS) || (cmd & GF_CLI_STATUS_SHD) ||
(cmd & GF_CLI_STATUS_QUOTAD) || (cmd & GF_CLI_STATUS_SNAPD))
(cmd & GF_CLI_STATUS_QUOTAD) || (cmd & GF_CLI_STATUS_SNAPD) ||
(cmd & GF_CLI_STATUS_BITD) || (cmd & GF_CLI_STATUS_SCRUB))
notbrick = _gf_true;
if (global_state->mode & GLUSTER_MODE_XML) {
@ -7131,7 +7132,9 @@ gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov,
if (!strcmp (hostname, "NFS Server") ||
!strcmp (hostname, "Self-heal Daemon") ||
!strcmp (hostname, "Quota Daemon") ||
!strcmp (hostname, "Snapshot Daemon"))
!strcmp (hostname, "Snapshot Daemon") ||
!strcmp (hostname, "Scrubber Daemon") ||
!strcmp (hostname, "Bitrot Daemon"))
snprintf (status.brick, PATH_MAX + 255, "%s on %s",
hostname, path);
else {

View File

@ -914,6 +914,10 @@ glusterfs_handle_node_status (rpcsvc_request_t *req)
ret = gf_asprintf (&node_name, "%s", "glustershd");
else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
ret = gf_asprintf (&node_name, "%s", "quotad");
else if ((cmd & GF_CLI_STATUS_BITD) != 0)
ret = gf_asprintf (&node_name, "%s", "bitd");
else if ((cmd & GF_CLI_STATUS_SCRUB) != 0)
ret = gf_asprintf (&node_name, "%s", "scrubber");
else {
ret = -1;
@ -939,6 +943,10 @@ glusterfs_handle_node_status (rpcsvc_request_t *req)
ret = gf_asprintf (&subvol_name, "%s-replicate-0", volname);
else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
ret = gf_asprintf (&subvol_name, "%s", volname);
else if ((cmd & GF_CLI_STATUS_BITD) != 0)
ret = gf_asprintf (&subvol_name, "%s", volname);
else if ((cmd & GF_CLI_STATUS_SCRUB) != 0)
ret = gf_asprintf (&subvol_name, "%s", volname);
else {
ret = -1;
goto out;

View File

@ -146,22 +146,24 @@ enum gf1_cli_top_op {
/* The unconventional hex numbers help us perform
bit-wise operations which reduces complexity */
enum gf_cli_status_type {
GF_CLI_STATUS_NONE = 0x0000,
GF_CLI_STATUS_MEM = 0x0001, /*00000000000001*/
GF_CLI_STATUS_CLIENTS = 0x0002, /*00000000000010*/
GF_CLI_STATUS_INODE = 0x0004, /*00000000000100*/
GF_CLI_STATUS_FD = 0x0008, /*00000000001000*/
GF_CLI_STATUS_CALLPOOL = 0x0010, /*00000000010000*/
GF_CLI_STATUS_DETAIL = 0x0020, /*00000000100000*/
GF_CLI_STATUS_TASKS = 0x0040, /*0000001000000*/
GF_CLI_STATUS_MASK = 0x00FF, /*00000011111111 Used to get the op*/
GF_CLI_STATUS_VOL = 0x0100, /*00000100000000*/
GF_CLI_STATUS_ALL = 0x0200, /*00001000000000*/
GF_CLI_STATUS_BRICK = 0x0400, /*00010000000000*/
GF_CLI_STATUS_NFS = 0x0800, /*00100000000000*/
GF_CLI_STATUS_SHD = 0x1000, /*01000000000000*/
GF_CLI_STATUS_QUOTAD = 0x2000, /*10000000000000*/
GF_CLI_STATUS_SNAPD = 0x4000 /*100000000000000*/
GF_CLI_STATUS_NONE = 0x000000,
GF_CLI_STATUS_MEM = 0x000001, /*000000000000001*/
GF_CLI_STATUS_CLIENTS = 0x000002, /*000000000000010*/
GF_CLI_STATUS_INODE = 0x000004, /*000000000000100*/
GF_CLI_STATUS_FD = 0x000008, /*000000000001000*/
GF_CLI_STATUS_CALLPOOL = 0x000010, /*000000000010000*/
GF_CLI_STATUS_DETAIL = 0x000020, /*000000000100000*/
GF_CLI_STATUS_TASKS = 0x000040, /*00000001000000*/
GF_CLI_STATUS_MASK = 0x0000FF, /*000000011111111 Used to get the op*/
GF_CLI_STATUS_VOL = 0x000100, /*00000000100000000*/
GF_CLI_STATUS_ALL = 0x000200, /*00000001000000000*/
GF_CLI_STATUS_BRICK = 0x000400, /*00000010000000000*/
GF_CLI_STATUS_NFS = 0x000800, /*00000100000000000*/
GF_CLI_STATUS_SHD = 0x001000, /*00001000000000000*/
GF_CLI_STATUS_QUOTAD = 0x002000, /*00010000000000000*/
GF_CLI_STATUS_SNAPD = 0x004000, /*00100000000000000*/
GF_CLI_STATUS_BITD = 0x008000, /*01000000000000000*/
GF_CLI_STATUS_SCRUB = 0x010000 /*10000000000000000*/
};
/* Identifiers for snapshot clis */
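
The comment at the top of this enum is the crux of the change here: every
status type owns a distinct bit, so one command word can carry the volume
selector and a daemon selector at the same time, and each handler checks for
its bit with a single AND. A minimal shell sketch of the same arithmetic (the
standalone script and variable names are only for illustration):

#!/bin/bash
## Mirror a few of the enum values above in shell arithmetic.
GF_CLI_STATUS_VOL=$((0x000100))
GF_CLI_STATUS_BITD=$((0x008000))
GF_CLI_STATUS_SCRUB=$((0x010000))

## "gluster volume status <VOLNAME> bitd" effectively ORs the bitd bit into
## the command word, as the cli parser hunk earlier does with
## cmd |= GF_CLI_STATUS_BITD.
cmd=$((GF_CLI_STATUS_VOL | GF_CLI_STATUS_BITD))

## Each consumer needs only a bit-wise AND to see whether its daemon was requested.
(( cmd & GF_CLI_STATUS_BITD ))  && echo "bitd status requested"
(( cmd & GF_CLI_STATUS_SCRUB )) || echo "scrub status not requested"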

View File

@ -0,0 +1,67 @@
#!/bin/bash
## Test case for bitrot
## gluster volume status command should show status of bitrot daemon
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
cleanup;
## Start a 2 node virtual cluster
TEST launch_cluster 2;
## Peer probe server 2 from server 1 cli
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
## Let's create and start the volume
TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
TEST $CLI_1 volume start $V0
## Enable bitrot on volume $V0
TEST $CLI_1 volume bitrot $V0 enable
## From node 1, the gluster volume status command should show the status of the
## bitrot daemon on all nodes. There are 2 nodes in the cluster, with bricks
## ${V0}0 and ${V0}1, so there should be 2 bitrot daemons running.
bitd=$($CLI_1 volume status $V0 | grep "Bitrot Daemon" | grep -v grep | wc -l)
TEST [ "$bitd" -eq 2 ];
## From node 2, the gluster volume status command should show the status of the
## scrubber daemon on all nodes. There are 2 nodes in the cluster, with bricks
## ${V0}0 and ${V0}1, so there should be 2 scrubber daemons running.
scrub=$($CLI_2 volume status $V0 | grep "Scrubber Daemon" | grep -v grep | \
wc -l)
TEST [ "$scrub" -eq 2 ];
## From node 1, the gluster volume status command should print the status of
## only the scrubber daemon. There should be a total of 2 scrubber daemons
## running, one on each node.
scrub=$($CLI_1 volume status $V0 scrub | grep "Scrubber Daemon" | \
grep -v grep | wc -l)
TEST [ "$scrub" -eq 2 ];
## From node 2, the gluster volume status command should print the status of
## only the bitd daemon. There should be a total of 2 bitd daemons running,
## one on each node.
bitd=$($CLI_2 volume status $V0 bitd | grep "Bitrot Daemon" | \
grep -v grep | wc -l)
TEST [ "$bitd" -eq 2 ];
cleanup;
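
A possible refinement of the one-shot grep checks above (purely a sketch, not
part of this commit) would be to poll until both daemons register, reusing
EXPECT_WITHIN from the already sourced include.rc; the count helpers below are
hypothetical:

function get_bitd_count {
        $CLI_1 volume status $V0 | grep -c "Bitrot Daemon"
}
function get_scrub_count {
        $CLI_1 volume status $V0 | grep -c "Scrubber Daemon"
}
## Wait, up to the probe timeout, for one daemon per node to show up.
EXPECT_WITHIN $PROBE_TIMEOUT 2 get_bitd_count
EXPECT_WITHIN $PROBE_TIMEOUT 2 get_scrub_count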

View File

@ -3987,6 +3987,26 @@ __glusterd_handle_status_volume (rpcsvc_request_t *req)
goto out;
}
if ((cmd & GF_CLI_STATUS_BITD) &&
(conf->op_version < GD_OP_VERSION_3_7_0)) {
snprintf (err_str, sizeof (err_str), "The cluster is operating "
"at a lesser version than %d. Getting the status of "
"bitd is not allowed in this state",
GD_OP_VERSION_3_7_0);
ret = -1;
goto out;
}
if ((cmd & GF_CLI_STATUS_SCRUB) &&
(conf->op_version < GD_OP_VERSION_3_7_0)) {
snprintf (err_str, sizeof (err_str), "The cluster is operating "
"at a lesser version than %d. Getting the status of "
"scrub is not allowed in this state",
GD_OP_VERSION_3_7_0);
ret = -1;
goto out;
}
ret = glusterd_op_begin_synctask (req, GD_OP_STATUS_VOLUME, dict);
out:

View File

@ -1399,6 +1399,22 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
"quota enabled", volname);
goto out;
}
} else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
if (!glusterd_is_bitrot_enabled (volinfo)) {
ret = -1;
snprintf (msg, sizeof (msg), "Volume %s does not have "
"bitrot enabled", volname);
goto out;
}
} else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
if (!glusterd_is_bitrot_enabled (volinfo)) {
ret = -1;
snprintf (msg, sizeof (msg), "Volume %s does not have "
"bitrot enabled. Scrubber will be enabled "
"automatically if bitrot is enabled",
volname);
goto out;
}
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
if (!glusterd_is_snapd_enabled (volinfo)) {
ret = -1;
@ -2776,6 +2792,20 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
goto out;
other_count++;
node_count++;
} else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
ret = glusterd_add_node_to_dict (priv->bitd_svc.name,
rsp_dict, 0, vol_opts);
if (ret)
goto out;
other_count++;
node_count++;
} else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
ret = glusterd_add_node_to_dict (priv->scrub_svc.name,
rsp_dict, 0, vol_opts);
if (ret)
goto out;
other_count++;
node_count++;
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
ret = glusterd_add_node_to_dict ("snapd", rsp_dict, 0,
vol_opts);
@ -2881,6 +2911,34 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
goto out;
other_count++;
node_count++;
other_index++;
}
if (glusterd_is_bitrot_enabled (volinfo)) {
ret = glusterd_add_node_to_dict
(priv->bitd_svc.name,
rsp_dict,
other_index,
vol_opts);
if (ret)
goto out;
other_count++;
node_count++;
other_index++;
}
/* For handling scrub status. The scrub daemon will be
 * running automatically when bitrot is enabled. */
if (glusterd_is_bitrot_enabled (volinfo)) {
ret = glusterd_add_node_to_dict
(priv->scrub_svc.name,
rsp_dict,
other_index,
vol_opts);
if (ret)
goto out;
other_count++;
node_count++;
}
}
}
@ -5998,6 +6056,8 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
case GF_CLI_STATUS_SHD:
case GF_CLI_STATUS_QUOTAD:
case GF_CLI_STATUS_SNAPD:
case GF_CLI_STATUS_BITD:
case GF_CLI_STATUS_SCRUB:
break;
default:
goto out;
@ -6097,6 +6157,44 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
pending_node->index = 0;
cds_list_add_tail (&pending_node->list, selected);
ret = 0;
} else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
if (!priv->bitd_svc.online) {
gf_log (this->name, GF_LOG_ERROR, "Bitrot is not "
"running");
ret = -1;
goto out;
}
pending_node = GF_CALLOC (1, sizeof (*pending_node),
gf_gld_mt_pending_node_t);
if (!pending_node) {
ret = -1;
goto out;
}
pending_node->node = &(priv->bitd_svc);
pending_node->type = GD_NODE_BITD;
pending_node->index = 0;
cds_list_add_tail (&pending_node->list, selected);
ret = 0;
} else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
if (!priv->scrub_svc.online) {
gf_log (this->name, GF_LOG_ERROR, "Scrubber is not "
"running");
ret = -1;
goto out;
}
pending_node = GF_CALLOC (1, sizeof (*pending_node),
gf_gld_mt_pending_node_t);
if (!pending_node) {
ret = -1;
goto out;
}
pending_node->node = &(priv->scrub_svc);
pending_node->type = GD_NODE_SCRUB;
pending_node->index = 0;
cds_list_add_tail (&pending_node->list, selected);
ret = 0;
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
if (!volinfo->snapd.svc.online) {

View File

@ -4020,6 +4020,10 @@ glusterd_add_node_to_dict (char *server, dict_t *dict, int count,
svc = &(priv->nfs_svc);
else if (strcmp(server, priv->quotad_svc.name) == 0)
svc = &(priv->quotad_svc);
else if (strcmp(server, priv->bitd_svc.name) == 0)
svc = &(priv->bitd_svc);
else if (strcmp(server, priv->scrub_svc.name) == 0)
svc = &(priv->scrub_svc);
//Consider service to be running only when glusterd sees it Online
if (svc->online)
@ -4041,6 +4045,10 @@ glusterd_add_node_to_dict (char *server, dict_t *dict, int count,
ret = dict_set_str (dict, key, "Self-heal Daemon");
else if (!strcmp (server, priv->quotad_svc.name))
ret = dict_set_str (dict, key, "Quota Daemon");
else if (!strcmp (server, priv->bitd_svc.name))
ret = dict_set_str (dict, key, "Bitrot Daemon");
else if (!strcmp (server, priv->scrub_svc.name))
ret = dict_set_str (dict, key, "Scrubber Daemon");
if (ret)
goto out;

View File

@ -447,6 +447,8 @@ typedef enum gd_node_type_ {
GD_NODE_NFS,
GD_NODE_QUOTAD,
GD_NODE_SNAPD,
GD_NODE_BITD,
GD_NODE_SCRUB,
} gd_node_type;
typedef enum missed_snap_stat {