glusterd/cli: cli to get local state representation from glusterd

Currently there is no existing CLI that can be used to get the
local state representation of the cluster as maintained in glusterd
in a readable as well as parseable format.

The CLI added has the following usage:

 # gluster get-state [daemon] [odir <path/to/output/dir>] [file <filename>]

This would dump data points that reflect the local state
representation of the cluster as maintained in glusterd (no other
daemons are supported as of now) to a file inside the specified
output directory. The default output directory and filename is
/var/run/gluster and glusterd_state_<timestamp> respectively. The
option for specifying the daemon name leaves room to add support for
other daemons in the future. Following are the data points captured
as of now to represent the state from the local glusterd pov:

 * Peer:
    - Primary hostname
    - uuid
    - state
    - connection status
    - List of hostnames

 * Volumes:
    - name, id, transport type, status
    - counts: bricks, snap, subvol, stripe, arbiter, disperse,
 redundancy
    - snapd status
    - quorum status
    - tiering related information
    - rebalance status
    - replace bricks status
    - snapshots

 * Bricks:
    - Path, hostname (this information is shown for all bricks)
    - port, rdma port, status, mount options, filesystem type and
signed in status for bricks running locally.

 * Services:
    - name, online status for initialised services

 * Others:
    - Base port, last allocated port
    - op-version
    - MYUUID

Change-Id: I4a45cc5407ab92d8afdbbd2098ece851f7e3d618
BUG: 1353156
Signed-off-by: Samikshan Bairagya <samikshan@gmail.com>
Reviewed-on: http://review.gluster.org/14873
Reviewed-by: Avra Sengupta <asengupt@redhat.com>
Smoke: Gluster Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
This commit is contained in:
Samikshan Bairagya 2016-07-07 20:33:02 +05:30 committed by Atin Mukherjee
parent 7d3de1aed8
commit 4a3454753f
21 changed files with 1320 additions and 53 deletions

1
.gitignore vendored
View File

@ -84,6 +84,7 @@ libglusterfs/src/y.tab.c
libglusterfs/src/y.tab.h
libglusterfs/src/defaults.c
libglusterfs/src/glusterfs-fops.h
libglusterfs/src/cli1-xdr.h
libtool
run-tests.sh
tests/env.rc

View File

@ -34,7 +34,9 @@ cli_cmd_global_help_cbk (struct cli_state *state, struct cli_cmd_word *in_word,
const char **words, int wordcount);
int cli_cmd_ganesha_cbk (struct cli_state *state, struct cli_cmd_word *word,
const char **words, int wordcount);
int
cli_cmd_get_state_cbk (struct cli_state *state, struct cli_cmd_word *word,
const char **words, int wordcount);
struct cli_cmd global_cmds[] = {
{ "global help",
@ -45,6 +47,10 @@ struct cli_cmd global_cmds[] = {
cli_cmd_ganesha_cbk,
"Enable/disable NFS-Ganesha support",
},
{ "get-state [<daemon>] [odir </path/to/output/dir/>] [file <filename>]",
cli_cmd_get_state_cbk,
"Get local state representation of mentioned daemon",
},
{NULL, NULL, NULL}
};
@ -133,3 +139,52 @@ out:
return ret;
}
/* CLI callback for the "get-state [<daemon>] [odir <dir>] [file <name>]"
 * command.  Parses the command words into an options dict and submits a
 * GLUSTER_CLI_GET_STATE request to glusterd.
 * Returns the RPC submission result, or -1 on frame/parse failure. */
int
cli_cmd_get_state_cbk (struct cli_state *state, struct cli_cmd_word *word,
const char **words, int wordcount)
{
int sent = 0;
int parse_error = 0;
int ret = -1;
rpc_clnt_procedure_t *proc = NULL;
call_frame_t *frame = NULL;
dict_t *options = NULL;
cli_local_t *local = NULL;
char *op_errstr = NULL;
frame = create_frame (THIS, THIS->ctx->pool);
if (!frame)
goto out;
ret = cli_cmd_get_state_parse (state, words, wordcount, &options,
&op_errstr);
/* On parse failure: show the parser's message when it produced one,
 * and always print the usage pattern so the user sees the syntax. */
if (ret) {
if (op_errstr) {
cli_err ("%s", op_errstr);
cli_usage_out (word->pattern);
GF_FREE (op_errstr);
} else
cli_usage_out (word->pattern);
parse_error = 1;
goto out;
}
CLI_LOCAL_INIT (local, words, frame, options);
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_STATE];
if (proc->fn)
ret = proc->fn (frame, THIS, options);
out:
/* Only emit the generic failure message when the request was never
 * sent and the error was not already reported as a parse error. */
if (ret) {
cli_cmd_sent_status_get (&sent);
if ((sent == 0) && (parse_error == 0))
cli_out ("Getting daemon state failed");
}
CLI_STACK_DESTROY (frame);
return ret;
}

View File

@ -930,6 +930,120 @@ out:
return ret;
}
/* Parse the words of "get-state [<daemon>] [odir <dir>] [file <name>]"
 * into an options dict.
 *
 * @state      cli state (unused; kept for parser-signature parity)
 * @words      tokenized command line, words[0] == "get-state"
 * @wordcount  number of tokens; valid range is 1-6
 * @options    out: newly created dict carrying "daemon" and, when given,
 *             "odir"/"filename" keys.  The dict pointer is always stored
 *             in *options; on failure it is additionally unref'd.
 * @op_errstr  out: allocated error message on some parse failures
 *             (caller frees with GF_FREE)
 *
 * Returns 0 on success, -1 on failure.
 *
 * Changes vs. original: removed the unused local `cmd`, the duplicated
 * strcmp() pre-check for "odir"/"file", and the always-true
 * `if (wordcount >= 1)` wrapper (wordcount < 1 already bails out above).
 */
int32_t
cli_cmd_get_state_parse (struct cli_state *state,
                         const char **words, int wordcount,
                         dict_t **options, char **op_errstr)
{
        dict_t *dict        = NULL;
        int     ret         = -1;
        char   *odir        = NULL;
        char   *filename    = NULL;
        char   *daemon_name = NULL;
        int     count       = 0;

        GF_VALIDATE_OR_GOTO ("cli", options, out);
        GF_VALIDATE_OR_GOTO ("cli", words, out);

        dict = dict_new ();
        if (!dict)
                goto out;

        if (wordcount < 1 || wordcount > 6) {
                *op_errstr = gf_strdup ("Problem parsing arguments."
                                        " Check usage.");
                goto out;
        }

        /* glusterd is the only supported daemon today and is also the
         * default when no daemon name is given. */
        gf_asprintf (&daemon_name, "%s", "glusterd");

        for (count = 1; count < wordcount; count++) {
                if (strcmp (words[count], "odir") == 0) {
                        /* "odir" must be followed by a directory path. */
                        if (++count < wordcount) {
                                odir = (char *) words[count];
                                continue;
                        }
                        ret = -1;
                        goto out;
                } else if (strcmp (words[count], "file") == 0) {
                        /* "file" must be followed by a filename. */
                        if (++count < wordcount) {
                                filename = (char *) words[count];
                                continue;
                        }
                        ret = -1;
                        goto out;
                } else {
                        /* A bare word is only valid as the daemon name
                         * in position 1. */
                        if (count > 1) {
                                *op_errstr = gf_strdup ("Problem "
                                                "parsing arguments. "
                                                "Check usage.");
                                ret = -1;
                                goto out;
                        }
                        if (strcmp (words[count], "glusterd") == 0) {
                                continue;
                        } else {
                                *op_errstr = gf_strdup ("glusterd is "
                                                "the only supported daemon.");
                                ret = -1;
                                goto out;
                        }
                }
        }

        /* NOTE(review): dict_set_str stores the pointer without taking
         * ownership, so the gf_asprintf'd daemon_name is never freed —
         * confirm whether dict_set_dynstr was intended here. */
        ret = dict_set_str (dict, "daemon", daemon_name);
        if (ret) {
                *op_errstr = gf_strdup ("Command failed. Please check "
                                        " log file for more details.");
                gf_log (THIS->name, GF_LOG_ERROR,
                        "Setting daemon name to dictionary failed");
                goto out;
        }

        if (odir) {
                ret = dict_set_str (dict, "odir", odir);
                if (ret) {
                        *op_errstr = gf_strdup ("Command failed. Please"
                                                " check log file for"
                                                " more details.");
                        gf_log (THIS->name, GF_LOG_ERROR,
                                "Setting output directory to"
                                "dictionary failed");
                        goto out;
                }
        }

        if (filename) {
                ret = dict_set_str (dict, "filename", filename);
                if (ret) {
                        *op_errstr = gf_strdup ("Command failed. Please"
                                                " check log file for"
                                                " more details.");
                        gf_log (THIS->name, GF_LOG_ERROR,
                                "Setting filename to dictionary failed");
                        goto out;
                }
        }

out:
        /* Hand the dict back even on failure (callers expect *options to
         * be set); drop our reference when parsing failed. */
        if (dict)
                *options = dict;

        if (ret && dict)
                dict_unref (dict);

        return ret;
}
int32_t
cli_cmd_inode_quota_parse (const char **words, int wordcount, dict_t **options)
{

View File

@ -55,17 +55,6 @@ int32_t
gf_cli_remove_brick (call_frame_t *frame, xlator_t *this,
void *data);
char *cli_vol_type_str[] = {"Distribute",
"Stripe",
"Replicate",
"Striped-Replicate",
"Disperse",
"Tier",
"Distributed-Stripe",
"Distributed-Replicate",
"Distributed-Striped-Replicate",
"Distributed-Disperse",
};
char *cli_vol_status_str[] = {"Created",
"Started",
@ -503,6 +492,73 @@ out:
return ret;
}
/* RPC callback for GLUSTER_CLI_GET_STATE.  Decodes glusterd's reply,
 * reports failure via cli_err, or prints the path of the generated
 * state-dump file on success.  Always broadcasts the result so the
 * waiting CLI thread wakes up. */
int
gf_cli_get_state_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
gf_cli_rsp rsp = {0,};
int ret = -1;
dict_t *dict = NULL;
char *daemon_name = NULL;
char *ofilepath = NULL;
GF_VALIDATE_OR_GOTO ("cli", myframe, out);
if (-1 == req->rpc_status) {
goto out;
}
ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log (((call_frame_t *) myframe)->this->name, GF_LOG_ERROR,
"Failed to decode xdr response");
goto out;
}
dict = dict_new ();
if (!dict) {
ret = -1;
goto out;
}
/* The reply dict carries "daemon" and "ofilepath" set by glusterd. */
ret = dict_unserialize (rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret)
goto out;
if (rsp.op_ret) {
if (strcmp (rsp.op_errstr, ""))
cli_err ("Failed to get daemon state: %s", rsp.op_errstr);
else
cli_err ("Failed to get daemon state. Check glusterd"
" log file for more details");
} else {
ret = dict_get_str (dict, "daemon", &daemon_name);
if (ret)
gf_log ("cli", GF_LOG_ERROR, "Couldn't get daemon name");
ret = dict_get_str (dict, "ofilepath", &ofilepath);
if (ret)
gf_log ("cli", GF_LOG_ERROR, "Couldn't get filepath");
if (daemon_name && ofilepath)
cli_out ("%s state dumped to %s",
daemon_name, ofilepath);
}
/* Propagate glusterd's op_ret as this callback's result. */
ret = rsp.op_ret;
out:
/* rsp buffers were allocated by the XDR layer with malloc; use free,
 * not GF_FREE. */
free (rsp.dict.dict_val);
free (rsp.op_errstr);
if (dict)
dict_unref (dict);
cli_cmd_broadcast_response (ret);
return ret;
}
void
cli_out_options ( char *substr, char *optstr, char *valstr)
{
@ -725,13 +781,11 @@ gf_cli_print_tier_info (dict_t *dict, int i, int brick_count)
vol_type = hot_type;
hot_dist_count = (hot_replica_count ?
hot_replica_count : 1);
if ((hot_type != GF_CLUSTER_TYPE_TIER) &&
(hot_type > 0) &&
(hot_dist_count < hot_brick_count))
vol_type = hot_type + GF_CLUSTER_TYPE_MAX - 1;
vol_type = get_vol_type (hot_type, hot_dist_count, hot_brick_count);
cli_out ("Hot Tier Type : %s",
cli_vol_type_str[vol_type]);
vol_type_str[vol_type]);
gf_cli_print_number_of_bricks (hot_type,
hot_brick_count, hot_dist_count, 0,
hot_replica_count, 0, 0, 0);
@ -742,14 +796,11 @@ gf_cli_print_tier_info (dict_t *dict, int i, int brick_count)
goto out;
cli_out ("Cold Tier:");
vol_type = cold_type;
if ((cold_type != GF_CLUSTER_TYPE_TIER) &&
(cold_type > 0) &&
(cold_dist_count < cold_brick_count))
vol_type = cold_type + GF_CLUSTER_TYPE_MAX - 1;
vol_type = get_vol_type (cold_type, cold_dist_count, cold_brick_count);
cli_out ("Cold Tier Type : %s",
cli_vol_type_str[vol_type]);
vol_type_str[vol_type]);
gf_cli_print_number_of_bricks (cold_type,
cold_brick_count,
cold_dist_count, 0, cold_replica_count,
@ -973,15 +1024,11 @@ xml_output:
if (ret)
goto out;
vol_type = type;
// Distributed (stripe/replicate/stripe-replica) setups
if ((type != GF_CLUSTER_TYPE_TIER) && (type > 0) &&
(dist_count < brick_count))
vol_type = type + GF_CLUSTER_TYPE_MAX - 1;
vol_type = get_vol_type (type, dist_count, brick_count);
cli_out ("Volume Name: %s", volname);
cli_out ("Type: %s", cli_vol_type_str[vol_type]);
cli_out ("Type: %s", vol_type_str[vol_type]);
cli_out ("Volume ID: %s", volume_id_str);
cli_out ("Status: %s", cli_vol_status_str[status]);
cli_out ("Snapshot Count: %d", snap_count);
@ -4150,6 +4197,32 @@ out:
return ret;
}
/* Submit a GLUSTER_CLI_GET_STATE request to glusterd.
 *
 * @frame  call frame for the request
 * @this   cli xlator
 * @data   options dict built by cli_cmd_get_state_parse()
 * Returns 0 on success, -1 on bad arguments or submission failure.
 *
 * Fix vs. original: removed the unused local `odir` (the output
 * directory travels inside the serialized dict, not as a local here).
 */
int32_t
gf_cli_get_state (call_frame_t *frame, xlator_t *this, void *data)
{
        gf_cli_req  req  = {{0,},};
        int         ret  = 0;
        dict_t     *dict = NULL;

        if (!frame || !this || !data) {
                ret = -1;
                goto out;
        }

        dict = data;

        ret = cli_to_glusterd (&req, frame, gf_cli_get_state_cbk,
                               (xdrproc_t) xdr_gf_cli_req, dict,
                               GLUSTER_CLI_GET_STATE, this, cli_rpc_prog,
                               NULL);
out:
        gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);

        return ret;
}
int32_t
gf_cli_get_next_volume (call_frame_t *frame, xlator_t *this,
void *data)
@ -10918,7 +10991,6 @@ cli_to_glusterd (gf_cli_req *req, call_frame_t *frame,
ret = cli_cmd_submit (NULL, req, frame, prog, procnum, iobref, this,
cbkfn, (xdrproc_t) xdrproc);
out:
return ret;
@ -11256,7 +11328,7 @@ struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = {
[GLUSTER_CLI_DEPROBE] = {"DEPROBE_QUERY", gf_cli_deprobe},
[GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS", gf_cli_list_friends},
[GLUSTER_CLI_UUID_RESET] = {"UUID_RESET", gf_cli3_1_uuid_reset},
[GLUSTER_CLI_UUID_GET] = {"UUID_GET", gf_cli3_1_uuid_get},
[GLUSTER_CLI_UUID_GET] = {"UUID_GET", gf_cli3_1_uuid_get},
[GLUSTER_CLI_CREATE_VOLUME] = {"CREATE_VOLUME", gf_cli_create_volume},
[GLUSTER_CLI_DELETE_VOLUME] = {"DELETE_VOLUME", gf_cli_delete_volume},
[GLUSTER_CLI_START_VOLUME] = {"START_VOLUME", gf_cli_start_volume},
@ -11297,7 +11369,8 @@ struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = {
[GLUSTER_CLI_BITROT] = {"BITROT", gf_cli_bitrot},
[GLUSTER_CLI_ATTACH_TIER] = {"ATTACH_TIER", gf_cli_attach_tier},
[GLUSTER_CLI_DETACH_TIER] = {"DETACH_TIER", gf_cli_detach_tier},
[GLUSTER_CLI_TIER] = {"TIER", gf_cli_tier}
[GLUSTER_CLI_TIER] = {"TIER", gf_cli_tier},
[GLUSTER_CLI_GET_STATE] = {"GET_STATE", gf_cli_get_state}
};
struct rpc_clnt_program cli_prog = {

View File

@ -2720,9 +2720,7 @@ cli_xml_output_vol_info (cli_local_t *local, dict_t *dict)
/* For Distributed-(stripe,replicate,stripe-replicate,disperse)
types
*/
if ((type != GF_CLUSTER_TYPE_TIER) && (type > 0) &&
(dist_count < brick_count))
type = type + GF_CLUSTER_TYPE_MAX - 1;
type = get_vol_type (type, dist_count, brick_count);
ret = xmlTextWriterWriteFormatElement (local->writer,
(xmlChar *)"type",
@ -2732,7 +2730,7 @@ cli_xml_output_vol_info (cli_local_t *local, dict_t *dict)
ret = xmlTextWriterWriteFormatElement (local->writer,
(xmlChar *)"typeStr",
"%s",
cli_vol_type_str[type]);
vol_type_str[type]);
XML_RET_CHECK_AND_GOTO (ret, out);
memset (key, 0, sizeof (key));
@ -2819,9 +2817,13 @@ cli_xml_output_vol_info (cli_local_t *local, dict_t *dict)
goto out;
}
tier_vol_type = value[HOT_TYPE];
hot_dist_count = (value[HOT_REPLICA_COUNT] ?
value[HOT_REPLICA_COUNT] : 1);
tier_vol_type = get_vol_type (value[HOT_TYPE],
hot_dist_count,
value[HOT_BRICK_COUNT]);
if ((value[HOT_TYPE] != GF_CLUSTER_TYPE_TIER) &&
(value[HOT_TYPE] > 0) &&
(hot_dist_count < value[HOT_BRICK_COUNT]))
@ -2835,7 +2837,7 @@ cli_xml_output_vol_info (cli_local_t *local, dict_t *dict)
ret = xmlTextWriterWriteFormatElement
(local->writer, (xmlChar *)"hotBrickType",
"%s", cli_vol_type_str[tier_vol_type]);
"%s", vol_type_str[tier_vol_type]);
ret = xmlTextWriterWriteFormatElement (local->writer,
(xmlChar *)"hotreplicaCount",
@ -2912,13 +2914,9 @@ cli_xml_output_vol_info (cli_local_t *local, dict_t *dict)
ret = xmlTextWriterEndElement (local->writer);
XML_RET_CHECK_AND_GOTO (ret, out);
tier_vol_type = value[COLD_TYPE];
if ((value[COLD_TYPE] != GF_CLUSTER_TYPE_TIER) &&
(value[COLD_TYPE] > 0) &&
(value[COLD_DIST_COUNT] < value[COLD_BRICK_COUNT]))
tier_vol_type = value[COLD_TYPE] +
GF_CLUSTER_TYPE_MAX - 1;
tier_vol_type = get_vol_type (value[COLD_TYPE],
value[COLD_DIST_COUNT],
value[COLD_BRICK_COUNT]);
ret = xmlTextWriterStartElement (local->writer,
(xmlChar *)
@ -2927,7 +2925,7 @@ cli_xml_output_vol_info (cli_local_t *local, dict_t *dict)
ret = xmlTextWriterWriteFormatElement
(local->writer, (xmlChar *)"coldBrickType",
"%s", cli_vol_type_str[tier_vol_type]);
"%s", vol_type_str[tier_vol_type]);
ret = xmlTextWriterWriteFormatElement (local->writer,
(xmlChar *)"coldreplicaCount",

View File

@ -71,7 +71,6 @@ struct cli_cmd_word;
struct cli_cmd_tree;
struct cli_cmd;
extern char *cli_vol_type_str[];
extern char *cli_vol_status_str[];
extern char *cli_vol_task_status_str[];
@ -260,6 +259,10 @@ int32_t
cli_cmd_ganesha_parse (struct cli_state *state, const char **words,
int wordcount, dict_t **options, char **op_errstr);
int32_t
cli_cmd_get_state_parse (struct cli_state *state, const char **words,
int wordcount, dict_t **options, char **op_errstr);
int32_t
cli_cmd_volume_add_brick_parse (const char **words, int wordcount,
dict_t **options, int *type);

View File

@ -269,6 +269,9 @@ Selects <HOSTNAME:BRICKNAME> as the source for all the files that are in split-b
Selects the split-brained <FILE> present in <HOSTNAME:BRICKNAME> as source and completes heal.
.SS "Other Commands"
.TP
\fB\ get-state [<daemon>] [odir </path/to/output/dir/>] [file <filename>] \fR
Get the local state representation of the mentioned daemon and store the data at the provided output path
.TP
\fB\ help \fR
Display the command options.
.TP

View File

@ -6,9 +6,10 @@ libglusterfs_la_CFLAGS = $(GF_CFLAGS) $(GF_DARWIN_LIBGLUSTERFS_CFLAGS) \
libglusterfs_la_CPPFLAGS = $(GF_CPPFLAGS) -D__USE_FILE_OFFSET64 \
-DXLATORDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator\" \
-DXLATORPARENTDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)\" \
-I$(top_srcdir)/rpc/rpc-lib/src/ -I$(CONTRIBDIR)/rbtree \
-I$(CONTRIBDIR)/libexecinfo ${ARGP_STANDALONE_CPPFLAGS} \
-DSBIN_DIR=\"$(sbindir)\" -I$(CONTRIBDIR)/timer-wheel
-I$(top_srcdir)/rpc/xdr/src/ -I$(top_srcdir)/rpc/rpc-lib/src/ \
-I$(CONTRIBDIR)/rbtree -I$(CONTRIBDIR)/libexecinfo \
${ARGP_STANDALONE_CPPFLAGS} -DSBIN_DIR=\"$(sbindir)\" \
-I$(CONTRIBDIR)/timer-wheel
libglusterfs_la_LIBADD = @LEXLIB@ $(ZLIB_LIBS) $(MATH_LIB) $(UUID_LIBS)
libglusterfs_la_LDFLAGS = -version-info $(LIBGLUSTERFS_LT_VERSION)
@ -35,9 +36,9 @@ libglusterfs_la_SOURCES = dict.c xlator.c logging.c \
compound-fop-utils.c throttle-tbf.c
nodist_libglusterfs_la_SOURCES = y.tab.c graph.lex.c defaults.c
nodist_libglusterfs_la_HEADERS = y.tab.h glusterfs-fops.h
nodist_libglusterfs_la_HEADERS = y.tab.h glusterfs-fops.h cli1-xdr.h
BUILT_SOURCES = graph.lex.c defaults.c glusterfs-fops.h eventtypes.h
BUILT_SOURCES = graph.lex.c defaults.c glusterfs-fops.h eventtypes.h cli1-xdr.h
libglusterfs_la_HEADERS = common-utils.h defaults.h default-args.h \
dict.h glusterfs.h hashfn.h timespec.h logging.h xlator.h \
@ -99,6 +100,12 @@ $(top_srcdir)/rpc/xdr/src/glusterfs-fops.h: $(top_srcdir)/rpc/xdr/src/glusterfs-
glusterfs-fops.h: $(top_srcdir)/rpc/xdr/src/glusterfs-fops.h
cp $(top_srcdir)/rpc/xdr/src/glusterfs-fops.h .
$(top_srcdir)/rpc/xdr/src/cli1-xdr.h: $(top_srcdir)/rpc/xdr/src/cli1-xdr.x
$(MAKE) -C $(top_builddir)/rpc/xdr/src/ `basename $@`
cli1-xdr.h: $(top_srcdir)/rpc/xdr/src/cli1-xdr.h
cp $(top_srcdir)/rpc/xdr/src/cli1-xdr.h .
CLEANFILES = $(nodist_libglusterfs_la_SOURCES) $(nodist_libglusterfs_la_HEADERS)
if UNITTEST

View File

@ -46,6 +46,7 @@
#include "globals.h"
#include "lkowner.h"
#include "syscall.h"
#include "cli1-xdr.h"
#include <ifaddrs.h>
#include "libglusterfs-messages.h"
@ -53,6 +54,18 @@
#define AI_ADDRCONFIG 0
#endif /* AI_ADDRCONFIG */
/* Human-readable names for volume types.  The first entries map the plain
 * cluster types; the trailing "Distributed-" entries are the variants
 * selected by get_vol_type() when a volume has more bricks than its
 * distribute count.
 * NOTE(review): ordering must stay in sync with the cluster-type enum in
 * cli1-xdr.h — confirm when adding new types. */
char *vol_type_str[] = {"Distribute",
"Stripe",
"Replicate",
"Striped-Replicate",
"Disperse",
"Tier",
"Distributed-Stripe",
"Distributed-Replicate",
"Distributed-Striped-Replicate",
"Distributed-Disperse",
};
typedef int32_t (*rw_op_t)(int32_t fd, char *buf, int32_t size);
typedef int32_t (*rwv_op_t)(int32_t fd, const struct iovec *buf, int32_t size);
@ -2722,6 +2735,16 @@ out:
return result;
}
/* Map a plain cluster type to its "Distributed-" variant when the volume
 * actually distributes (brick count exceeds the distribute count).  Tier
 * volumes and non-positive types are returned unchanged.  The result is a
 * valid index into vol_type_str[]. */
int
get_vol_type (int type, int dist_count, int brick_count)
{
        int is_distributed = (dist_count < brick_count);

        if (type == GF_CLUSTER_TYPE_TIER || type <= 0 || !is_distributed)
                return type;

        return type + GF_CLUSTER_TYPE_MAX - 1;
}
int
validate_brick_name (char *brick)
{

View File

@ -209,6 +209,8 @@ struct list_node {
struct list_head list;
};
extern char *vol_type_str[];
struct list_node *list_node_add (void *ptr, struct list_head *list);
struct list_node *list_node_add_order (void *ptr, struct list_head *list,
int (*compare)(struct list_head *,
@ -765,6 +767,7 @@ void gf_array_insertionsort (void *a, int l, int r, size_t elem_size,
int gf_is_str_int (const char *value);
char *gf_uint64_2human_readable (uint64_t);
int get_vol_type (int type, int dist_count, int brick_count);
int validate_brick_name (char *brick);
char *get_host_name (char *word, char **host);
char *get_path_name (char *word, char **path);

View File

@ -197,6 +197,7 @@ enum gluster_cli_procnum {
GLUSTER_CLI_ATTACH_TIER,
GLUSTER_CLI_DETACH_TIER,
GLUSTER_CLI_TIER,
GLUSTER_CLI_GET_STATE,
GLUSTER_CLI_MAXVALUE,
};

View File

@ -0,0 +1,141 @@
#!/bin/bash
# Regression test for the "gluster get-state" CLI (local glusterd state dump).
# Exercises every argument combination: defaults, explicit daemon name,
# odir/file options, unsupported daemons, nonexistent output directories
# and malformed arguments.
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../fileio.rc
. $(dirname $0)/../../snapshot.rc
cleanup;
# Existing and intentionally-nonexistent output directories used below.
ODIR="/var/tmp/gdstates/"
NOEXDIR="/var/tmp/gdstatesfoo/"
# Helpers that pick specific words out of the CLI error output so EXPECT
# can match them positionally.
function get_daemon_not_supported_part {
echo $1
}
function get_usage_part {
echo $7
}
function get_directory_doesnt_exist_part {
echo $1
}
function get_parsing_arguments_part {
echo $1
}
TEST glusterd
TEST pidof glusterd
TEST mkdir $ODIR
# Create a disperse volume with an attached tier plus an lvm-backed volume
# with a snapshot, so the dump exercises the tier and snapshot code paths.
TEST $CLI volume create $V0 disperse $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
TEST $CLI volume start $V0
TEST $CLI volume tier $V0 attach replica 2 $H0:$B1/b4 $H0:$B1/b5
TEST setup_lvm 1
TEST $CLI volume create $V1 $H0:$L1;
TEST $CLI volume start $V1
TEST $CLI snapshot create ${V1}_snap $V1
# Default invocation: the 5th word of the CLI output is the dump file path;
# verify the file exists and is readable, then remove it.
OPATH=$(echo `$CLI get-state` | awk '{print $5}' | tr -d '\n')
TEST fd=`fd_available`
TEST fd_open $fd "r" $OPATH;
TEST fd_close $fd;
rm $OPATH
# Explicit daemon name.
OPATH=$(echo `$CLI get-state glusterd` | awk '{print $5}' | tr -d '\n')
TEST fd=`fd_available`
TEST fd_open $fd "r" $OPATH;
TEST fd_close $fd;
rm $OPATH
# Unsupported daemon must fail with the "only supported daemon" message.
TEST ! $CLI get-state glusterfsd;
ERRSTR=$($CLI get-state glusterfsd 2>&1 >/dev/null);
EXPECT 'glusterd' get_daemon_not_supported_part $ERRSTR;
EXPECT 'Usage:' get_usage_part $ERRSTR;
# "file" option, with and without the daemon name.
OPATH=$(echo `$CLI get-state file gdstate` | awk '{print $5}' | tr -d '\n')
TEST fd=`fd_available`
TEST fd_open $fd "r" $OPATH;
TEST fd_close $fd;
rm $OPATH
OPATH=$(echo `$CLI get-state glusterd file gdstate` | awk '{print $5}' | tr -d '\n')
TEST fd=`fd_available`
TEST fd_open $fd "r" $OPATH;
TEST fd_close $fd;
rm $OPATH
TEST ! $CLI get-state glusterfsd file gdstate;
ERRSTR=$($CLI get-state glusterfsd file gdstate 2>&1 >/dev/null);
EXPECT 'glusterd' get_daemon_not_supported_part $ERRSTR;
EXPECT 'Usage:' get_usage_part $ERRSTR;
# "odir" option, with and without the daemon name and the "file" option.
OPATH=$(echo `$CLI get-state odir $ODIR` | awk '{print $5}' | tr -d '\n')
TEST fd=`fd_available`
TEST fd_open $fd "r" $OPATH;
TEST fd_close $fd;
rm $OPATH
OPATH=$(echo `$CLI get-state glusterd odir $ODIR` | awk '{print $5}' | tr -d '\n')
TEST fd=`fd_available`
TEST fd_open $fd "r" $OPATH;
TEST fd_close $fd;
rm $OPATH
OPATH=$(echo `$CLI get-state odir $ODIR file gdstate` | awk '{print $5}' | tr -d '\n')
TEST fd=`fd_available`
TEST fd_open $fd "r" $OPATH;
TEST fd_close $fd;
rm $OPATH
OPATH=$(echo `$CLI get-state glusterd odir $ODIR file gdstate` | awk '{print $5}' | tr -d '\n')
TEST fd=`fd_available`
TEST fd_open $fd "r" $OPATH;
TEST fd_close $fd;
rm $OPATH
OPATH=$(echo `$CLI get-state glusterd odir $ODIR file gdstate` | awk '{print $5}' | tr -d '\n')
TEST fd=`fd_available`
TEST fd_open $fd "r" $OPATH;
TEST fd_close $fd;
rm $OPATH
# Unsupported daemon still fails regardless of odir/file options.
TEST ! $CLI get-state glusterfsd odir $ODIR;
ERRSTR=$($CLI get-state glusterfsd odir $ODIR 2>&1 >/dev/null);
EXPECT 'glusterd' get_daemon_not_supported_part $ERRSTR;
EXPECT 'Usage:' get_usage_part $ERRSTR;
TEST ! $CLI get-state glusterfsd odir $ODIR file gdstate;
ERRSTR=$($CLI get-state glusterfsd odir $ODIR file gdstate 2>&1 >/dev/null);
EXPECT 'glusterd' get_daemon_not_supported_part $ERRSTR;
EXPECT 'Usage:' get_usage_part $ERRSTR;
TEST ! $CLI get-state glusterfsd odir $NOEXDIR file gdstate;
ERRSTR=$($CLI get-state glusterfsd odir $NOEXDIR file gdstate 2>&1 >/dev/null);
EXPECT 'glusterd' get_daemon_not_supported_part $ERRSTR;
EXPECT 'Usage:' get_usage_part $ERRSTR;
# Nonexistent output directory must fail with a "Failed" message.
TEST ! $CLI get-state odir $NOEXDIR;
ERRSTR=$($CLI get-state odir $NOEXDIR 2>&1 >/dev/null);
EXPECT 'Failed' get_directory_doesnt_exist_part $ERRSTR;
TEST ! $CLI get-state odir $NOEXDIR file gdstate;
ERRSTR=$($CLI get-state odir $NOEXDIR 2>&1 >/dev/null);
EXPECT 'Failed' get_directory_doesnt_exist_part $ERRSTR;
# Completely unknown arguments.
TEST ! $CLI get-state foo bar;
ERRSTR=$($CLI get-state foo bar 2>&1 >/dev/null);
EXPECT 'glusterd' get_daemon_not_supported_part $ERRSTR;
EXPECT 'Usage:' get_usage_part $ERRSTR;
TEST ! $CLI get-state glusterd foo bar;
ERRSTR=$($CLI get-state glusterd foo bar 2>&1 >/dev/null);
EXPECT 'Problem' get_parsing_arguments_part $ERRSTR;
rm -Rf $ODIR
cleanup;

View File

@ -4947,6 +4947,521 @@ glusterd_handle_get_vol_opt (rpcsvc_request_t *req)
{
return glusterd_big_locked_handler (req, __glusterd_handle_get_vol_opt);
}
/* dict_foreach callback: emit one "key: value" line per global option
 * into the state-dump file passed through @data.  Always returns 0 so
 * iteration continues even when an argument is missing. */
static int
glusterd_print_global_options (dict_t *opts, char *key, data_t *val, void *data)
{
        FILE *dump_fp = NULL;

        GF_VALIDATE_OR_GOTO (THIS->name, key, out);
        GF_VALIDATE_OR_GOTO (THIS->name, val, out);
        GF_VALIDATE_OR_GOTO (THIS->name, data, out);

        dump_fp = (FILE *) data;
        fprintf (dump_fp, "%s: %s\n", key, val->data);
out:
        return 0;
}
/* Append per-snapshot information for @volinfo to the state-dump file.
 *
 * @fp        open state-dump file
 * @volinfo   volume whose snapshots are listed
 * @volcount  1-based index of the volume in the dump (used as key prefix)
 * Returns 0 on success, -1 on validation or status-lookup failure.
 *
 * Fix vs. original: the snapshot id was printed via
 * gf_strdup (uuid_utoa (...)) and the duplicate was never freed — a leak
 * per snapshot.  uuid_utoa()'s buffer is passed to fprintf directly,
 * exactly as the original relied on it being valid for the call.
 */
static int
glusterd_print_snapinfo_by_vol (FILE *fp, glusterd_volinfo_t *volinfo, int volcount)
{
        int                  ret                            = -1;
        glusterd_volinfo_t  *snap_vol                       = NULL;
        glusterd_volinfo_t  *tmp_vol                        = NULL;
        glusterd_snap_t     *snapinfo                       = NULL;
        int                  snapcount                      = 0;
        char                 timestr[64]                    = {0,};
        char                 snap_status_str[STATUS_STRLEN] = {0,};

        GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
        GF_VALIDATE_OR_GOTO (THIS->name, fp, out);

        cds_list_for_each_entry_safe (snap_vol, tmp_vol, &volinfo->snap_volumes,
                                      snapvol_list) {
                snapcount++;
                snapinfo = snap_vol->snapshot;

                ret = glusterd_get_snap_status_str (snapinfo, snap_status_str);
                if (ret) {
                        gf_msg (THIS->name, GF_LOG_ERROR, 0,
                                GD_MSG_STATE_STR_GET_FAILED,
                                "Failed to get status for snapshot: %s",
                                snapinfo->snapname);
                        goto out;
                }
                gf_time_fmt (timestr, sizeof timestr, snapinfo->time_stamp,
                             gf_timefmt_FT);

                fprintf (fp, "Volume%d.snapshot%d.name: %s\n",
                         volcount, snapcount, snapinfo->snapname);
                fprintf (fp, "Volume%d.snapshot%d.id: %s\n", volcount, snapcount,
                         uuid_utoa (snapinfo->snap_id));
                fprintf (fp, "Volume%d.snapshot%d.time: %s\n",
                         volcount, snapcount, timestr);
                if (snapinfo->description)
                        fprintf (fp, "Volume%d.snapshot%d.description: %s\n",
                                 volcount, snapcount, snapinfo->description);
                fprintf (fp, "Volume%d.snapshot%d.status: %s\n",
                         volcount, snapcount, snap_status_str);
        }

        ret = 0;
out:
        return ret;
}
/* Backend for "gluster get-state": writes a human-readable dump of
 * glusterd's local view of the cluster (global options, peers, volumes,
 * bricks, snapshots, services, ports) to a file and returns the path to
 * the CLI via the response dict.
 *
 * @req  incoming rpc request (used only to send the reply)
 * @dict request options: optional "odir" and "filename"; this function
 *       adds "ofilepath" so the CLI can display where the dump landed.
 * Returns 0 on success, -1 on failure (err_str carries the message). */
static int
glusterd_get_state (rpcsvc_request_t *req, dict_t *dict)
{
int32_t ret = -1;
gf_cli_rsp rsp = {0,};
int fd = -1;
FILE *fp = NULL;
DIR *dp = NULL;
char err_str[2048] = {0,};
glusterd_conf_t *priv = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_peer_hostname_t *peer_hostname_info = NULL;
glusterd_volinfo_t *volinfo = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_snap_t *snapinfo = NULL;
xlator_t *this = NULL;
char *odir = NULL;
char *filename = NULL;
char *ofilepath = NULL;
int count = 0;
int count_bkp = 0;
int odirlen = 0;
time_t now = 0;
char timestamp[16] = {0,};
char *vol_type_str = NULL;
char *hot_tier_type_str = NULL;
char *cold_tier_type_str = NULL;
char transport_type_str[STATUS_STRLEN] = {0,};
char quorum_status_str[STATUS_STRLEN] = {0,};
char rebal_status_str[STATUS_STRLEN] = {0,};
char peer_state_str[STATUS_STRLEN] = {0,};
char vol_status_str[STATUS_STRLEN] = {0,};
this = THIS;
GF_VALIDATE_OR_GOTO (THIS->name, this, out);
priv = THIS->private;
GF_VALIDATE_OR_GOTO (this->name, priv, out);
GF_VALIDATE_OR_GOTO (this->name, dict, out);
/* Resolve the output directory; fall back to /var/run/gluster/.
 * NOTE(review): the gf_asprintf'd default is never freed — confirm. */
ret = dict_get_str (dict, "odir", &odir);
if (ret) {
gf_asprintf (&odir, "%s", "/var/run/gluster/");
gf_msg (this->name, GF_LOG_INFO, 0,
GD_MSG_DICT_GET_FAILED,
"Default output directory: %s", odir);
}
/* Verify the output directory exists and is a directory. */
dp = sys_opendir (odir);
if (dp) {
sys_closedir (dp);
} else {
if (errno == ENOENT) {
snprintf (err_str, sizeof (err_str),
"Output directory %s does not exist.", odir);
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_DICT_GET_FAILED, "%s", err_str);
} else if (errno == ENOTDIR) {
snprintf (err_str, sizeof (err_str), "Output directory "
"does not exist. %s points to a file.", odir);
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_DICT_GET_FAILED, "%s", err_str);
}
ret = -1;
goto out;
}
/* Resolve the filename; default is glusterd_state_<timestamp>. */
ret = dict_get_str (dict, "filename", &filename);
if (ret) {
now = time (NULL);
strftime (timestamp, sizeof (timestamp),
"%Y%m%d_%H%M%S", localtime (&now));
gf_asprintf (&filename, "%s_%s", "glusterd_state", timestamp);
gf_msg (this->name, GF_LOG_INFO, 0,
GD_MSG_DICT_GET_FAILED,
"Default filename: %s", filename);
}
/* NOTE(review): strcat appends to a buffer this function does not
 * own (a dict-held string or an exactly-sized gf_asprintf buffer);
 * writing the extra '/' looks like a one-byte overflow — confirm. */
odirlen = strlen (odir);
if (odir[odirlen-1] != '/')
strcat (odir, "/");
gf_asprintf (&ofilepath, "%s%s", odir, filename);
ret = dict_set_str (dict, "ofilepath", ofilepath);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_DICT_SET_FAILED, "Unable to set output path");
goto out;
}
fp = fopen (ofilepath, "w");
if (!fp) {
snprintf (err_str, sizeof (err_str),
"Failed to open file at %s", ofilepath);
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_DICT_GET_FAILED, "%s", err_str);
ret = -1;
goto out;
}
/* [Global] section: this node's identity and op-version.
 * NOTE(review): gf_strdup of uuid_utoa's buffer is never freed (leak);
 * the same pattern recurs for volume and rebalance ids below. */
fprintf (fp, "[Global]\n");
fprintf (fp, "MYUUID: %s\n", gf_strdup (uuid_utoa (priv->uuid)));
fprintf (fp, "op-version: %d\n", priv->op_version);
fprintf (fp, "\n[Global options]\n");
if (priv->opts)
dict_foreach (priv->opts, glusterd_print_global_options, fp);
/* [Peers] section: iterate the peer list under the RCU read lock. */
rcu_read_lock ();
fprintf (fp, "\n[Peers]\n");
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
ret = gd_peer_state_str (peerinfo, peer_state_str);
if (ret) {
rcu_read_unlock ();
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_STATE_STR_GET_FAILED,
"Failed to get peer state");
goto out;
}
fprintf (fp, "Peer%d.primary_hostname: %s\n", ++count,
peerinfo->hostname);
fprintf (fp, "Peer%d.uuid: %s\n", count, gd_peer_uuid_str (peerinfo));
fprintf (fp, "Peer%d.state: %s\n", count, peer_state_str);
fprintf (fp, "Peer%d.connected: %d\n", count, peerinfo->connected);
fprintf (fp, "Peer%d.hostnames: ", count);
cds_list_for_each_entry (peer_hostname_info,
&peerinfo->hostnames, hostname_list)
fprintf (fp, "%s, ", peer_hostname_info->hostname);
fprintf (fp, "\n");
}
rcu_read_unlock ();
/* [Volumes] section: per-volume state, bricks, snapshots, counts. */
count = 0;
fprintf (fp, "\n[Volumes]\n");
cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
ret = glusterd_volume_get_type_str (volinfo, &vol_type_str);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_STATE_STR_GET_FAILED,
"Failed to get type for volume: %s",
volinfo->volname);
goto out;
}
ret = glusterd_volume_get_status_str (volinfo, vol_status_str);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_STATE_STR_GET_FAILED,
"Failed to get status for volume: %s",
volinfo->volname);
goto out;
}
ret = glusterd_volume_get_transport_type_str (volinfo,
transport_type_str);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_STATE_STR_GET_FAILED,
"Failed to get transport type for volume: %s",
volinfo->volname);
goto out;
}
ret = glusterd_volume_get_quorum_status_str (volinfo,
quorum_status_str);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_STATE_STR_GET_FAILED,
"Failed to get quorum status for volume: %s",
volinfo->volname);
goto out;
}
ret = glusterd_volume_get_rebalance_status_str (volinfo,
rebal_status_str);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_STATE_STR_GET_FAILED,
"Failed to get rebalance status for volume: %s",
volinfo->volname);
goto out;
}
fprintf (fp, "Volume%d.name: %s\n", ++count, volinfo->volname);
fprintf (fp, "Volume%d.id: %s\n", count,
gf_strdup (uuid_utoa (volinfo->volume_id)));
fprintf (fp, "Volume%d.type: %s\n", count, vol_type_str);
fprintf (fp, "Volume%d.transport_type: %s\n", count,
transport_type_str);
fprintf (fp, "Volume%d.status: %s\n", count, vol_status_str);
fprintf (fp, "Volume%d.brickcount: %d\n", count,
volinfo->brick_count);
/* Brick loop reuses `count` as the brick index; the volume index
 * is parked in count_bkp and restored afterwards. */
count_bkp = count;
count = 0;
cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
fprintf (fp, "Volume%d.Brick%d.path: %s:%s\n",
count_bkp, ++count, brickinfo->hostname,
brickinfo->path);
fprintf (fp, "Volume%d.Brick%d.hostname: %s\n",
count_bkp, count, brickinfo->hostname);
/* Add following information only for bricks
 * local to current node */
if (gf_uuid_compare (brickinfo->uuid, MY_UUID))
continue;
fprintf (fp, "Volume%d.Brick%d.port: %d\n", count_bkp,
count, brickinfo->port);
fprintf (fp, "Volume%d.Brick%d.rdma_port: %d\n", count_bkp,
count, brickinfo->rdma_port);
fprintf (fp, "Volume%d.Brick%d.status: %s\n", count_bkp,
count, brickinfo->status ? "Started" : "Stopped");
fprintf (fp, "Volume%d.Brick%d.filesystem_type: %s\n",
count_bkp, count, brickinfo->fstype);
fprintf (fp, "Volume%d.Brick%d.mount_options: %s\n",
count_bkp, count, brickinfo->mnt_opts);
fprintf (fp, "Volume%d.Brick%d.signedin: %s\n", count_bkp,
count, brickinfo->signed_in ? "True" : "False");
}
count = count_bkp;
ret = glusterd_print_snapinfo_by_vol (fp, volinfo, count);
if (ret)
goto out;
fprintf (fp, "Volume%d.snap_count: %"PRIu64"\n", count,
volinfo->snap_count);
fprintf (fp, "Volume%d.stripe_count: %d\n", count,
volinfo->stripe_count);
fprintf (fp, "Volume%d.subvol_count: %d\n", count,
volinfo->subvol_count);
fprintf (fp, "Volume%d.arbiter_count: %d\n", count,
volinfo->arbiter_count);
fprintf (fp, "Volume%d.disperse_count: %d\n", count,
volinfo->disperse_count);
fprintf (fp, "Volume%d.redundancy_count: %d\n", count,
volinfo->redundancy_count);
fprintf (fp, "Volume%d.quorum_status: %s\n", count,
quorum_status_str);
fprintf (fp, "Volume%d.snapd_svc.online_status: %s\n", count,
volinfo->snapd.svc.online ? "Online" : "Offline");
fprintf (fp, "Volume%d.snapd_svc.inited: %s\n", count,
volinfo->snapd.svc.inited ? "True" : "False");
fprintf (fp, "Volume%d.rebalance.id: %s\n", count,
gf_strdup (uuid_utoa (volinfo->rebal.rebalance_id)));
fprintf (fp, "Volume%d.rebalance.status: %s\n", count,
rebal_status_str);
fprintf (fp, "Volume%d.rebalance.failures: %"PRIu64"\n", count,
volinfo->rebal.rebalance_failures);
fprintf (fp, "Volume%d.rebalance.skipped: %"PRIu64"\n", count,
volinfo->rebal.skipped_files);
fprintf (fp, "Volume%d.rebalance.lookedup: %"PRIu64"\n", count,
volinfo->rebal.lookedup_files);
fprintf (fp, "Volume%d.rebalance.files: %"PRIu64"\n", count,
volinfo->rebal.rebalance_files);
fprintf (fp, "Volume%d.rebalance.data: %"PRIu64"\n", count,
volinfo->rebal.rebalance_data);
/* NOTE(review): the rebalance.data line is printed twice —
 * looks like an accidental duplicate; confirm intent. */
fprintf (fp, "Volume%d.rebalance.data: %"PRIu64"\n", count,
volinfo->rebal.rebalance_data);
/* Tiering details are only meaningful for tiered volumes. */
if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
ret = glusterd_volume_get_hot_tier_type_str (
volinfo, &hot_tier_type_str);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_STATE_STR_GET_FAILED,
"Failed to get hot tier type for "
"volume: %s", volinfo->volname);
goto out;
}
ret = glusterd_volume_get_cold_tier_type_str (
volinfo, &cold_tier_type_str);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_STATE_STR_GET_FAILED,
"Failed to get cold tier type for "
"volume: %s", volinfo->volname);
goto out;
}
fprintf (fp, "Volume%d.tier_info.cold_tier_type: %s\n",
count, cold_tier_type_str);
fprintf (fp, "Volume%d.tier_info.cold_brick_count: %d\n",
count, volinfo->tier_info.cold_brick_count);
fprintf (fp, "Volume%d.tier_info.cold_replica_count: %d\n",
count, volinfo->tier_info.cold_replica_count);
fprintf (fp, "Volume%d.tier_info.cold_disperse_count: %d\n",
count, volinfo->tier_info.cold_disperse_count);
fprintf (fp, "Volume%d.tier_info.cold_dist_leaf_count: %d\n",
count, volinfo->tier_info.cold_dist_leaf_count);
fprintf (fp, "Volume%d.tier_info.cold_redundancy_count: %d\n",
count, volinfo->tier_info.cold_redundancy_count);
fprintf (fp, "Volume%d.tier_info.hot_tier_type: %s\n",
count, hot_tier_type_str);
fprintf (fp, "Volume%d.tier_info.hot_brick_count: %d\n",
count, volinfo->tier_info.hot_brick_count);
fprintf (fp, "Volume%d.tier_info.hot_replica_count: %d\n",
count, volinfo->tier_info.hot_replica_count);
fprintf (fp, "Volume%d.tier_info.promoted: %d\n",
count, volinfo->tier_info.promoted);
fprintf (fp, "Volume%d.tier_info.demoted: %d\n",
count, volinfo->tier_info.demoted);
}
/* In-progress replace-brick operation, if any. */
if (volinfo->rep_brick.src_brick && volinfo->rep_brick.dst_brick) {
fprintf (fp, "Volume%d.replace_brick.src: %s:%s\n", count,
volinfo->rep_brick.src_brick->hostname,
volinfo->rep_brick.src_brick->path);
fprintf (fp, "Volume%d.replace_brick.dest: %s:%s\n", count,
volinfo->rep_brick.dst_brick->hostname,
volinfo->rep_brick.dst_brick->path);
}
fprintf (fp, "\n");
}
/* [Services] section: only services that have been initialised. */
count = 0;
fprintf (fp, "\n[Services]\n");
if (priv->shd_svc.inited) {
fprintf (fp, "svc%d.name: %s\n", ++count, priv->shd_svc.name);
fprintf (fp, "svc%d.online_status: %s\n\n", count,
priv->shd_svc.online ? "Online" : "Offline");
}
if (priv->nfs_svc.inited) {
fprintf (fp, "svc%d.name: %s\n", ++count, priv->nfs_svc.name);
fprintf (fp, "svc%d.online_status: %s\n\n", count,
priv->nfs_svc.online ? "Online" : "Offline");
}
if (priv->bitd_svc.inited) {
fprintf (fp, "svc%d.name: %s\n", ++count, priv->bitd_svc.name);
fprintf (fp, "svc%d.online_status: %s\n\n", count,
priv->bitd_svc.online ? "Online" : "Offline");
}
if (priv->scrub_svc.inited) {
fprintf (fp, "svc%d.name: %s\n", ++count, priv->scrub_svc.name);
fprintf (fp, "svc%d.online_status: %s\n\n", count,
priv->scrub_svc.online ? "Online" : "Offline");
}
if (priv->quotad_svc.inited) {
fprintf (fp, "svc%d.name: %s\n", ++count, priv->quotad_svc.name);
fprintf (fp, "svc%d.online_status: %s\n\n", count,
priv->quotad_svc.online ? "Online" : "Offline");
}
/* [Misc] section: portmap state. */
fprintf (fp, "\n[Misc]\n");
if (priv->pmap) {
fprintf (fp, "Base port: %d\n", priv->pmap->base_port);
fprintf (fp, "Last allocated port: %d\n",
priv->pmap->last_alloc);
}
out:
if (fp)
fclose(fp);
/* Reply to the CLI with op_ret/op_errstr plus the serialized dict
 * (which now carries "ofilepath"). */
rsp.op_ret = ret;
rsp.op_errstr = err_str;
ret = dict_allocate_and_serialize (dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
glusterd_to_cli (req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gf_cli_rsp, dict);
return ret;
}
static int
__glusterd_handle_get_state (rpcsvc_request_t *req)
{
        int32_t     ret = -1;
        gf_cli_req  cli_req = {{0,},};
        dict_t     *dict = NULL;
        char        err_str[2048] = {0,};
        xlator_t   *this = NULL;

        this = THIS;
        GF_VALIDATE_OR_GOTO (THIS->name, this, out);
        GF_VALIDATE_OR_GOTO (this->name, req, out);

        /* Decode the on-the-wire request into cli_req. */
        ret = xdr_to_generic (req->msg[0], &cli_req,
                              (xdrproc_t)xdr_gf_cli_req);
        if (ret < 0) {
                snprintf (err_str, sizeof (err_str), "Failed to decode "
                          "request received from cli");
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "%s", err_str);
                req->rpc_err = GARBAGE_ARGS;
                goto out;
        }

        if (cli_req.dict.dict_len) {
                /* Unserialize the dictionary */
                dict = dict_new ();
                if (!dict) {
                        /* Fix: the original passed a NULL dict into
                         * dict_unserialize on allocation failure. */
                        snprintf (err_str, sizeof (err_str),
                                  "Unable to allocate memory");
                        ret = -1;
                        goto out;
                }

                ret = dict_unserialize (cli_req.dict.dict_val,
                                        cli_req.dict.dict_len,
                                        &dict);
                if (ret < 0) {
                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_DICT_UNSERIALIZE_FAIL,
                                "failed to "
                                "unserialize req-buffer to dictionary");
                        snprintf (err_str, sizeof (err_str), "Unable to decode"
                                  " the command");
                        goto out;
                } else {
                        /* dict now owns the serialized buffer; it is freed
                         * along with the dict. */
                        dict->extra_stdfree = cli_req.dict.dict_val;
                }
        }

        gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_DAEMON_STATE_REQ_RCVD,
                "Received request to get state for glusterd");

        ret = glusterd_get_state (req, dict);

out:
        if (dict)
                dict_unref (dict);
        return ret;
}
/* CLI entry point for "gluster get-state": runs the real worker under
 * glusterd's big lock, like every other CLI handler. */
int
glusterd_handle_get_state (rpcsvc_request_t *req)
{
        int ret = -1;

        ret = glusterd_big_locked_handler (req,
                                           __glusterd_handle_get_state);
        return ret;
}
static int
get_brickinfo_from_brickid (char *brickid, glusterd_brickinfo_t **brickinfo)
{
@ -5410,6 +5925,7 @@ rpcsvc_actor_t gd_svc_cli_actors[GLUSTER_CLI_MAXVALUE] = {
[GLUSTER_CLI_GANESHA] = { "GANESHA" , GLUSTER_CLI_GANESHA, glusterd_handle_ganesha_cmd, NULL, 0, DRC_NA},
[GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", GLUSTER_CLI_GET_VOL_OPT, glusterd_handle_get_vol_opt, NULL, 0, DRC_NA},
[GLUSTER_CLI_BITROT] = {"BITROT", GLUSTER_CLI_BITROT, glusterd_handle_bitrot, NULL, 0, DRC_NA},
[GLUSTER_CLI_GET_STATE] = {"GET_STATE", GLUSTER_CLI_GET_STATE, glusterd_handle_get_state, NULL, 0, DRC_NA},
};
struct rpcsvc_program gd_svc_cli_prog = {

View File

@ -41,7 +41,7 @@
#define GLUSTERD_COMP_BASE GLFS_MSGID_GLUSTERD
/* Total message count; bumped from 583 to 585 for the two get-state
 * message IDs added below. Only one definition may exist — redefining a
 * macro with a different value is a constraint violation (C11 6.10.3). */
#define GLFS_NUM_MESSAGES 585
#define GLFS_MSGID_END (GLUSTERD_COMP_BASE + GLFS_NUM_MESSAGES + 1)
/* Messaged with message IDs */
@ -4713,6 +4713,22 @@
*/
#define GD_MSG_SYSCALL_FAIL (GLUSTERD_COMP_BASE + 583)
/*!
* @messageid
* @diagnosis
* @recommendation
*
*/
#define GD_MSG_DAEMON_STATE_REQ_RCVD (GLUSTERD_COMP_BASE + 584)
/*!
* @messageid
* @diagnosis
* @recommendation
*
*/
#define GD_MSG_STATE_STR_GET_FAILED (GLUSTERD_COMP_BASE + 585)
/*------------*/
#define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages"
#endif /* !_GLUSTERD_MESSAGES_H_ */

View File

@ -393,6 +393,57 @@ gd_peer_uuid_str (glusterd_peerinfo_t *peerinfo)
return peerinfo->uuid_str;
}
/* Fill state_str with a human-readable name for peerinfo's friend-state
 * machine state. Returns 0 on success, -1 on bad args or an out-of-range
 * state. The caller supplies the buffer; it must be large enough for the
 * longest label ("request sent received").
 *
 * Fix: the original called gf_asprintf (&state_str, ...), which assigned a
 * freshly-allocated string to the *local copy* of the pointer parameter —
 * the caller's buffer was never written and every call leaked the
 * allocation. Write into the supplied buffer instead, matching the other
 * *_str helpers in this change. */
int
gd_peer_state_str (glusterd_peerinfo_t *peerinfo, char *state_str)
{
        int          ret = -1;
        const char  *str = NULL;

        GF_VALIDATE_OR_GOTO (THIS->name, peerinfo, out);
        GF_VALIDATE_OR_GOTO (THIS->name, state_str, out);

        switch (peerinfo->state.state) {
        case GD_FRIEND_STATE_DEFAULT:
                str = "default";
                break;
        case GD_FRIEND_STATE_REQ_SENT:
                str = "request sent";
                break;
        case GD_FRIEND_STATE_REQ_RCVD:
                str = "request received";
                break;
        case GD_FRIEND_STATE_BEFRIENDED:
                str = "befriended";
                break;
        case GD_FRIEND_STATE_REQ_ACCEPTED:
                str = "request accepted";
                break;
        case GD_FRIEND_STATE_REQ_SENT_RCVD:
                str = "request sent received";
                break;
        case GD_FRIEND_STATE_REJECTED:
                str = "rejected";
                break;
        case GD_FRIEND_STATE_UNFRIEND_SENT:
                str = "unfriend sent";
                break;
        case GD_FRIEND_STATE_PROBE_RCVD:
                str = "probe received";
                break;
        case GD_FRIEND_STATE_CONNECTED_RCVD:
                str = "connected received";
                break;
        case GD_FRIEND_STATE_CONNECTED_ACCEPTED:
                str = "connected accepted";
                break;
        case GD_FRIEND_STATE_MAX:
                goto out;
        }

        if (!str)
                goto out;

        sprintf (state_str, "%s", str);
        ret = 0;
out:
        return ret;
}
gf_boolean_t
glusterd_are_all_peers_up ()
{

View File

@ -42,6 +42,9 @@ glusterd_uuid_to_hostname (uuid_t uuid);
char*
gd_peer_uuid_str (glusterd_peerinfo_t *peerinfo);
int
gd_peer_state_str (glusterd_peerinfo_t *peerinfo, char *state_str);
gf_boolean_t
glusterd_are_all_peers_up ();

View File

@ -4093,3 +4093,39 @@ gd_get_snap_conf_values_if_present (dict_t *dict, uint64_t *sys_hard_limit,
GLUSTERD_STORE_KEY_SNAP_MAX_SOFT_LIMIT);
}
}
/* Translate snapinfo->snap_status into its printable label, written into
 * the caller-supplied snap_status_str. Returns 0 on success, -1 on bad
 * args or an unrecognised status value. */
int
glusterd_get_snap_status_str (glusterd_snap_t *snapinfo, char *snap_status_str)
{
        int          ret = -1;
        const char  *label = NULL;

        GF_VALIDATE_OR_GOTO (THIS->name, snapinfo, out);
        GF_VALIDATE_OR_GOTO (THIS->name, snap_status_str, out);

        switch (snapinfo->snap_status) {
        case GD_SNAP_STATUS_NONE:
                label = "none";
                break;
        case GD_SNAP_STATUS_INIT:
                label = "init";
                break;
        case GD_SNAP_STATUS_IN_USE:
                label = "in_use";
                break;
        case GD_SNAP_STATUS_DECOMMISSION:
                label = "decommissioned";
                break;
        case GD_SNAP_STATUS_UNDER_RESTORE:
                label = "under_restore";
                break;
        case GD_SNAP_STATUS_RESTORED:
                label = "restored";
                break;
        default:
                goto out;
        }

        sprintf (snap_status_str, "%s", label);
        ret = 0;
out:
        return ret;
}

View File

@ -161,6 +161,8 @@ glusterd_is_snap_soft_limit_reached (glusterd_volinfo_t *volinfo,
void
gd_get_snap_conf_values_if_present (dict_t *opts, uint64_t *sys_hard_limit,
uint64_t *sys_soft_limit);
int
glusterd_get_snap_status_str (glusterd_snap_t *snapinfo, char *snap_status_str);
#endif

View File

@ -59,6 +59,7 @@
#include "glusterd-bitd-svc.h"
#include "glusterd-server-quorum.h"
#include "quota-common-utils.h"
#include "common-utils.h"
#include "xdr-generic.h"
#include <sys/resource.h>
@ -11225,6 +11226,199 @@ glusterd_is_volume_started (glusterd_volinfo_t *volinfo)
return (volinfo->status == GLUSTERD_STATUS_STARTED);
}
/* Point *voltype_str at the static label describing volinfo's volume type.
 * Returns 0 on success, -1 on bad args. The returned string is owned by
 * the vol_type_str table and must not be freed.
 *
 * Fixes: dropped the unused brick_count/dist_count locals; added the
 * missing validation of the out-param (every sibling helper validates
 * both); swapped the last two get_vol_type arguments — the hot/cold tier
 * helpers in this change pass (type, dist_count, brick_count), so the
 * original (type, brick_count, dist_leaf_count) order here looks
 * transposed. NOTE(review): confirm against get_vol_type's prototype. */
int
glusterd_volume_get_type_str (glusterd_volinfo_t *volinfo, char **voltype_str)
{
        int ret = -1;
        int type = 0;

        GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
        GF_VALIDATE_OR_GOTO (THIS->name, voltype_str, out);

        type = get_vol_type (volinfo->type, volinfo->dist_leaf_count,
                             volinfo->brick_count);

        *voltype_str = vol_type_str[type];

        ret = 0;
out:
        return ret;
}
/* Write the printable form of volinfo->status ("Created"/"Started"/
 * "Stopped") into the caller-supplied status_str. Returns 0 on success,
 * -1 on bad args or an unknown status. */
int
glusterd_volume_get_status_str (glusterd_volinfo_t *volinfo, char *status_str)
{
        int          ret = -1;
        const char  *label = NULL;

        GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
        GF_VALIDATE_OR_GOTO (THIS->name, status_str, out);

        if (volinfo->status == GLUSTERD_STATUS_NONE)
                label = "Created";
        else if (volinfo->status == GLUSTERD_STATUS_STARTED)
                label = "Started";
        else if (volinfo->status == GLUSTERD_STATUS_STOPPED)
                label = "Stopped";
        else
                goto out;

        sprintf (status_str, "%s", label);
        ret = 0;
out:
        return ret;
}
/* Write the printable transport type of the volume ("tcp", "rdma" or
 * "tcp_rdma_both") into transport_type_str. Returns 0 on success, -1 on
 * bad args or an unknown transport value. */
int
glusterd_volume_get_transport_type_str (glusterd_volinfo_t *volinfo,
                                        char *transport_type_str)
{
        int          ret = -1;
        const char  *name = NULL;

        GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
        GF_VALIDATE_OR_GOTO (THIS->name, transport_type_str, out);

        if (volinfo->transport_type == GF_TRANSPORT_TCP)
                name = "tcp";
        else if (volinfo->transport_type == GF_TRANSPORT_RDMA)
                name = "rdma";
        else if (volinfo->transport_type == GF_TRANSPORT_BOTH_TCP_RDMA)
                name = "tcp_rdma_both";
        else
                goto out;

        sprintf (transport_type_str, "%s", name);
        ret = 0;
out:
        return ret;
}
/* Write the printable server-quorum status of the volume into
 * quorum_status_str. Returns 0 on success, -1 on bad args or an unknown
 * quorum status value. */
int
glusterd_volume_get_quorum_status_str (glusterd_volinfo_t *volinfo,
                                       char *quorum_status_str)
{
        int          ret = -1;
        const char  *label = NULL;

        GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
        GF_VALIDATE_OR_GOTO (THIS->name, quorum_status_str, out);

        switch (volinfo->quorum_status) {
        case NOT_APPLICABLE_QUORUM:
                label = "not_applicable";
                break;
        case MEETS_QUORUM:
                label = "meets";
                break;
        case DOESNT_MEET_QUORUM:
                label = "does_not_meet";
                break;
        default:
                goto out;
        }

        sprintf (quorum_status_str, "%s", label);
        ret = 0;
out:
        return ret;
}
/* Write the printable rebalance (defrag) status of the volume into
 * rebal_status_str. Returns 0 on success, -1 on bad args or an
 * unrecognised defrag status. */
int
glusterd_volume_get_rebalance_status_str (glusterd_volinfo_t *volinfo,
                                          char *rebal_status_str)
{
        int          ret = -1;
        const char  *label = NULL;

        GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
        GF_VALIDATE_OR_GOTO (THIS->name, rebal_status_str, out);

        switch (volinfo->rebal.defrag_status) {
        case GF_DEFRAG_STATUS_NOT_STARTED:
                label = "not_started";
                break;
        case GF_DEFRAG_STATUS_STARTED:
                label = "started";
                break;
        case GF_DEFRAG_STATUS_STOPPED:
                label = "stopped";
                break;
        case GF_DEFRAG_STATUS_COMPLETE:
                label = "completed";
                break;
        case GF_DEFRAG_STATUS_FAILED:
                label = "failed";
                break;
        case GF_DEFRAG_STATUS_LAYOUT_FIX_STARTED:
                label = "layout_fix_started";
                break;
        case GF_DEFRAG_STATUS_LAYOUT_FIX_STOPPED:
                label = "layout_fix_stopped";
                break;
        case GF_DEFRAG_STATUS_LAYOUT_FIX_COMPLETE:
                label = "layout_fix_complete";
                break;
        case GF_DEFRAG_STATUS_LAYOUT_FIX_FAILED:
                label = "layout_fix_failed";
                break;
        default:
                goto out;
        }

        sprintf (rebal_status_str, "%s", label);
        ret = 0;
out:
        return ret;
}
/* Point *hot_tier_type_str at the static label describing the hot tier's
 * layout. Returns 0 on success, -1 on bad args. The string is owned by
 * the vol_type_str table and must not be freed. */
int
glusterd_volume_get_hot_tier_type_str (glusterd_volinfo_t *volinfo,
                                       char **hot_tier_type_str)
{
        int ret = -1;
        int type = 0;
        int dist_count = 0;

        GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
        GF_VALIDATE_OR_GOTO (THIS->name, hot_tier_type_str, out);

        /* A replica count of 0 means no replication; use 1 so the
         * distribute computation stays sane. */
        dist_count = volinfo->tier_info.hot_replica_count;
        if (!dist_count)
                dist_count = 1;

        type = get_vol_type (volinfo->tier_info.hot_type, dist_count,
                             volinfo->tier_info.hot_brick_count);

        *hot_tier_type_str = vol_type_str[type];

        ret = 0;
out:
        return ret;
}
/* Point *cold_tier_type_str at the static label describing the cold
 * tier's layout. Returns 0 on success, -1 on bad args. The string is
 * owned by the vol_type_str table and must not be freed. */
int
glusterd_volume_get_cold_tier_type_str (glusterd_volinfo_t *volinfo,
                                        char **cold_tier_type_str)
{
        int ret = -1;
        int type = 0;

        GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
        GF_VALIDATE_OR_GOTO (THIS->name, cold_tier_type_str, out);

        type = get_vol_type (volinfo->tier_info.cold_type,
                             volinfo->tier_info.cold_dist_leaf_count,
                             volinfo->tier_info.cold_brick_count);

        *cold_tier_type_str = vol_type_str[type];

        ret = 0;
out:
        return ret;
}
/* This function will insert the element to the list in a order.
Order will be based on the compare function provided as a input.
If element to be inserted in ascending order compare should return:

View File

@ -678,6 +678,32 @@ glusterd_nfs_pmap_deregister ();
gf_boolean_t
glusterd_is_volume_started (glusterd_volinfo_t *volinfo);
int
glusterd_volume_get_type_str (glusterd_volinfo_t *volinfo, char **vol_type_str);
int
glusterd_volume_get_status_str (glusterd_volinfo_t *volinfo, char *status_str);
int
glusterd_volume_get_transport_type_str (glusterd_volinfo_t *volinfo,
char *transport_type_str);
int
glusterd_volume_get_quorum_status_str (glusterd_volinfo_t *volinfo,
char *quorum_status_str);
int
glusterd_volume_get_rebalance_status_str (glusterd_volinfo_t *volinfo,
char *rebal_status_str);
int
glusterd_volume_get_hot_tier_type_str (glusterd_volinfo_t *volinfo,
char **hot_tier_type_str);
int
glusterd_volume_get_cold_tier_type_str (glusterd_volinfo_t *volinfo,
char **cold_tier_type_str);
void
glusterd_list_add_order (struct cds_list_head *new, struct cds_list_head *head,
int (*compare)(struct cds_list_head *,

View File

@ -56,6 +56,7 @@
#define GLUSTERD_SNAPS_DEF_SOFT_LIMIT_PERCENT 90
#define GLUSTERD_SNAPS_MAX_SOFT_LIMIT_PERCENT 100
#define GLUSTERD_SERVER_QUORUM "server"
#define STATUS_STRLEN 128
#define FMTSTR_CHECK_VOL_EXISTS "Volume %s does not exist"
#define FMTSTR_RESOLVE_BRICK "Could not find peer on which brick %s:%s resides"