mgmt/glusterd: Implement Volume heal enable/disable
For volumes with replicate or disperse xlators, the self-heal daemon should do the healing. This patch provides enable/disable functionality for these xlators to be part of the self-heal daemon. Replicate already had this functionality with 'gluster volume set cluster.self-heal-daemon on/off', but this patch makes it uniform for both volume types. Internally it still does a 'volume set' based on the volume type.

Change-Id: Ie0f3799b74c2afef9ac658ef3d50dce3e8072b29
BUG: 1177601
Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
Reviewed-on: http://review.gluster.org/9358
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Xavier Hernandez <xhernandez@datalab.es>
Tested-by: Krishnan Parthasarathi <kparthas@redhat.com>
This commit is contained in:
parent 1ee8ce725f
commit 7510d8edf4
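As a quick illustration of the behaviour described in the commit message, the new type-agnostic subcommands and the 'volume set' form glusterd converts them to internally look roughly like this (volume names are placeholders; see volgen_get_shd_key and heald.t in the diff below):

    gluster volume heal <VOLNAME> enable
    gluster volume heal <VOLNAME> disable

    # Internally rewritten to a volume-set, keyed on the volume type:
    gluster volume set <VOLNAME> cluster.self-heal-daemon enable            # replicate / stripe-replicate
    gluster volume set <VOLNAME> cluster.disperse-self-heal-daemon enable   # disperse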
@@ -2964,7 +2964,36 @@ set_hostname_path_in_dict (const char *token, dict_t *dict, int heal_op)
out:
        return ret;
}

static int
heal_command_type_get (const char *command)
{
        int i = 0;
        /* subcommands are set as NULL */
        char *heal_cmds[GF_AFR_OP_HEAL_DISABLE + 1] = {
                [GF_AFR_OP_INVALID] = NULL,
                [GF_AFR_OP_HEAL_INDEX] = NULL,
                [GF_AFR_OP_HEAL_FULL] = "full",
                [GF_AFR_OP_INDEX_SUMMARY] = "info",
                [GF_AFR_OP_HEALED_FILES] = NULL,
                [GF_AFR_OP_HEAL_FAILED_FILES] = NULL,
                [GF_AFR_OP_SPLIT_BRAIN_FILES] = NULL,
                [GF_AFR_OP_STATISTICS] = "statistics",
                [GF_AFR_OP_STATISTICS_HEAL_COUNT] = NULL,
                [GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA] = NULL,
                [GF_AFR_OP_SBRAIN_HEAL_FROM_BIGGER_FILE] = "split-brain",
                [GF_AFR_OP_SBRAIN_HEAL_FROM_BRICK] = "split-brain",
                [GF_AFR_OP_HEAL_ENABLE] = "enable",
                [GF_AFR_OP_HEAL_DISABLE] = "disable",
        };

        for (i = 0; i <= GF_AFR_OP_HEAL_DISABLE; i++) {
                if (heal_cmds[i] && (strcmp (heal_cmds[i], command) == 0))
                        return i;
        }

        return GF_AFR_OP_INVALID;
}

int
@@ -2973,6 +3002,9 @@ cli_cmd_volume_heal_options_parse (const char **words, int wordcount,
{
        int ret = 0;
        dict_t *dict = NULL;
        char *hostname = NULL;
        char *path = NULL;
        gf_xl_afr_op_t op = GF_AFR_OP_INVALID;

        dict = dict_new ();
        if (!dict)
@@ -2990,24 +3022,16 @@ cli_cmd_volume_heal_options_parse (const char **words, int wordcount,
        }

        if (wordcount == 4) {
                if (!strcmp (words[3], "full")) {
                        ret = dict_set_int32 (dict, "heal-op",
                                              GF_AFR_OP_HEAL_FULL);
                        goto done;
                } else if (!strcmp (words[3], "statistics")) {
                        ret = dict_set_int32 (dict, "heal-op",
                                              GF_AFR_OP_STATISTICS);
                        goto done;

                } else if (!strcmp (words[3], "info")) {
                        ret = dict_set_int32 (dict, "heal-op",
                                              GF_AFR_OP_INDEX_SUMMARY);
                        goto done;
                } else {
                op = heal_command_type_get (words[3]);
                if (op == GF_AFR_OP_INVALID) {
                        ret = -1;
                        goto out;
                }

                ret = dict_set_int32 (dict, "heal-op", op);
                goto done;
        }

        if (wordcount == 5) {
                if (strcmp (words[3], "info") &&
                    strcmp (words[3], "statistics")) {
@@ -2356,10 +2356,11 @@ struct cli_cmd volume_cmds[] = {
          cli_cmd_volume_status_cbk,
          "display status of all or specified volume(s)/brick"},

        { "volume heal <VOLNAME> [full | statistics [heal-count "\
          "[replica <HOSTNAME:BRICKNAME>]] |info [healed | heal-failed | "\
          "split-brain]| split-brain {bigger-file <FILE> |source-brick "\
          "<HOSTNAME:BRICKNAME> [<FILE>]}]",
        { "volume heal <VOLNAME> [enable | disable | full |"
          "statistics [heal-count [replica <HOSTNAME:BRICKNAME>]] |"
          "info [healed | heal-failed | split-brain] |"
          "split-brain {bigger-file <FILE> |"
          "source-brick <HOSTNAME:BRICKNAME> [<FILE>]}]",
          cli_cmd_volume_heal_cbk,
          "self-heal commands on volume specified by <VOLNAME>"},
@@ -7270,6 +7270,30 @@ out:
        return;
}

int
gf_is_cli_heal_get_command (gf_xl_afr_op_t heal_op)
{
        /* If the command is get command value is 1 otherwise 0, for
           invalid commands -1 */
        int get_cmds[GF_AFR_OP_HEAL_DISABLE + 1] = {
                [GF_AFR_OP_INVALID] = -1,
                [GF_AFR_OP_HEAL_INDEX] = 0,
                [GF_AFR_OP_HEAL_FULL] = 0,
                [GF_AFR_OP_INDEX_SUMMARY] = 1,
                [GF_AFR_OP_HEALED_FILES] = 1,
                [GF_AFR_OP_HEAL_FAILED_FILES] = 1,
                [GF_AFR_OP_SPLIT_BRAIN_FILES] = 1,
                [GF_AFR_OP_STATISTICS] = 1,
                [GF_AFR_OP_STATISTICS_HEAL_COUNT] = 1,
                [GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA] = 1,
                [GF_AFR_OP_HEAL_ENABLE] = 0,
                [GF_AFR_OP_HEAL_DISABLE] = 0,
        };

        if (heal_op > GF_AFR_OP_INVALID && heal_op <= GF_AFR_OP_HEAL_DISABLE)
                return get_cmds[heal_op] == 1;
        return _gf_false;
}

int
gf_cli_heal_volume_cbk (struct rpc_req *req, struct iovec *iov,
@@ -7330,12 +7354,20 @@ gf_cli_heal_volume_cbk (struct rpc_req *req, struct iovec *iov,

        gf_log ("cli", GF_LOG_INFO, "Received resp to heal volume");

        operation = "Gathering ";
        substr = "";
        switch (heal_op) {
        case GF_AFR_OP_HEAL_INDEX:
                operation = "Launching heal operation ";
                heal_op_str = "to perform index self heal";
                substr = "\nUse heal info commands to check"
                         " status";
                break;
        case GF_AFR_OP_HEAL_FULL:
                operation = "Launching heal operation ";
                heal_op_str = "to perform full self heal";
                substr = "\nUse heal info commands to check"
                         " status";
                break;
        case GF_AFR_OP_INDEX_SUMMARY:
                heal_op_str = "list of entries to be healed";
@@ -7367,35 +7399,33 @@ gf_cli_heal_volume_cbk (struct rpc_req *req, struct iovec *iov,
        case GF_AFR_OP_INVALID:
                heal_op_str = "invalid heal op";
                break;
        }

        if ((heal_op == GF_AFR_OP_HEAL_FULL) ||
            (heal_op == GF_AFR_OP_HEAL_INDEX)) {
                operation = "Launching heal operation";
                substr = "\nUse heal info commands to check status";
        } else {
                operation = "Gathering";
                substr = "";
        case GF_AFR_OP_HEAL_ENABLE:
                operation = "";
                heal_op_str = "Enable heal";
                break;
        case GF_AFR_OP_HEAL_DISABLE:
                operation = "";
                heal_op_str = "Disable heal";
                break;
        }

        if (rsp.op_ret) {
                if (strcmp (rsp.op_errstr, "")) {
                        cli_err ("%s", rsp.op_errstr);
                } else {
                        cli_err ("%s %s on volume %s has been unsuccessful",
                        cli_err ("%s%s on volume %s has been unsuccessful",
                                 operation, heal_op_str, volname);
                }

                ret = rsp.op_ret;
                goto out;
        } else {
                cli_out ("%s %s on volume %s has been successful %s", operation,
                cli_out ("%s%s on volume %s has been successful %s", operation,
                         heal_op_str, volname, substr);
        }

        ret = rsp.op_ret;
        if ((heal_op == GF_AFR_OP_HEAL_FULL) ||
            (heal_op == GF_AFR_OP_HEAL_INDEX))
        if (!gf_is_cli_heal_get_command (heal_op))
                goto out;

        dict = dict_new ();
@@ -3630,3 +3630,21 @@ recursive_rmdir (const char *delete_path)
out:
        return ret;
}

/*
 * Input: Array of strings 'array' terminating in NULL
 *        string 'elem' to be searched in the array
 *
 * Output: Index of the element in the array if found, '-1' otherwise
 */
int
gf_get_index_by_elem (char **array, char *elem)
{
        int i = 0;

        for (i = 0; array[i]; i++) {
                if (strcmp (elem, array[i]) == 0)
                        return i;
        }

        return -1;
}

@@ -677,4 +677,6 @@ gf_build_absolute_path (char *current_path, char *relative_path, char **path);
int
recursive_rmdir (const char *delete_path);

int
gf_get_index_by_elem (char **array, char *elem);
#endif /* _COMMON_UTILS_H */
@@ -233,6 +233,8 @@ typedef enum {
        GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA,
        GF_AFR_OP_SBRAIN_HEAL_FROM_BIGGER_FILE,
        GF_AFR_OP_SBRAIN_HEAL_FROM_BRICK,
        GF_AFR_OP_HEAL_ENABLE,
        GF_AFR_OP_HEAL_DISABLE,
} gf_xl_afr_op_t ;

struct gf_gsync_detailed_status_ {
tests/basic/glusterd/heald.t (new file, 89 lines)
@@ -0,0 +1,89 @@
#!/bin/bash

. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc

# This test contains volume heal commands handled by glusterd.
# Covers enable/disable at the moment. Will be enhanced later to include
# the other commands as well.

cleanup;
TEST glusterd
TEST pidof glusterd

volfile=$(gluster system:: getwd)"/glustershd/glustershd-server.vol"
#Commands should fail when volume doesn't exist
TEST ! $CLI volume heal non-existent-volume enable
TEST ! $CLI volume heal non-existent-volume disable

# Commands should fail when volume is of distribute/stripe type.
# Glustershd shouldn't be running as long as there are no replicate/disperse
# volumes
TEST $CLI volume create dist $H0:$B0/dist
TEST $CLI volume start dist
TEST "[ -z $(get_shd_process_pid)]"
TEST ! $CLI volume heal dist enable
TEST ! $CLI volume heal dist disable
TEST $CLI volume create st stripe 3 $H0:$B0/st1 $H0:$B0/st2 $H0:$B0/st3
TEST $CLI volume start st
TEST "[ -z $(get_shd_process_pid)]"
TEST ! $CLI volume heal st
TEST ! $CLI volume heal st disable

# Commands should work on replicate/disperse volume.
TEST $CLI volume create r2 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1
TEST "[ -z $(get_shd_process_pid)]"
TEST $CLI volume start r2
EXPECT "[0-9][0-9]*" get_shd_process_pid
TEST $CLI volume heal r2 enable
EXPECT "enable" volume_option r2 "cluster.self-heal-daemon"
EXPECT "enable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon
EXPECT "[0-9][0-9]*" get_shd_process_pid
TEST $CLI volume heal r2 disable
EXPECT "disable" volume_option r2 "cluster.self-heal-daemon"
EXPECT "disable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon
EXPECT "[0-9][0-9]*" get_shd_process_pid

# Commands should work on disperse volume.
TEST $CLI volume create ec2 disperse 3 redundancy 1 $H0:$B0/ec2_0 $H0:$B0/ec2_1 $H0:$B0/ec2_2
TEST $CLI volume start ec2
EXPECT "[0-9][0-9]*" get_shd_process_pid
TEST $CLI volume heal ec2 enable
EXPECT "enable" volume_option ec2 "cluster.disperse-self-heal-daemon"
EXPECT "enable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon
EXPECT "[0-9][0-9]*" get_shd_process_pid
TEST $CLI volume heal ec2 disable
EXPECT "disable" volume_option ec2 "cluster.disperse-self-heal-daemon"
EXPECT "disable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon
EXPECT "[0-9][0-9]*" get_shd_process_pid

#Check that shd graph is rewritten correctly on volume stop/start
EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
TEST $CLI volume stop r2
EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
EXPECT "N" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
TEST $CLI volume stop ec2
# When both the volumes are stopped glustershd volfile is not modified just the
# process is stopped
TEST "[ -z $(get_shd_process_pid) ]"

TEST $CLI volume start r2
EXPECT "N" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate

TEST $CLI volume set r2 self-heal-daemon on
TEST $CLI volume set r2 cluster.self-heal-daemon off
TEST ! $CLI volume set ec2 self-heal-daemon off
TEST ! $CLI volume set ec2 cluster.self-heal-daemon on
TEST ! $CLI volume set dist self-heal-daemon off
TEST ! $CLI volume set dist cluster.self-heal-daemon on

TEST $CLI volume set ec2 disperse-self-heal-daemon off
TEST $CLI volume set ec2 cluster.disperse-self-heal-daemon on
TEST ! $CLI volume set r2 disperse-self-heal-daemon on
TEST ! $CLI volume set r2 cluster.disperse-self-heal-daemon off
TEST ! $CLI volume set dist disperse-self-heal-daemon off
TEST ! $CLI volume set dist cluster.disperse-self-heal-daemon on

cleanup
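To exercise just this new test locally, the usual pattern with the GlusterFS test harness is roughly the following; the exact wrapper varies by branch (run-tests.sh is the canonical entry point), and the suite assumes a built tree and root privileges:

    # Run only the new glusterd heal enable/disable test
    prove -vf tests/basic/glusterd/heald.t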
@@ -135,18 +135,15 @@ function ec_child_up_count {
}

function get_shd_process_pid {
        local vol=$1
        ps auxww | grep glusterfs | grep -E "glustershd/run/glustershd.pid" | awk '{print $2}' | head -1
}

function generate_shd_statedump {
        local vol=$1
        generate_statedump $(get_shd_process_pid $vol)
        generate_statedump $(get_shd_process_pid)
}

function generate_nfs_statedump {
        local vol=$1
        generate_statedump $(get_nfs_pid $vol)
        generate_statedump $(get_nfs_pid)
}

function generate_brick_statedump {
@@ -425,3 +422,26 @@ function assign_gfid {

function get_random_gfid {
        echo "0x"$(uuidgen | awk -F '-' 'BEGIN {OFS=""} {print $1,$2,$3,$4,$5}')
}

function volgen_volume_exists {
        local volfile="$1"
        local xl_vol="$2"
        local xl_type="$3"
        local xl_feature="$4"
        xl=$(sed -e "/./{H;\$!d;}" -e "x;/volume $xl_vol/!d;/type $xl_type\/$xl_feature/!d" $volfile)
        if [ -z "$xl" ];
        then
                echo "N"
        else
                echo "Y"
        fi
}

function volgen_volume_option {
        local volfile="$1"
        local xl_vol="$2"
        local xl_type="$3"
        local xl_feature="$4"
        local xl_option="$5"
        sed -e "/./{H;\$!d;}" -e "x;/volume $xl_vol/!d;/type $xl_type\/$xl_feature/!d;/option $xl_option/!d" $volfile | grep " $xl_option " | awk '{print $3}'
}
@@ -181,12 +181,18 @@ int32_t mem_acct_init(xlator_t * this)
    return 0;
}

int32_t reconfigure(xlator_t * this, dict_t * options)
int32_t
reconfigure (xlator_t *this, dict_t *options)
{
    gf_log(this->name, GF_LOG_ERROR, "Online volume reconfiguration is not "
                                     "supported.");
        ec_t *ec = this->private;

    return -1;
        GF_OPTION_RECONF ("self-heal-daemon", ec->shd, options, bool, failed);
        GF_OPTION_RECONF ("iam-self-heal-daemon", ec->iamshd, options,
                          bool, failed);

        return 0;
failed:
        return -1;
}

void ec_up(xlator_t * this, ec_t * ec)
@@ -336,9 +342,10 @@ int32_t notify(xlator_t * this, int32_t event, void * data, ...)
    return 0;
}

int32_t init(xlator_t * this)
int32_t
init (xlator_t *this)
{
    ec_t * ec;
        ec_t *ec = NULL;

    if (this->parents == NULL)
    {
@@ -385,6 +392,8 @@ int32_t init(xlator_t * this)
    }

    ec_method_initialize();
        GF_OPTION_INIT ("self-heal-daemon", ec->shd, bool, failed);
        GF_OPTION_INIT ("iam-self-heal-daemon", ec->iamshd, bool, failed);

    gf_log(this->name, GF_LOG_DEBUG, "Disperse translator initialized.");

@@ -977,5 +986,18 @@ struct volume_options options[] =
        .description = "Maximum number of bricks that can fail "
                       "simultaneously without losing data."
    },
    {
        .key = { "self-heal-daemon" },
        .type = GF_OPTION_TYPE_BOOL,
        .description = "self-heal daemon enable/disable",
        .default_value = "enable",
    },
    { .key = {"iam-self-heal-daemon"},
      .type = GF_OPTION_TYPE_BOOL,
      .default_value = "off",
      .description = "This option differentiates if the disperse "
                     "translator is running as part of self-heal-daemon "
                     "or not."
    },
    { }
};

@@ -43,6 +43,8 @@ struct _ec
    struct mem_pool * fop_pool;
    struct mem_pool * cbk_pool;
    struct mem_pool * lock_pool;
    gf_boolean_t shd;
    gf_boolean_t iamshd;
};

#endif /* __EC_H__ */
@@ -666,7 +666,6 @@ glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
        gf_boolean_t origin_glusterd = _gf_true;
        gf_boolean_t check_op_version = _gf_true;
        gf_boolean_t all_vol = _gf_false;
        struct volopt_map_entry *vme = NULL;

        GF_ASSERT (dict);
        this = THIS;
@@ -826,17 +825,10 @@ glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
                if (is_key_glusterd_hooks_friendly (key))
                        continue;

                for (vme = &glusterd_volopt_map[0]; vme->key; vme++) {
                        if ((vme->validate_fn) &&
                            ((!strcmp (key, vme->key)) ||
                             (!strcmp (key, strchr (vme->key, '.') + 1)))) {
                                ret = vme->validate_fn (dict, key, value,
                                                        op_errstr);
                                if (ret)
                                        goto out;
                                break;
                        }
                }
                ret = glusterd_volopt_validate (volinfo, dict, key, value,
                                                op_errstr);
                if (ret)
                        goto out;

                exists = glusterd_check_option_exists (key, &key_fixed);
                if (exists == -1) {
@@ -4964,7 +4964,7 @@ glusterd_nodesvcs_batch_op (glusterd_volinfo_t *volinfo, int (*nfs_op) (),
        if (ret)
                goto out;

        if (volinfo && !glusterd_is_volume_replicate (volinfo)) {
        if (volinfo && !glusterd_is_shd_compatible_volume (volinfo)) {
                ; //do nothing
        } else {
                ret = shd_op ();
@@ -5026,7 +5026,7 @@ glusterd_are_all_volumes_stopped ()
}

gf_boolean_t
glusterd_all_replicate_volumes_stopped ()
glusterd_all_shd_compatible_volumes_stopped ()
{
        glusterd_conf_t *priv = NULL;
        xlator_t *this = NULL;
@@ -5038,7 +5038,7 @@ glusterd_all_replicate_volumes_stopped ()
        GF_ASSERT (priv);

        list_for_each_entry (voliter, &priv->volumes, vol_list) {
                if (!glusterd_is_volume_replicate (voliter))
                if (!glusterd_is_shd_compatible_volume (voliter))
                        continue;
                if (voliter->status == GLUSTERD_STATUS_STARTED)
                        return _gf_false;
@@ -5088,7 +5088,7 @@ glusterd_nodesvcs_handle_graph_change (glusterd_volinfo_t *volinfo)
                nfs_op = glusterd_nfs_server_stop;
                qd_op = glusterd_quotad_stop;
        } else {
                if (glusterd_all_replicate_volumes_stopped()) {
                if (glusterd_all_shd_compatible_volumes_stopped()) {
                        shd_op = glusterd_shd_stop;
                }
                if (glusterd_all_volumes_with_quota_stopped ()) {
@@ -6997,6 +6997,19 @@ glusterd_is_volume_replicate (glusterd_volinfo_t *volinfo)
        return replicates;
}

gf_boolean_t
glusterd_is_shd_compatible_volume (glusterd_volinfo_t *volinfo)
{
        switch (volinfo->type) {
        case GF_CLUSTER_TYPE_REPLICATE:
        case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
        case GF_CLUSTER_TYPE_DISPERSE:
                return _gf_true;

        }
        return _gf_false;
}

int
glusterd_set_dump_options (char *dumpoptions_path, char *options,
                           int option_cnt)

@@ -747,4 +747,7 @@ int
glusterd_import_quota_conf (dict_t *peer_data, int vol_idx,
                            glusterd_volinfo_t *new_volinfo,
                            char *prefix);

gf_boolean_t
glusterd_is_shd_compatible_volume (glusterd_volinfo_t *volinfo);
#endif
@@ -889,6 +889,37 @@ glusterd_check_option_exists (char *key, char **completion)
        return ret;
}

int
glusterd_volopt_validate (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
                          char *value, char **op_errstr)
{
        struct volopt_map_entry *vme = NULL;
        char *volname = NULL;
        int ret = 0;
        xlator_t *this = THIS;

        if (!dict || !key || !value) {
                gf_log_callingfn (this->name, GF_LOG_WARNING, "Invalid "
                                  "Arguments (dict=%p, key=%s, value=%s)", dict,
                                  key, value);
                return -1;
        }

        for (vme = &glusterd_volopt_map[0]; vme->key; vme++) {
                if ((vme->validate_fn) &&
                    ((!strcmp (key, vme->key)) ||
                     (!strcmp (key, strchr (vme->key, '.') + 1)))) {
                        ret = vme->validate_fn (volinfo, dict, key, value,
                                                op_errstr);
                        if (ret)
                                goto out;
                        break;
                }
        }
out:
        return ret;
}

char*
glusterd_get_trans_type_rb (gf_transport_type ttype)
{
@@ -2555,6 +2586,41 @@ out:
        return ret;
}

static int
volgen_graph_build_ec_clusters (volgen_graph_t *graph,
                                glusterd_volinfo_t *volinfo)
{
        int i = 0;
        int ret = 0;
        int clusters = 0;
        char *disperse_args[] = {"cluster/disperse",
                                 "%s-disperse-%d"};
        xlator_t *ec = NULL;
        char option[32] = {0};

        clusters = volgen_graph_build_clusters (graph, volinfo,
                                                disperse_args[0],
                                                disperse_args[1],
                                                volinfo->brick_count,
                                                volinfo->disperse_count);
        if (clusters < 0)
                goto out;

        sprintf(option, "%d", volinfo->redundancy_count);
        ec = first_of (graph);
        for (i = 0; i < clusters; i++) {
                ret = xlator_set_option (ec, "redundancy", option);
                if (ret) {
                        clusters = -1;
                        goto out;
                }

                ec = ec->next;
        }
out:
        return clusters;
}

static int
volume_volgen_graph_build_clusters (volgen_graph_t *graph,
                                    glusterd_volinfo_t *volinfo,
@@ -2564,14 +2630,10 @@ volume_volgen_graph_build_clusters (volgen_graph_t *graph,
                                  "%s-replicate-%d"};
        char *stripe_args[] = {"cluster/stripe",
                               "%s-stripe-%d"};
        char *disperse_args[] = {"cluster/disperse",
                                 "%s-disperse-%d"};
        char option[32] = "";
        int rclusters = 0;
        int clusters = 0;
        int dist_count = 0;
        int ret = -1;
        xlator_t *ec = NULL;

        if (!volinfo->dist_leaf_count)
                goto out;
@@ -2621,25 +2683,12 @@ volume_volgen_graph_build_clusters (volgen_graph_t *graph,
                if (clusters < 0)
                        goto out;
                break;

        case GF_CLUSTER_TYPE_DISPERSE:
                clusters = volgen_graph_build_clusters (graph, volinfo,
                                                        disperse_args[0],
                                                        disperse_args[1],
                                                        volinfo->brick_count,
                                                        volinfo->disperse_count);
                clusters = volgen_graph_build_ec_clusters (graph, volinfo);
                if (clusters < 0)
                        goto out;

                sprintf(option, "%d", volinfo->redundancy_count);
                ec = first_of (graph);
                while (clusters-- > 0) {
                        ret = xlator_set_option (ec, "redundancy", option);
                        if (ret)
                                goto out;

                        ec = ec->next;
                }

                break;
        default:
                gf_log ("", GF_LOG_ERROR, "volume inconsistency: "
@@ -2687,6 +2736,52 @@ static int client_graph_set_perf_options(volgen_graph_t *graph,
                                         &nfsperfxl_option_handler);
}

static int
graph_set_generic_options (xlator_t *this, volgen_graph_t *graph,
                           dict_t *set_dict, char *identifier)
{
        int ret = 0;

        ret = volgen_graph_set_options_generic (graph, set_dict, "client",
                                                &loglevel_option_handler);

        if (ret)
                gf_log (this->name, GF_LOG_WARNING, "changing %s log level"
                        " failed", identifier);

        ret = volgen_graph_set_options_generic (graph, set_dict, "client",
                                                &sys_loglevel_option_handler);
        if (ret)
                gf_log (this->name, GF_LOG_WARNING, "changing %s syslog "
                        "level failed", identifier);

        ret = volgen_graph_set_options_generic (graph, set_dict, "client",
                                                &logger_option_handler);

        if (ret)
                gf_log (this->name, GF_LOG_WARNING, "changing %s logger"
                        " failed", identifier);

        ret = volgen_graph_set_options_generic (graph, set_dict, "client",
                                                &log_format_option_handler);
        if (ret)
                gf_log (this->name, GF_LOG_WARNING, "changing %s log format"
                        " failed", identifier);

        ret = volgen_graph_set_options_generic (graph, set_dict, "client",
                                                &log_buf_size_option_handler);
        if (ret)
                gf_log (this->name, GF_LOG_WARNING, "Failed to change "
                        "log-buf-size option");

        ret = volgen_graph_set_options_generic (graph, set_dict, "client",
                                                &log_flush_timeout_option_handler);
        if (ret)
                gf_log (this->name, GF_LOG_WARNING, "Failed to change "
                        "log-flush-timeout option");
        return 0;
}

static int
client_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
                      dict_t *set_dict, void *param)
@@ -2915,44 +3010,7 @@ client_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
        if (!xl)
                goto out;

        ret = volgen_graph_set_options_generic (graph, set_dict, "client",
                                                &loglevel_option_handler);

        if (ret)
                gf_log (this->name, GF_LOG_WARNING, "changing client log level"
                        " failed");

        ret = volgen_graph_set_options_generic (graph, set_dict, "client",
                                                &sys_loglevel_option_handler);
        if (ret)
                gf_log (this->name, GF_LOG_WARNING, "changing client syslog "
                        "level failed");

        ret = volgen_graph_set_options_generic (graph, set_dict, "client",
                                                &logger_option_handler);

        if (ret)
                gf_log (this->name, GF_LOG_WARNING, "changing client logger"
                        " failed");

        ret = volgen_graph_set_options_generic (graph, set_dict, "client",
                                                &log_format_option_handler);
        if (ret)
                gf_log (this->name, GF_LOG_WARNING, "changing client log format"
                        " failed");

        ret = volgen_graph_set_options_generic (graph, set_dict, "client",
                                                &log_buf_size_option_handler);
        if (ret)
                gf_log (this->name, GF_LOG_WARNING, "Failed to change "
                        "log-buf-size option");

        ret = volgen_graph_set_options_generic (graph, set_dict, "client",
                                                &log_flush_timeout_option_handler);
        if (ret)
                gf_log (this->name, GF_LOG_WARNING, "Failed to change "
                        "log-flush-timeout option");

        ret = graph_set_generic_options (this, graph, set_dict, "client");
out:
        return ret;
}
@@ -3210,6 +3268,38 @@ nfs_option_handler (volgen_graph_t *graph,
        return 0;
}

char*
volgen_get_shd_key (glusterd_volinfo_t *volinfo)
{
        char *key = NULL;

        switch (volinfo->type) {
        case GF_CLUSTER_TYPE_REPLICATE:
        case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
                key = "cluster.self-heal-daemon";
                break;
        case GF_CLUSTER_TYPE_DISPERSE:
                key = "cluster.disperse-self-heal-daemon";
                break;
        default:
                key = NULL;
                break;
        }

        return key;
}

static gf_boolean_t
volgen_is_shd_compatible_xl (char *xl_type)
{
        char *shd_xls[] = {"cluster/replicate", "cluster/disperse",
                           NULL};
        if (gf_get_index_by_elem (shd_xls, xl_type) != -1)
                return _gf_true;

        return _gf_false;
}

static int
volgen_graph_set_iam_shd (volgen_graph_t *graph)
{
@@ -3217,7 +3307,7 @@ volgen_graph_set_iam_shd (volgen_graph_t *graph)
        int ret = 0;

        for (trav = first_of (graph); trav; trav = trav->next) {
                if (strcmp (trav->type, "cluster/replicate") != 0)
                if (!volgen_is_shd_compatible_xl (trav->type))
                        continue;

                ret = xlator_set_option (trav, "iam-self-heal-daemon", "yes");
@@ -3228,9 +3318,125 @@ volgen_graph_set_iam_shd (volgen_graph_t *graph)
}

static int
build_shd_graph (volgen_graph_t *graph, dict_t *mod_dict)
prepare_shd_volume_options (glusterd_volinfo_t *volinfo,
                            dict_t *mod_dict, dict_t *set_dict)
{
        char *key = NULL;
        int ret = 0;

        key = volgen_get_shd_key (volinfo);
        if (!key) {
                ret = -1;
                goto out;
        }

        ret = dict_set_str (set_dict, key, "enable");
        if (ret)
                goto out;

        ret = dict_set_uint32 (set_dict, "trusted-client", GF_CLIENT_TRUSTED);
        if (ret)
                goto out;

        dict_copy (volinfo->dict, set_dict);
        if (mod_dict)
                dict_copy (mod_dict, set_dict);
out:
        return ret;
}

static int
volgen_graph_build_replicate_clusters (volgen_graph_t *graph,
                                       glusterd_volinfo_t *volinfo)
{
        char *replicate_args[] = {"cluster/replicate",
                                  "%s-replicate-%d"};

        return volgen_graph_build_clusters (graph, volinfo, "cluster/replicate",
                                            "%s-replicate-%d",
                                            volinfo->brick_count,
                                            volinfo->replica_count);
}

static int
build_shd_clusters (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
                    dict_t *set_dict)
{
        int ret = 0;
        int clusters = -1;

        ret = volgen_graph_build_clients (graph, volinfo, set_dict, NULL);
        if (ret)
                goto out;

        switch (volinfo->type) {
        case GF_CLUSTER_TYPE_REPLICATE:
        case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
                clusters = volgen_graph_build_replicate_clusters (graph,
                                                                  volinfo);
                break;

        case GF_CLUSTER_TYPE_DISPERSE:
                clusters = volgen_graph_build_ec_clusters (graph, volinfo);
                break;
        }
out:
        return clusters;
}

static int
build_shd_volume_graph (xlator_t *this, volgen_graph_t *graph,
                        glusterd_volinfo_t *volinfo,
                        dict_t *mod_dict, dict_t *set_dict,
                        gf_boolean_t graph_check, gf_boolean_t *valid_config)
{
        volgen_graph_t cgraph = {0};
        int ret = 0;
        int clusters = -1;

        if (!graph_check && (volinfo->status != GLUSTERD_STATUS_STARTED))
                goto out;

        if (!glusterd_is_shd_compatible_volume (volinfo))
                goto out;

        /* Shd graph is valid only when there is at least one
         * replica/disperse volume is present
         */
        *valid_config = _gf_true;

        ret = prepare_shd_volume_options (volinfo, mod_dict, set_dict);
        if (ret)
                goto out;

        clusters = build_shd_clusters (&cgraph, volinfo, set_dict);
        if (clusters < 0) {
                ret = -1;
                goto out;
        }

        ret = volgen_graph_set_options_generic (&cgraph, set_dict,
                                                volinfo, shd_option_handler);
        if (ret)
                goto out;

        ret = volgen_graph_set_iam_shd (&cgraph);
        if (ret)
                goto out;

        ret = volgen_graph_merge_sub (graph, &cgraph, clusters);
        if (ret)
                goto out;

        ret = graph_set_generic_options (this, graph, set_dict,
                                         "self-heal daemon");
out:
        return ret;
}

static int
build_shd_graph (volgen_graph_t *graph, dict_t *mod_dict)
{
        glusterd_volinfo_t *voliter = NULL;
        xlator_t *this = NULL;
        glusterd_conf_t *priv = NULL;
@@ -3238,8 +3444,7 @@ build_shd_graph (volgen_graph_t *graph, dict_t *mod_dict)
        int ret = 0;
        gf_boolean_t valid_config = _gf_false;
        xlator_t *iostxl = NULL;
        int rclusters = 0;
        int replica_count = 0;
        int clusters = 0;
        gf_boolean_t graph_check = _gf_false;

        this = THIS;
@@ -3259,104 +3464,9 @@ build_shd_graph (volgen_graph_t *graph, dict_t *mod_dict)
        }

        list_for_each_entry (voliter, &priv->volumes, vol_list) {
                if (!graph_check &&
                    (voliter->status != GLUSTERD_STATUS_STARTED))
                        continue;

                if (!glusterd_is_volume_replicate (voliter))
                        continue;

                replica_count = voliter->replica_count;

                valid_config = _gf_true;

                ret = dict_set_str (set_dict, "cluster.self-heal-daemon", "on");
                if (ret)
                        goto out;

                ret = dict_set_uint32 (set_dict, "trusted-client",
                                       GF_CLIENT_TRUSTED);
                if (ret)
                        goto out;

                dict_copy (voliter->dict, set_dict);
                if (mod_dict)
                        dict_copy (mod_dict, set_dict);

                memset (&cgraph, 0, sizeof (cgraph));
                ret = volgen_graph_build_clients (&cgraph, voliter, set_dict,
                                                  NULL);
                if (ret)
                        goto out;

                rclusters = volgen_graph_build_clusters (&cgraph, voliter,
                                                         "cluster/replicate",
                                                         "%s-replicate-%d",
                                                         voliter->brick_count,
                                                         replica_count);
                if (rclusters < 0) {
                        ret = -1;
                        goto out;
                }

                ret = volgen_graph_set_options_generic (&cgraph, set_dict, voliter,
                                                        shd_option_handler);
                if (ret)
                        goto out;

                ret = volgen_graph_set_iam_shd (&cgraph);
                if (ret)
                        goto out;

                ret = volgen_graph_merge_sub (graph, &cgraph, rclusters);
                if (ret)
                        goto out;

                ret = volgen_graph_set_options_generic (graph, set_dict,
                                                        "client",
                                                        &loglevel_option_handler);

                if (ret)
                        gf_log (this->name, GF_LOG_WARNING, "changing loglevel "
                                "of self-heal daemon failed");

                ret = volgen_graph_set_options_generic (graph, set_dict,
                                                        "client",
                                                        &sys_loglevel_option_handler);
                if (ret)
                        gf_log (this->name, GF_LOG_WARNING, "changing syslog "
                                "level of self-heal daemon failed");

                ret = volgen_graph_set_options_generic (graph, set_dict,
                                                        "client",
                                                        &logger_option_handler);

                if (ret)
                        gf_log (this->name, GF_LOG_WARNING, "changing logger "
                                "of self-heal daemon failed");

                ret = volgen_graph_set_options_generic (graph, set_dict,
                                                        "client",
                                                        &log_format_option_handler);
                if (ret)
                        gf_log (this->name, GF_LOG_WARNING, "changing log "
                                "format of self-heal daemon failed");

                ret = volgen_graph_set_options_generic (graph, set_dict,
                                                        "client",
                                                        &log_buf_size_option_handler);
                if (ret)
                        gf_log (this->name, GF_LOG_WARNING, "changing "
                                "log-buf-size for self-heal daemon failed");

                ret = volgen_graph_set_options_generic (graph, set_dict,
                                                        "client",
                                                        &log_flush_timeout_option_handler);
                if (ret)
                        gf_log (this->name, GF_LOG_WARNING, "changing "
                                "log-flush-timeout for self-heal daemon "
                                "failed");

                ret = build_shd_volume_graph (this, graph, voliter, mod_dict,
                                              set_dict, graph_check,
                                              &valid_config);

                ret = dict_reset (set_dict);
                if (ret)
@@ -4324,7 +4434,7 @@ validate_shdopts (glusterd_volinfo_t *volinfo,

        graph.errstr = op_errstr;

        if (!glusterd_is_volume_replicate (volinfo)) {
        if (!glusterd_is_shd_compatible_volume (volinfo)) {
                ret = 0;
                goto out;
        }
@@ -100,8 +100,8 @@ typedef enum {

typedef enum { DOC, NO_DOC, GLOBAL_DOC, GLOBAL_NO_DOC } option_type_t;

typedef int (*vme_option_validation) (dict_t *dict, char *key, char *value,
                                      char **op_errstr);
typedef int (*vme_option_validation) (glusterd_volinfo_t *volinfo, dict_t *dict,
                                      char *key, char *value, char **op_errstr);

struct volopt_map_entry {
        char *key;
@@ -230,4 +230,10 @@ gd_is_xlator_option (char *key);
gf_boolean_t
gd_is_boolean_option (char *key);

char*
volgen_get_shd_key (glusterd_volinfo_t *volinfo);

int
glusterd_volopt_validate (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
                          char *value, char **op_errstr);
#endif
@@ -638,6 +638,59 @@ glusterd_handle_cli_delete_volume (rpcsvc_request_t *req)
                                            __glusterd_handle_cli_delete_volume);
}

static int
glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
                                     glusterd_volinfo_t *volinfo)
{
        gf_xl_afr_op_t heal_op = GF_AFR_OP_INVALID;
        int ret = 0;
        xlator_t *this = THIS;
        char *key = NULL;
        char *value = NULL;

        ret = dict_get_int32 (dict, "heal-op", (int32_t *)&heal_op);
        if (ret || (heal_op == GF_AFR_OP_INVALID)) {
                ret = -1;
                goto out;
        }

        if ((heal_op != GF_AFR_OP_HEAL_ENABLE) &&
            (heal_op != GF_AFR_OP_HEAL_DISABLE)) {
                ret = -EINVAL;
                goto out;
        }

        key = volgen_get_shd_key (volinfo);
        if (!key) {
                ret = -1;
                goto out;
        }

        /* Convert this command to volume-set command based on volume type */
        ret = dict_set_str (dict, "key1", key);
        if (ret)
                goto out;

        if (heal_op == GF_AFR_OP_HEAL_ENABLE) {
                value = "enable";
        } else if (heal_op == GF_AFR_OP_HEAL_DISABLE) {
                value = "disable";
        }

        ret = dict_set_str (dict, "value1", value);
        if (ret)
                goto out;

        ret = dict_set_int32 (dict, "count", 1);
        if (ret)
                goto out;

        ret = glusterd_op_begin_synctask (req, GD_OP_SET_VOLUME, dict);

out:
        return ret;
}

int
__glusterd_handle_cli_heal_volume (rpcsvc_request_t *req)
{
@@ -696,7 +749,21 @@ __glusterd_handle_cli_heal_volume (rpcsvc_request_t *req)
        if (ret) {
                snprintf (op_errstr, sizeof (op_errstr),
                          "Volume %s does not exist", volname);
                gf_log (this->name, GF_LOG_ERROR, "%s", op_errstr);
                goto out;
        }

        ret = glusterd_handle_heal_enable_disable (req, dict, volinfo);
        if (ret == -EINVAL) {
                ret = 0;
        } else {
                /*
                 * If the return value is -ve but not -EINVAL then the command
                 * failed. If the return value is 0 then the synctask for the
                 * op has begun, so in both cases just 'goto out'. If there was
                 * a failure it will respond with an error, otherwise the
                 * synctask will take the responsibility of sending the
                 * response.
                 */
                goto out;
        }

@@ -715,6 +782,7 @@ out:
                if (op_errstr[0] == '\0')
                        snprintf (op_errstr, sizeof (op_errstr),
                                  "operation failed");
                gf_log (this->name, GF_LOG_ERROR, "%s", op_errstr);
                ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
                                                     dict, op_errstr);
        }
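For context on how the handler above behaves at the CLI level, here is a hedged sketch based on glusterd_handle_heal_enable_disable, the validate_fn hooks added in the next hunks, and the heald.t test (the volume names r2 and dist come from that test):

    # heal enable/disable is staged as a GD_OP_SET_VOLUME op whose key1/value1
    # come from volgen_get_shd_key(), so these two are equivalent in effect:
    gluster volume heal r2 enable
    gluster volume set r2 cluster.self-heal-daemon enable

    # A plain distribute volume has no shd key (volgen_get_shd_key returns NULL),
    # so 'volume heal dist enable' fails; a direct 'volume set' is likewise
    # rejected by validate_replica_heal_enable_disable:
    gluster volume set dist cluster.self-heal-daemon on    # "Volume dist is not of replicate type"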
@@ -17,81 +17,13 @@
#include "glusterd-utils.h"

static int
check_dict_key_value (dict_t *dict, char *key, char *value)
{
        glusterd_conf_t *priv = NULL;
        int ret = 0;
        xlator_t *this = NULL;

        this = THIS;
        GF_ASSERT (this);
        priv = this->private;
        GF_ASSERT (priv);

        if (!dict) {
                gf_log (this->name, GF_LOG_ERROR, "Received Empty Dict.");
                ret = -1;
                goto out;
        }

        if (!key) {
                gf_log (this->name, GF_LOG_ERROR, "Received Empty Key.");
                ret = -1;
                goto out;
        }

        if (!value) {
                gf_log (this->name, GF_LOG_ERROR, "Received Empty Value.");
                ret = -1;
                goto out;
        }

out:
        gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);

        return ret;
}

static int
get_volname_volinfo (dict_t *dict, char **volname, glusterd_volinfo_t **volinfo)
{
        glusterd_conf_t *priv = NULL;
        int ret = 0;
        xlator_t *this = NULL;

        this = THIS;
        GF_ASSERT (this);
        priv = this->private;
        GF_ASSERT (priv);

        ret = dict_get_str (dict, "volname", volname);
        if (ret) {
                gf_log (this->name, GF_LOG_ERROR, "Unable to get volume name");
                goto out;
        }

        ret = glusterd_volinfo_find (*volname, volinfo);
        if (ret) {
                gf_log (this->name, GF_LOG_ERROR, "Unable to allocate memory");
                goto out;
        }

out:
        gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);

        return ret;
}

static int
validate_cache_max_min_size (dict_t *dict, char *key, char *value,
                             char **op_errstr)
validate_cache_max_min_size (glusterd_volinfo_t *volinfo, dict_t *dict,
                             char *key, char *value, char **op_errstr)
{
        char *current_max_value = NULL;
        char *current_min_value = NULL;
        char errstr[2048] = "";
        char *volname = NULL;
        glusterd_conf_t *priv = NULL;
        glusterd_volinfo_t *volinfo = NULL;
        int ret = 0;
        uint64_t max_value = 0;
        uint64_t min_value = 0;
@@ -102,14 +34,6 @@ validate_cache_max_min_size (dict_t *dict, char *key, char *value,
        priv = this->private;
        GF_ASSERT (priv);

        ret = check_dict_key_value (dict, key, value);
        if (ret)
                goto out;

        ret = get_volname_volinfo (dict, &volname, &volinfo);
        if (ret)
                goto out;

        if ((!strcmp (key, "performance.cache-min-file-size")) ||
            (!strcmp (key, "cache-min-file-size"))) {
                glusterd_volinfo_get (volinfo,
@@ -150,13 +74,11 @@ out:
}

static int
validate_quota (dict_t *dict, char *key, char *value,
                char **op_errstr)
validate_quota (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
                char *value, char **op_errstr)
{
        char errstr[2048] = "";
        char *volname = NULL;
        glusterd_conf_t *priv = NULL;
        glusterd_volinfo_t *volinfo = NULL;
        int ret = 0;
        xlator_t *this = NULL;

@@ -165,14 +87,6 @@ validate_quota (dict_t *dict, char *key, char *value,
        priv = this->private;
        GF_ASSERT (priv);

        ret = check_dict_key_value (dict, key, value);
        if (ret)
                goto out;

        ret = get_volname_volinfo (dict, &volname, &volinfo);
        if (ret)
                goto out;

        ret = glusterd_volinfo_get_boolean (volinfo, VKEY_FEATURES_QUOTA);
        if (ret == -1) {
                gf_log (this->name, GF_LOG_ERROR,
@@ -197,7 +111,8 @@ out:
}

static int
validate_uss (dict_t *dict, char *key, char *value, char **op_errstr)
validate_uss (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
              char *value, char **op_errstr)
{
        char errstr[2048] = "";
        int ret = 0;
@@ -223,12 +138,11 @@ out:
}

static int
validate_stripe (dict_t *dict, char *key, char *value, char **op_errstr)
validate_stripe (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
                 char *value, char **op_errstr)
{
        char errstr[2048] = "";
        char *volname = NULL;
        glusterd_conf_t *priv = NULL;
        glusterd_volinfo_t *volinfo = NULL;
        int ret = 0;
        xlator_t *this = NULL;

@@ -237,14 +151,6 @@ validate_stripe (dict_t *dict, char *key, char *value, char **op_errstr)
        priv = this->private;
        GF_ASSERT (priv);

        ret = check_dict_key_value (dict, key, value);
        if (ret)
                goto out;

        ret = get_volname_volinfo (dict, &volname, &volinfo);
        if (ret)
                goto out;

        if (volinfo->stripe_count == 1) {
                snprintf (errstr, sizeof (errstr),
                          "Cannot set %s for a non-stripe volume.", key);
@@ -261,13 +167,11 @@ out:
}

static int
validate_subvols_per_directory (dict_t *dict, char *key, char *value,
                                char **op_errstr)
validate_subvols_per_directory (glusterd_volinfo_t *volinfo, dict_t *dict,
                                char *key, char *value, char **op_errstr)
{
        char errstr[2048] = "";
        char *volname = NULL;
        glusterd_conf_t *priv = NULL;
        glusterd_volinfo_t *volinfo = NULL;
        int ret = 0;
        int subvols = 0;
        xlator_t *this = NULL;
@@ -277,14 +181,6 @@ validate_subvols_per_directory (dict_t *dict, char *key, char *value,
        priv = this->private;
        GF_ASSERT (priv);

        ret = check_dict_key_value (dict, key, value);
        if (ret)
                goto out;

        ret = get_volname_volinfo (dict, &volname, &volinfo);
        if (ret)
                goto out;

        subvols = atoi(value);

        /* Checking if the subvols-per-directory exceed the total
@@ -307,6 +203,36 @@ out:
        return ret;
}

static int
validate_replica_heal_enable_disable (glusterd_volinfo_t *volinfo, dict_t *dict,
                                      char *key, char *value, char **op_errstr)
{
        int ret = 0;

        if (!glusterd_is_volume_replicate (volinfo)) {
                gf_asprintf (op_errstr, "Volume %s is not of replicate type",
                             volinfo->volname);
                ret = -1;
        }

        return ret;
}

static int
validate_disperse_heal_enable_disable (glusterd_volinfo_t *volinfo,
                                       dict_t *dict, char *key, char *value,
                                       char **op_errstr)
{
        int ret = 0;

        if (volinfo->type != GF_CLUSTER_TYPE_DISPERSE) {
                gf_asprintf (op_errstr, "Volume %s is not of disperse type",
                             volinfo->volname);
                ret = -1;
        }

        return ret;
}

/* dispatch table for VOLUME SET
 * -----------------------------
@@ -511,7 +437,8 @@ struct volopt_map_entry glusterd_volopt_map[] = {
        { .key = "cluster.self-heal-daemon",
          .voltype = "cluster/replicate",
          .option = "!self-heal-daemon",
          .op_version = 1
          .op_version = 1,
          .validate_fn = validate_replica_heal_enable_disable
        },
        { .key = "cluster.heal-timeout",
          .voltype = "cluster/replicate",
@@ -1712,6 +1639,14 @@ struct volopt_map_entry glusterd_volopt_map[] = {
          .type = NO_DOC,
          .op_version = GD_OP_VERSION_3_7_0,
        },
        { .key = "cluster.disperse-self-heal-daemon",
          .voltype = "cluster/disperse",
          .value = "enable",
          .type = NO_DOC,
          .option = "self-heal-daemon",
          .op_version = GD_OP_VERSION_3_7_0,
          .validate_fn = validate_disperse_heal_enable_disable
        },
        { .key = NULL
        }
};