cli, glusterd : Added support for clear-locks command.

Change-Id: I8e7cd51d6e3dd968cced1ec4115b6811f2ab5c1b
BUG: 789858
Signed-off-by: Krishnan Parthasarathi <kp@gluster.com>
Reviewed-on: http://review.gluster.com/2552
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Vijay Bellur <vijay@gluster.com>
Krishnan Parthasarathi 2011-12-24 13:49:52 +05:30 committed by Vijay Bellur
parent b016fe67b6
commit acb691f890
14 changed files with 754 additions and 23 deletions
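
For reference, the syntax this change adds (taken from the volume_cmds[] usage string in the CLI table below); the volume name, path and range in the sample invocation are only illustrative:

    gluster volume clear-locks <VOLNAME> <path> kind {blocked|granted|all} {inode [range]|entry [basename]|posix [range]}

    gluster volume clear-locks test-vol /dir/file kind granted inode 0,0-17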

@@ -2032,3 +2032,67 @@ out:
        gf_log ("cli", GF_LOG_ERROR, "Error parsing dumpoptions");
        return ret;
}

int
cli_cmd_volume_clrlks_opts_parse (const char **words, int wordcount,
                                  dict_t **options)
{
        int      ret = -1;
        int      i = 0;
        dict_t  *dict = NULL;
        char    *kind_opts[4] = {"blocked", "granted", "all", NULL};
        char    *types[4] = {"inode", "entry", "posix", NULL};
        char    *free_ptr = NULL;

        dict = dict_new ();
        if (!dict)
                goto out;

        if (strcmp (words[4], "kind"))
                goto out;

        for (i = 0; kind_opts[i]; i++) {
                if (!strcmp (words[5], kind_opts[i])) {
                        free_ptr = gf_strdup (words[5]);
                        ret = dict_set_dynstr (dict, "kind", free_ptr);
                        if (ret)
                                goto out;
                        free_ptr = NULL;
                        break;
                }
        }
        if (i == 3)
                goto out;

        ret = -1;
        for (i = 0; types[i]; i++) {
                if (!strcmp (words[6], types[i])) {
                        free_ptr = gf_strdup (words[6]);
                        ret = dict_set_dynstr (dict, "type", free_ptr);
                        if (ret)
                                goto out;
                        free_ptr = NULL;
                        break;
                }
        }
        if (i == 3)
                goto out;

        if (wordcount == 8) {
                free_ptr = gf_strdup (words[7]);
                ret = dict_set_dynstr (dict, "opts", free_ptr);
                if (ret)
                        goto out;
                free_ptr = NULL;
        }

        ret = 0;
        *options = dict;
out:
        if (ret) {
                GF_FREE (free_ptr);
                dict_unref (dict);
        }

        return ret;
}
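
To make the indices above concrete, here is how a sample command lines up with words[] (volume name, path and range are hypothetical; words[0] is "volume" because the leading "gluster" is not part of the parsed words):

    /* words[]:  0      1           2        3         4    5       6     7
     *           volume clear-locks test-vol /dir/file kind granted inode 0,0-17
     *
     * parsed into options: "kind" = "granted", "type" = "inode",
     * and, when wordcount == 8, "opts" = "0,0-17"
     */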

@@ -1681,6 +1681,60 @@ out:
        return ret;
}

int
cli_cmd_volume_clearlocks_cbk (struct cli_state *state,
                               struct cli_cmd_word *word,
                               const char **words, int wordcount)
{
        int                    ret = -1;
        rpc_clnt_procedure_t  *proc = NULL;
        call_frame_t          *frame = NULL;
        dict_t                *options = NULL;
        int                    sent = 0;
        int                    parse_error = 0;

        frame = create_frame (THIS, THIS->ctx->pool);
        if (!frame)
                goto out;

        if (wordcount < 7 || wordcount > 8) {
                cli_usage_out (word->pattern);
                parse_error = 1;
                goto out;
        }

        ret = cli_cmd_volume_clrlks_opts_parse (words, wordcount, &options);
        if (ret) {
                parse_error = 1;
                gf_log ("cli", GF_LOG_ERROR, "Error parsing "
                        "clear-locks options");
                cli_out ("Error parsing options");
                cli_usage_out (word->pattern);
                goto out;
        }

        ret = dict_set_str (options, "volname", (char *)words[2]);
        if (ret)
                goto out;

        ret = dict_set_str (options, "path", (char *)words[3]);
        if (ret)
                goto out;

        proc = &cli_rpc_prog->proctable[GLUSTER_CLI_CLRLOCKS_VOLUME];
        if (proc->fn) {
                ret = proc->fn (frame, THIS, options);
        }

out:
        if (ret) {
                cli_cmd_sent_status_get (&sent);
                if ((sent == 0) && (parse_error == 0))
                        cli_out ("Volume clear-locks failed");
        }

        return ret;
}

struct cli_cmd volume_cmds[] = {
        { "volume info [all|<VOLNAME>]",
          cli_cmd_volume_info_cbk,
@@ -1785,6 +1839,12 @@ struct cli_cmd volume_cmds[] = {
          cli_cmd_volume_list_cbk,
          "list all volumes in cluster"},

        { "volume clear-locks <VOLNAME> <path> kind {blocked|granted|all} "
          "{inode [range]|entry [basename]|posix [range]}",
          cli_cmd_volume_clearlocks_cbk,
          "Clear locks held on path"
        },

        { NULL, NULL, NULL }
};

@@ -5640,6 +5640,118 @@ out:
        return ret;
}

int32_t
gf_cli3_1_clearlocks_volume_cbk (struct rpc_req *req, struct iovec *iov,
                                 int count, void *myframe)
{
        gf_cli_rsp   rsp = {0,};
        int          ret = -1;
        char        *lk_summary = NULL;
        char        *volname = NULL;
        dict_t      *dict = NULL;

        if (-1 == req->rpc_status)
                goto out;

        ret = xdr_to_generic (*iov, &rsp,
                              (xdrproc_t)xdr_gf_cli_rsp);
        if (ret < 0) {
                gf_log ("cli", GF_LOG_ERROR, "XDR decoding failed");
                goto out;
        }

        gf_log ("cli", GF_LOG_DEBUG, "Received response to clear-locks");

        if (rsp.op_ret) {
                cli_out ("Volume clear-locks unsuccessful");
                cli_out ("%s", rsp.op_errstr);
        } else {
                if (!rsp.dict.dict_len) {
                        cli_out ("Possibly no locks cleared");
                        ret = 0;
                        goto out;
                }

                dict = dict_new ();
                if (!dict) {
                        ret = -1;
                        goto out;
                }

                ret = dict_unserialize (rsp.dict.dict_val,
                                        rsp.dict.dict_len,
                                        &dict);
                if (ret) {
                        gf_log ("cli", GF_LOG_ERROR,
                                "Unable to unserialize response dictionary");
                        goto out;
                }

                ret = dict_get_str (dict, "volname", &volname);
                if (ret) {
                        gf_log ("cli", GF_LOG_ERROR, "Unable to get volname "
                                "from dictionary");
                        goto out;
                }

                ret = dict_get_str (dict, "lk-summary", &lk_summary);
                if (ret) {
                        gf_log ("cli", GF_LOG_ERROR, "Unable to get lock "
                                "summary from dictionary");
                        goto out;
                }

                cli_out ("Volume clear-locks successful");
                cli_out ("%s", lk_summary);
        }

        ret = rsp.op_ret;

out:
        cli_cmd_broadcast_response (ret);
        return ret;
}

int32_t
gf_cli3_1_clearlocks_volume (call_frame_t *frame, xlator_t *this,
                             void *data)
{
        gf_cli_req   req = {{0,}};
        dict_t      *options = NULL;
        int          ret = -1;

        if (!frame || !this || !data)
                goto out;

        options = data;

        ret = dict_allocate_and_serialize (options,
                                           &req.dict.dict_val,
                                           (size_t *)&req.dict.dict_len);
        if (ret < 0) {
                gf_log ("cli", GF_LOG_ERROR,
                        "failed to serialize the data");
                goto out;
        }

        ret = cli_cmd_submit (&req, frame, cli_rpc_prog,
                              GLUSTER_CLI_CLRLOCKS_VOLUME, NULL,
                              this, gf_cli3_1_clearlocks_volume_cbk,
                              (xdrproc_t)xdr_gf_cli_req);
out:
        if (options)
                dict_destroy (options);

        gf_log ("cli", GF_LOG_DEBUG, "Returning %d", ret);

        if (req.dict.dict_val)
                GF_FREE (req.dict.dict_val);

        return ret;
}

struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = {
        [GLUSTER_CLI_NULL]  = {"NULL", NULL },
        [GLUSTER_CLI_PROBE] = {"PROBE_QUERY", gf_cli3_1_probe},
@@ -5675,6 +5787,7 @@ struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = {
        [GLUSTER_CLI_HEAL_VOLUME]      = {"HEAL_VOLUME", gf_cli3_1_heal_volume},
        [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", gf_cli3_1_statedump_volume},
        [GLUSTER_CLI_LIST_VOLUME]      = {"LIST_VOLUME", gf_cli3_1_list_volume},
        [GLUSTER_CLI_CLRLOCKS_VOLUME]  = {"CLEARLOCKS_VOLUME", gf_cli3_1_clearlocks_volume},
};

struct rpc_clnt_program cli_prog = {

@@ -219,6 +219,9 @@ cli_cmd_log_filename_parse (const char **words, int wordcount, dict_t **options)
int32_t
cli_cmd_volume_statedump_options_parse (const char **words, int wordcount,
                                        dict_t **options);

int32_t
cli_cmd_volume_clrlks_opts_parse (const char **words, int wordcount,
                                  dict_t **options);

cli_local_t * cli_local_get ();

@@ -155,6 +155,7 @@ enum gluster_cli_procnum {
        GLUSTER_CLI_HEAL_VOLUME,
        GLUSTER_CLI_STATEDUMP_VOLUME,
        GLUSTER_CLI_LIST_VOLUME,
        GLUSTER_CLI_CLRLOCKS_VOLUME,
        GLUSTER_CLI_MAXVALUE,
};

@@ -2621,6 +2621,73 @@ out:
        return ret;
}

int
glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
{
        int32_t         ret = -1;
        gf_cli_req      cli_req = {{0,}};
        glusterd_op_t   cli_op = GD_OP_CLEARLOCKS_VOLUME;
        char           *volname = NULL;
        dict_t         *dict = NULL;

        GF_ASSERT (req);

        ret = -1;
        if (!xdr_to_generic (req->msg[0], &cli_req,
                             (xdrproc_t)xdr_gf_cli_req)) {
                req->rpc_err = GARBAGE_ARGS;
                goto out;
        }

        if (cli_req.dict.dict_len) {
                dict = dict_new ();

                ret = dict_unserialize (cli_req.dict.dict_val,
                                        cli_req.dict.dict_len,
                                        &dict);
                if (ret < 0) {
                        gf_log (THIS->name, GF_LOG_ERROR,
                                "failed to unserialize req-buffer to"
                                " dictionary");
                        goto out;
                }
        } else {
                ret = -1;
                gf_log (THIS->name, GF_LOG_ERROR, "Empty cli request.");
                goto out;
        }

        ret = dict_get_str (dict, "volname", &volname);
        if (ret) {
                gf_log (THIS->name, GF_LOG_ERROR, "failed to get volname");
                goto out;
        }

        gf_log (THIS->name, GF_LOG_INFO, "Received clear-locks volume req "
                "for volume %s", volname);

        ret = glusterd_op_begin (req, cli_op, dict);
        gf_cmd_log ("clear-locks", "on volume %s %s", volname,
                    ((0 == ret) ? "SUCCEEDED" : "FAILED"));

out:
        if (ret && dict)
                dict_unref (dict);

        glusterd_friend_sm ();
        glusterd_op_sm ();

        if (ret)
                ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
                                                     NULL, "operation failed");

        if (cli_req.dict.dict_val)
                free (cli_req.dict.dict_val);

        return ret;
}

int
glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
                           rpc_clnt_event_t event,
@@ -2898,6 +2965,7 @@ rpcsvc_actor_t gd_svc_cli_actors[] = {
        [GLUSTER_CLI_HEAL_VOLUME]      = { "HEAL_VOLUME", GLUSTER_CLI_HEAL_VOLUME, glusterd_handle_cli_heal_volume, NULL, NULL, 0},
        [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", GLUSTER_CLI_STATEDUMP_VOLUME, glusterd_handle_cli_statedump_volume, NULL, NULL, 0},
        [GLUSTER_CLI_LIST_VOLUME]      = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, NULL, 0},
        [GLUSTER_CLI_CLRLOCKS_VOLUME]  = {"CLEARLOCKS_VOLUME", GLUSTER_CLI_CLRLOCKS_VOLUME, glusterd_handle_cli_clearlocks_volume, NULL, NULL, 0},
};

struct rpcsvc_program gd_svc_cli_prog = {

@@ -71,7 +71,8 @@ typedef enum gf_gld_mem_types_ {
        gf_gld_mt_mount_component = gf_common_mt_end + 45,
        gf_gld_mt_mount_spec      = gf_common_mt_end + 46,
        gf_gld_mt_nodesrv_t       = gf_common_mt_end + 47,
        gf_gld_mt_end             = gf_common_mt_end + 48,
        gf_gld_mt_charptr         = gf_common_mt_end + 48,
        gf_gld_mt_end             = gf_common_mt_end + 49,
} gf_gld_mem_types_t;
#endif

@@ -1616,6 +1616,7 @@ glusterd_op_build_payload (dict_t **req)
        case GD_OP_REBALANCE:
        case GD_OP_HEAL_VOLUME:
        case GD_OP_STATEDUMP_VOLUME:
        case GD_OP_CLEARLOCKS_VOLUME:
                {
                        dict_t *dict = ctx;
                        dict_copy (dict, req_dict);
@@ -2219,8 +2220,16 @@ glusterd_op_ac_commit_op (glusterd_op_sm_event_t *event, void *ctx)
        rsp_dict = glusterd_op_init_commit_rsp_dict (req_ctx->op);
        if (NULL == rsp_dict)
                return -1;
        status = glusterd_op_commit_perform (req_ctx->op, dict, &op_errstr,
                                             rsp_dict);
        if (GD_OP_CLEARLOCKS_VOLUME == req_ctx->op) {
                /* clear-locks should be run only on the
                 * originator glusterd */
                status = 0;
        } else {
                status = glusterd_op_commit_perform (req_ctx->op, dict,
                                                     &op_errstr, rsp_dict);
        }

        if (status) {
                gf_log (THIS->name, GF_LOG_ERROR, "Commit failed: %d", status);
@@ -2370,6 +2379,10 @@ glusterd_op_stage_validate (glusterd_op_t op, dict_t *dict, char **op_errstr,
                ret = glusterd_op_stage_statedump_volume (dict,
                                                          op_errstr);
                break;

        case GD_OP_CLEARLOCKS_VOLUME:
                ret = glusterd_op_stage_clearlocks_volume (dict,
                                                           op_errstr);
                break;

        default:
                gf_log ("", GF_LOG_ERROR, "Unknown op %d",
@@ -2462,6 +2475,10 @@ glusterd_op_commit_perform (glusterd_op_t op, dict_t *dict, char **op_errstr,
                ret = glusterd_op_statedump_volume (dict, op_errstr);
                break;

        case GD_OP_CLEARLOCKS_VOLUME:
                ret = glusterd_op_clearlocks_volume (dict, op_errstr);
                break;

        default:
                gf_log ("", GF_LOG_ERROR, "Unknown op %d",
                        op);
@@ -3740,6 +3757,7 @@ glusterd_op_free_ctx (glusterd_op_t op, void *ctx)
        case GD_OP_REBALANCE:
        case GD_OP_HEAL_VOLUME:
        case GD_OP_STATEDUMP_VOLUME:
        case GD_OP_CLEARLOCKS_VOLUME:
                dict_unref (ctx);
                break;
        default:

@@ -147,6 +147,7 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
        case GD_OP_STATUS_VOLUME:
        case GD_OP_SET_VOLUME:
        case GD_OP_LIST_VOLUME:
        case GD_OP_CLEARLOCKS_VOLUME:
        {
                /* nothing specific to be done */
                break;

@@ -4798,3 +4798,23 @@ glusterd_chk_peers_connected_befriended (uuid_t skip_uuid)
                (ret ? "TRUE" : "FALSE"));
        return ret;
}

void
glusterd_get_client_filepath (char *filepath, glusterd_volinfo_t *volinfo,
                              gf_transport_type type)
{
        char              path[PATH_MAX] = {0,};
        glusterd_conf_t  *priv = NULL;

        priv = THIS->private;

        GLUSTERD_GET_VOLUME_DIR (path, volinfo, priv);

        if ((volinfo->transport_type == GF_TRANSPORT_BOTH_TCP_RDMA) &&
            (type == GF_TRANSPORT_RDMA))
                snprintf (filepath, PATH_MAX, "%s/%s.rdma-fuse.vol",
                          path, volinfo->volname);
        else
                snprintf (filepath, PATH_MAX, "%s/%s-fuse.vol",
                          path, volinfo->volname);
}
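
As a sketch of what this helper yields: assuming a hypothetical glusterd working directory of /var/lib/glusterd and a volume named test-vol, GLUSTERD_GET_VOLUME_DIR would resolve to /var/lib/glusterd/vols/test-vol, so the client volfile path becomes /var/lib/glusterd/vols/test-vol/test-vol-fuse.vol (or test-vol.rdma-fuse.vol when the rdma flavour of a tcp,rdma volume is requested).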

@@ -386,4 +386,7 @@ glusterd_friend_remove_cleanup_vols (uuid_t uuid);
gf_boolean_t
glusterd_chk_peers_connected_befriended (uuid_t skip_uuid);

void
glusterd_get_client_filepath (char *filepath, glusterd_volinfo_t *volinfo,
                              gf_transport_type type);
#endif

@@ -2871,25 +2871,6 @@ generate_single_transport_client_volfile (glusterd_volinfo_t *volinfo,
        return ret;
}

void
get_client_filepath (char *filepath, glusterd_volinfo_t *volinfo, gf_transport_type type)
{
        char              path[PATH_MAX] = {0,};
        glusterd_conf_t  *priv = NULL;

        priv = THIS->private;

        GLUSTERD_GET_VOLUME_DIR (path, volinfo, priv);

        if ((volinfo->transport_type == GF_TRANSPORT_BOTH_TCP_RDMA) &&
            (type == GF_TRANSPORT_RDMA))
                snprintf (filepath, PATH_MAX, "%s/%s.rdma-fuse.vol",
                          path, volinfo->volname);
        else
                snprintf (filepath, PATH_MAX, "%s/%s-fuse.vol",
                          path, volinfo->volname);
}

static void
enumerate_transport_reqs (gf_transport_type type, char **types)
{
@@ -2927,7 +2908,7 @@ generate_client_volfiles (glusterd_volinfo_t *volinfo)
                if (ret)
                        goto out;

                type = transport_str_to_type (types[i]);
                get_client_filepath (filepath, volinfo, type);
                glusterd_get_client_filepath (filepath, volinfo, type);
                ret = generate_single_transport_client_volfile (volinfo,
                                                                filepath,
                                                                dict);

@@ -23,6 +23,7 @@
#endif

#include "common-utils.h"
#include "syscall.h"
#include "cli1-xdr.h"
#include "xdr-generic.h"
#include "glusterd.h"
@@ -1097,6 +1098,73 @@ out:
        return ret;
}

int
glusterd_op_stage_clearlocks_volume (dict_t *dict, char **op_errstr)
{
        int                  ret = -1;
        char                *volname = NULL;
        char                *path = NULL;
        char                *type = NULL;
        char                *kind = NULL;
        glusterd_volinfo_t  *volinfo = NULL;
        char                 msg[2048] = {0,};

        ret = dict_get_str (dict, "volname", &volname);
        if (ret) {
                snprintf (msg, sizeof (msg), "Failed to get volume name");
                gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
                *op_errstr = gf_strdup (msg);
                goto out;
        }

        ret = dict_get_str (dict, "path", &path);
        if (ret) {
                snprintf (msg, sizeof (msg), "Failed to get path");
                gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
                *op_errstr = gf_strdup (msg);
                goto out;
        }

        ret = dict_get_str (dict, "kind", &kind);
        if (ret) {
                snprintf (msg, sizeof (msg), "Failed to get kind");
                gf_log ("", GF_LOG_ERROR, "%s", msg);
                *op_errstr = gf_strdup (msg);
                goto out;
        }

        ret = dict_get_str (dict, "type", &type);
        if (ret) {
                snprintf (msg, sizeof (msg), "Failed to get type");
                gf_log ("", GF_LOG_ERROR, "%s", msg);
                *op_errstr = gf_strdup (msg);
                goto out;
        }

        ret = glusterd_volinfo_find (volname, &volinfo);
        if (ret) {
                snprintf (msg, sizeof (msg), "Volume %s does not exist",
                          volname);
                gf_log ("", GF_LOG_ERROR, "%s", msg);
                *op_errstr = gf_strdup (msg);
                goto out;
        }

        if (!glusterd_is_volume_started (volinfo)) {
                snprintf (msg, sizeof (msg), "Volume %s is not started",
                          volname);
                gf_log ("", GF_LOG_ERROR, "%s", msg);
                *op_errstr = gf_strdup (msg);
                goto out;
        }

        ret = 0;
out:
        gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
        return ret;
}

int
glusterd_op_create_volume (dict_t *dict, char **op_errstr)
{
@@ -1431,3 +1499,328 @@ out:
        return ret;
}

int
glusterd_clearlocks_send_cmd (glusterd_volinfo_t *volinfo, char *cmd,
                              char *path, char *result, char *errstr,
                              int err_len, char *mntpt)
{
        int               ret = -1;
        glusterd_conf_t  *priv = NULL;

        priv = THIS->private;

        ret = sys_lgetxattr (mntpt, cmd, result, PATH_MAX);
        if (ret < 0) {
                snprintf (errstr, err_len, "clear-locks getxattr command "
                          "failed. Reason: %s", strerror (errno));
                gf_log (THIS->name, GF_LOG_DEBUG, "%s", errstr);
                goto out;
        }

        ret = 0;
out:
        return ret;
}
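
The same request can be issued by hand from a client mount when exercising this path; a sketch, assuming GF_XATTR_CLRLK_CMD expands to "glusterfs.clrlk" and using an illustrative mount point and key:

    # equivalent of the sys_lgetxattr() call above
    getfattr -n glusterfs.clrlk.tinode.kgranted /tmp/test-vol.AbC123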

int
glusterd_clearlocks_rmdir_mount (glusterd_volinfo_t *volinfo, char *mntpt)
{
        int               ret = -1;
        glusterd_conf_t  *priv = NULL;

        priv = THIS->private;

        ret = rmdir (mntpt);
        if (ret) {
                gf_log (THIS->name, GF_LOG_DEBUG, "rmdir failed");
                goto out;
        }

        ret = 0;
out:
        return ret;
}

void
glusterd_clearlocks_unmount (glusterd_volinfo_t *volinfo, char *mntpt)
{
        glusterd_conf_t  *priv = NULL;
        runner_t          runner = {0,};
        int               ret = 0;

        priv = THIS->private;

        /* umount failures are ignored. Using stat we could have avoided
         * attempting to unmount a non-existent filesystem. But a failure of
         * stat() on mount can be due to network failures. */
        runinit (&runner);
        runner_add_args (&runner, "/bin/umount", "-f", NULL);
        runner_argprintf (&runner, "%s", mntpt);

        ret = runner_run (&runner);
        if (ret) {
                ret = 0;
                gf_log ("", GF_LOG_DEBUG,
                        "umount failed on maintenance client");
        }

        return;
}

int
glusterd_clearlocks_create_mount (glusterd_volinfo_t *volinfo, char **mntpt)
{
        int               ret = -1;
        glusterd_conf_t  *priv = NULL;
        char              template[PATH_MAX] = {0,};
        char             *tmpl = NULL;

        priv = THIS->private;

        snprintf (template, sizeof (template), "/tmp/%s.XXXXXX",
                  volinfo->volname);
        tmpl = mkdtemp (template);
        if (!tmpl) {
                gf_log (THIS->name, GF_LOG_DEBUG, "Couldn't create temporary "
                        "mount directory. Reason %s", strerror (errno));
                goto out;
        }

        *mntpt = gf_strdup (tmpl);
        ret = 0;
out:
        return ret;
}

int
glusterd_clearlocks_mount (glusterd_volinfo_t *volinfo, char **xl_opts,
                           char *mntpt)
{
        int               ret = -1;
        int               i = 0;
        glusterd_conf_t  *priv = NULL;
        runner_t          runner = {0,};
        char              client_volfpath[PATH_MAX] = {0,};

        priv = THIS->private;

        runinit (&runner);
        glusterd_get_client_filepath (client_volfpath, volinfo,
                                      volinfo->transport_type);
        runner_add_args (&runner, SBIN_DIR"/glusterfs", "-f", NULL);
        runner_argprintf (&runner, "%s", client_volfpath);
        for (i = 0; i < volinfo->brick_count && xl_opts[i]; i++) {
                runner_add_arg (&runner, "--xlator-option");
                runner_argprintf (&runner, "%s", xl_opts[i]);
        }
        runner_argprintf (&runner, "%s", mntpt);

        ret = runner_run (&runner);
        if (ret) {
                gf_log (THIS->name, GF_LOG_DEBUG,
                        "Could not start glusterfs");
                goto out;
        }
        gf_log (THIS->name, GF_LOG_DEBUG,
                "Started glusterfs successfully");

out:
        return ret;
}
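
Put together, the runner above execs something along these lines (volume name, port, and paths are illustrative; SBIN_DIR depends on the build prefix):

    /usr/sbin/glusterfs -f /var/lib/glusterd/vols/test-vol/test-vol-fuse.vol \
        --xlator-option test-vol-client-0.remote-port=24009 /tmp/test-vol.AbC123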

int
glusterd_clearlocks_get_local_client_ports (glusterd_volinfo_t *volinfo,
                                            char **xl_opts)
{
        glusterd_brickinfo_t  *brickinfo = NULL;
        glusterd_conf_t       *priv = NULL;
        int                    index = 0;
        int                    ret = -1;
        int                    i = 0;
        int                    port = 0;

        GF_ASSERT (xl_opts);
        if (!xl_opts) {
                gf_log (THIS->name, GF_LOG_DEBUG, "Should pass non-NULL "
                        "xl_opts");
                goto out;
        }

        priv = THIS->private;

        index = -1;
        list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
                index++;
                if (uuid_compare (brickinfo->uuid, priv->uuid))
                        continue;

                port = pmap_registry_search (THIS, brickinfo->path,
                                             GF_PMAP_PORT_BRICKSERVER);
                if (!port) {
                        ret = -1;
                        gf_log (THIS->name, GF_LOG_DEBUG, "Couldn't get port "
                                "for brick %s:%s", brickinfo->hostname,
                                brickinfo->path);
                        goto out;
                }

                ret = gf_asprintf (&xl_opts[i], "%s-client-%d.remote-port=%d",
                                   volinfo->volname, index, port);
                if (ret == -1) {
                        xl_opts[i] = NULL;
                        goto out;
                }
                i++;
        }

        ret = 0;
out:
        return ret;
}
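
For example, on a peer where only brick index 1 of a two-brick volume is local, the loop would emit a single entry such as test-vol-client-1.remote-port=24010 (name and port hypothetical); glusterd_clearlocks_mount() then hands each entry to the glusterfs client via --xlator-option, so the corresponding client translator connects on the registered brick port directly.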

int
glusterd_op_clearlocks_volume (dict_t *dict, char **op_errstr)
{
        int32_t              ret = -1;
        int                  i = 0;
        char                *volname = NULL;
        char                *path = NULL;
        char                *kind = NULL;
        char                *type = NULL;
        char                *opts = NULL;
        char                *cmd_str = NULL;
        char                *free_ptr = NULL;
        char                 msg[PATH_MAX] = {0,};
        char                 result[PATH_MAX] = {0,};
        char                *mntpt = NULL;
        char               **xl_opts = NULL;
        dict_t              *ctx = NULL;
        glusterd_volinfo_t  *volinfo = NULL;

        ret = dict_get_str (dict, "volname", &volname);
        if (ret) {
                gf_log (THIS->name, GF_LOG_ERROR, "Failed to get volume name");
                goto out;
        }
        gf_log ("", GF_LOG_DEBUG, "Performing clearlocks on volume %s",
                volname);

        ret = dict_get_str (dict, "path", &path);
        if (ret) {
                gf_log (THIS->name, GF_LOG_ERROR, "Failed to get path");
                goto out;
        }

        ret = dict_get_str (dict, "kind", &kind);
        if (ret) {
                gf_log (THIS->name, GF_LOG_ERROR, "Failed to get kind");
                goto out;
        }

        ret = dict_get_str (dict, "type", &type);
        if (ret) {
                gf_log (THIS->name, GF_LOG_ERROR, "Failed to get type");
                goto out;
        }

        ret = dict_get_str (dict, "opts", &opts);
        if (ret)
                ret = 0;

        gf_log (THIS->name, GF_LOG_INFO, "Received clear-locks request for "
                "volume %s with kind %s type %s and options %s", volname,
                kind, type, opts);

        if (opts)
                ret = gf_asprintf (&cmd_str, GF_XATTR_CLRLK_CMD".t%s.k%s.%s",
                                   type, kind, opts);
        else
                ret = gf_asprintf (&cmd_str, GF_XATTR_CLRLK_CMD".t%s.k%s",
                                   type, kind);
if (ret == -1)
goto out;
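        /* Illustrative, not part of the original change: with type "inode",
         * kind "granted" and opts "0,0-17", and assuming GF_XATTR_CLRLK_CMD
         * expands to "glusterfs.clrlk", cmd_str would read
         * "glusterfs.clrlk.tinode.kgranted.0,0-17". */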
        ret = glusterd_volinfo_find (volname, &volinfo);
        if (ret) {
                snprintf (msg, sizeof (msg), "Volume %s doesn't exist.",
                          volname);
                gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
                goto out;
        }

        xl_opts = GF_CALLOC (volinfo->brick_count + 1, sizeof (char *),
                             gf_gld_mt_charptr);
        if (!xl_opts)
                goto out;

        ret = glusterd_clearlocks_get_local_client_ports (volinfo, xl_opts);
        if (ret) {
                snprintf (msg, sizeof (msg), "Couldn't get port numbers of "
                          "local bricks");
                gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
                goto out;
        }

        ret = glusterd_clearlocks_create_mount (volinfo, &mntpt);
        if (ret) {
                snprintf (msg, sizeof (msg), "Creating mount directory "
                          "for clear-locks failed.");
                gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
                goto out;
        }

        ret = glusterd_clearlocks_mount (volinfo, xl_opts, mntpt);
        if (ret) {
                snprintf (msg, sizeof (msg), "Failed to mount clear-locks "
                          "maintenance client.");
                gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
                goto out;
        }

        ret = glusterd_clearlocks_send_cmd (volinfo, cmd_str, path, result,
                                            msg, sizeof (msg), mntpt);
        if (ret) {
                gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
                goto umount;
        }

        ctx = glusterd_op_get_ctx ();
        if (!ctx)
                /* Impossible. Only the originator glusterd can
                 * come here. */
                goto umount;

        free_ptr = gf_strdup (result);
        if (dict_set_dynstr (ctx, "lk-summary", free_ptr)) {
                GF_FREE (free_ptr);
                snprintf (msg, sizeof (msg), "Failed to set clear-locks "
                          "result");
                gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
        }

umount:
        glusterd_clearlocks_unmount (volinfo, mntpt);

        if (glusterd_clearlocks_rmdir_mount (volinfo, mntpt))
                gf_log (THIS->name, GF_LOG_WARNING, "Couldn't remove "
                        "clear-locks mount point");

out:
        if (ret)
                *op_errstr = gf_strdup (msg);

        if (xl_opts) {
                for (i = 0; i < volinfo->brick_count && xl_opts[i]; i++)
                        GF_FREE (xl_opts[i]);
                GF_FREE (xl_opts);
        }

        if (cmd_str)
                GF_FREE (cmd_str);

        if (mntpt)
                GF_FREE (mntpt);

        return ret;
}

@@ -78,6 +78,7 @@ typedef enum glusterd_op_ {
        GD_OP_HEAL_VOLUME,
        GD_OP_STATEDUMP_VOLUME,
        GD_OP_LIST_VOLUME,
        GD_OP_CLEARLOCKS_VOLUME,
        GD_OP_MAX,
} glusterd_op_t;
@@ -537,6 +538,7 @@ int32_t glusterd_op_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx);
/* removed other definitions as they have been defined elsewhere in this file */
int glusterd_handle_cli_statedump_volume (rpcsvc_request_t *req);
int glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req);

int glusterd_handle_defrag_start (glusterd_volinfo_t *volinfo, char *op_errstr,
                                  size_t len, int cmd, defrag_cbk_fn_t cbk);
@@ -576,6 +578,7 @@ int glusterd_op_rebalance (dict_t *dict, char **op_errstr, dict_t *rsp_dict);
int glusterd_op_stage_statedump_volume (dict_t *dict, char **op_errstr);
int glusterd_op_statedump_volume (dict_t *dict, char **op_errstr);
int glusterd_op_stage_clearlocks_volume (dict_t *dict, char **op_errstr);
int glusterd_op_clearlocks_volume (dict_t *dict, char **op_errstr);

/* misc */
void glusterd_do_replace_brick (void *data);
int glusterd_op_perform_remove_brick (glusterd_volinfo_t *volinfo, char *brick,