cli: memory leak issues reported by asan

With this fix, a run of 'rpc-coverage.t' passes cleanly.
This should help get other such fixes started soon!

Change-Id: I257ae4e28b9974998a451d3b490cc18c02650ba2
updates: bz#1633930
Signed-off-by: Amar Tumballi <amarts@redhat.com>
Author:    Amar Tumballi <amarts@redhat.com>
Date:      2018-10-08 13:21:38 +05:30
Committer: Shyamsundar Ranganathan
Parent:    8d4c5e022b
Commit:    3fa73bfdb5
3 changed files, 17 insertions(+), 5 deletions(-)
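
All eleven hunks in cli-rpc-ops.c below plug the same leak: on the submit path, the command dict is serialized into a heap buffer hung off req.dict.dict_val (dict_allocate_and_serialize() does the allocation), and that buffer was never released after the RPC was handed off. Each hunk frees it at the function's common out: exit; GF_FREE() ignores NULL, so this stays safe on error paths that jump to out: before anything was serialized. Below is a minimal, self-contained sketch of the pattern in plain C; the struct and function names are hypothetical, and malloc/free stand in for gluster's GF_MALLOC/GF_FREE.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the gluster request type: the real code
 * serializes a dict_t into req.dict.dict_val with
 * dict_allocate_and_serialize() before submitting the RPC. */
struct fake_req {
    char *dict_val;  /* heap buffer owned by the caller */
    size_t dict_len;
};

/* Serialize "payload" into a freshly allocated buffer (analogue of
 * dict_allocate_and_serialize). Returns 0 on success. */
static int
serialize_dict(const char *payload, struct fake_req *req)
{
    req->dict_len = strlen(payload) + 1;
    req->dict_val = malloc(req->dict_len);
    if (!req->dict_val)
        return -1;
    memcpy(req->dict_val, payload, req->dict_len);
    return 0;
}

/* Mirrors the structure of the patched gf_cli_* functions: every path
 * funnels through "out:", where the serialized buffer is released.
 * free(NULL) is a no-op, just like GF_FREE(NULL), so the error path
 * where serialization never happened is safe too. */
static int
submit_op(const char *payload)
{
    struct fake_req req = {0};
    int ret = -1;

    ret = serialize_dict(payload, &req);
    if (ret)
        goto out;

    /* stand-in for cli_to_glusterd()/cli_cmd_submit() */
    printf("submitting %zu bytes\n", req.dict_len);
    ret = 0;
out:
    free(req.dict_val); /* the line each hunk below adds, via GF_FREE */
    return ret;
}

int
main(void)
{
    return submit_op("volname=demo");
}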

--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c

@@ -4478,6 +4478,7 @@ gf_cli_get_state(call_frame_t *frame, xlator_t *this, void *data)
                           GLUSTER_CLI_GET_STATE, this, cli_rpc_prog, NULL);
 
 out:
     gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+    GF_FREE(req.dict.dict_val);
     return ret;
 }
@@ -4615,6 +4616,7 @@ gf_cli3_1_uuid_get(call_frame_t *frame, xlator_t *this, void *data)
                           this, cli_rpc_prog, NULL);
 
 out:
     gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+    GF_FREE(req.dict.dict_val);
     return ret;
 }
@@ -4638,6 +4640,7 @@ gf_cli3_1_uuid_reset(call_frame_t *frame, xlator_t *this, void *data)
                           GLUSTER_CLI_UUID_RESET, this, cli_rpc_prog, NULL);
 
 out:
     gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+    GF_FREE(req.dict.dict_val);
     return ret;
 }
@@ -4718,6 +4721,7 @@ gf_cli_start_volume(call_frame_t *frame, xlator_t *this, void *data)
 
 out:
     gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
 
+    GF_FREE(req.dict.dict_val);
     return ret;
 }
@@ -4744,6 +4748,7 @@ gf_cli_stop_volume(call_frame_t *frame, xlator_t *this, void *data)
 
 out:
     gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
 
+    GF_FREE(req.dict.dict_val);
     return ret;
 }
@@ -4770,6 +4775,7 @@ gf_cli_defrag_volume(call_frame_t *frame, xlator_t *this, void *data)
 
 out:
     gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
 
+    GF_FREE(req.dict.dict_val);
     return ret;
 }
@@ -4830,6 +4836,7 @@ gf_cli_reset_volume(call_frame_t *frame, xlator_t *this, void *data)
 
 out:
     gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
 
+    GF_FREE(req.dict.dict_val);
     return ret;
 }
@@ -4855,6 +4862,7 @@ gf_cli_set_volume(call_frame_t *frame, xlator_t *this, void *data)
 
 out:
     gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
 
+    GF_FREE(req.dict.dict_val);
     return ret;
 }
@@ -8473,6 +8481,7 @@ gf_cli_status_volume(call_frame_t *frame, xlator_t *this, void *data)
                           GLUSTER_CLI_STATUS_VOLUME, this, cli_rpc_prog, NULL);
 
 out:
     gf_log("cli", GF_LOG_DEBUG, "Returning: %d", ret);
+    GF_FREE(req.dict.dict_val);
     return ret;
 }
@@ -11172,6 +11181,7 @@ gf_cli_snapshot_for_delete(call_frame_t *frame, xlator_t *this, void *data)
 
 out:
     if (snap_dict)
         dict_unref(snap_dict);
+    GF_FREE(req.dict.dict_val);
     return ret;
 }
@@ -11265,6 +11275,7 @@ gf_cli_snapshot_for_status(call_frame_t *frame, xlator_t *this, void *data)
 
 out:
     if (snap_dict)
         dict_unref(snap_dict);
+    GF_FREE(req.dict.dict_val);
     return ret;
 }

--- a/cli/src/cli.c
+++ b/cli/src/cli.c

@@ -618,9 +618,6 @@ cli_rpc_init(struct cli_state *state)
     this = THIS;
     cli_rpc_prog = &cli_prog;
 
-    options = dict_new();
-    if (!options)
-        goto out;
 
     /* Connect to glusterd using the specified method, giving preference
      * to a unix socket connection. If nothing is specified, connect to
@@ -640,6 +637,11 @@ cli_rpc_init(struct cli_state *state)
                "Connecting to remote glusterd at "
                "%s",
                state->remote_host);
+
+        options = dict_new();
+        if (!options)
+            goto out;
+
         ret = dict_set_str(options, "remote-host", state->remote_host);
         if (ret)
             goto out;
@@ -652,7 +654,6 @@ cli_rpc_init(struct cli_state *state)
             goto out;
 
         ret = dict_set_str(options, "transport.address-family", addr_family);
         if (ret)
             goto out;
-
     } else {
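
The cli.c change fixes a different leak. cli_rpc_init() created the options dict up front, but only the remote-host branch actually consumes that dict: in the surrounding code (not shown in the hunk), the "} else {" branch obtains its options from rpc_transport_unix_options_build(), which allocates a dict of its own, so the eagerly created one was overwritten and leaked on every local CLI run. Moving dict_new() into the branch that uses it removes the leak. Here is a minimal sketch of the ownership problem, with hypothetical names and a plain calloc/free stand-in for dict_t:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for dict_t: a single heap allocation is enough
 * to demonstrate the ownership problem. */
typedef struct {
    char name[32];
} options_t;

/* Analogue of rpc_transport_unix_options_build(): it allocates its own
 * options object and hands it back through the out-parameter. Anything
 * the caller had already stored there is silently lost. */
static int
build_unix_options(options_t **out)
{
    *out = calloc(1, sizeof(**out));
    if (!*out)
        return -1;
    snprintf((*out)->name, sizeof((*out)->name), "unix-socket");
    return 0;
}

static int
rpc_init(const char *remote_host)
{
    options_t *options = NULL;
    int ret = -1;

    /* Before the patch the allocation sat here, unconditionally.
     * On the else path below it would be overwritten and leaked. */
    if (remote_host) {
        options = calloc(1, sizeof(*options)); /* allocate only where used */
        if (!options)
            goto out;
        snprintf(options->name, sizeof(options->name), "%s", remote_host);
    } else {
        ret = build_unix_options(&options); /* allocates its own object */
        if (ret)
            goto out;
    }

    printf("connecting via %s\n", options->name);
    ret = 0;
out:
    free(options);
    return ret;
}

int
main(void)
{
    /* "gd.example.com" is a placeholder hostname. */
    return rpc_init(NULL) || rpc_init("gd.example.com");
}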

--- a/tests/basic/rpc-coverage.t
+++ b/tests/basic/rpc-coverage.t

@@ -437,7 +437,7 @@ function run_tests()
     test_chmod;
     test_chown;
     test_utimes;
-    if $run_lock_tests; then
+    if [ "$run_lock_tests" = "1" ]; then
         test_locks;
     fi
     test_readdir;
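
The rpc-coverage.t change is a shell fix rather than a C one: `if $run_lock_tests` expands the variable and tries to execute its value as a command, so a flag value like 1 is looked up as a command name, fails, and the lock tests are silently skipped. Comparing the value as a string tests the flag as intended. A quick illustration, in the script's own shell:

#!/bin/sh
# Quick illustration of the conditional being fixed above.

run_lock_tests=1

# Old form: the shell expands the variable and tries to *execute* "1"
# as a command. That fails ("1: command not found"), so the branch is
# skipped even though the flag is set.
if $run_lock_tests; then
    echo "old form: lock tests would run"
fi

# New form: compare the flag's value as a string instead.
if [ "$run_lock_tests" = "1" ]; then
    echo "new form: lock tests run"
fi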