glusterd: Maintain local xaction_peer list for op-sm

http://review.gluster.org/9269 addresses maintaining a local xaction_peers list in
the syncop and mgmt_v3 frameworks. This patch maintains a local xaction_peers list
for the op-sm framework as well.
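For orientation, here is a minimal, self-contained C sketch of the pattern this series adopts. None of the names below come from glusterd; they only illustrate the idea that each transaction builds its own list of the peers it will operate on, walks that list through its phases, and frees only the list wrappers on completion, since the peer objects remain owned by the global peer list.

/* Illustrative sketch only -- types and helpers are not the glusterd code. */
#include <stdio.h>
#include <stdlib.h>

struct peer {
        char         name[32];
        struct peer *next;           /* global peer list linkage */
};

struct local_peer {
        struct peer       *peerinfo; /* borrowed reference, not owned */
        struct local_peer *next;     /* per-transaction linkage */
};

/* Snapshot the global peer list into a per-transaction list. */
static struct local_peer *
build_local_xaction_peers (struct peer *global)
{
        struct local_peer *head = NULL;

        for (; global; global = global->next) {
                struct local_peer *lp = calloc (1, sizeof (*lp));
                if (!lp)
                        break;
                lp->peerinfo = global;
                lp->next = head;
                head = lp;
        }
        return head;
}

/* Free only the wrappers; peerinfo stays owned by the global list. */
static void
cleanup_local_xaction_peers (struct local_peer *head)
{
        while (head) {
                struct local_peer *tmp = head->next;
                free (head);
                head = tmp;
        }
}

int
main (void)
{
        struct peer        p2 = { "peer-2", NULL };
        struct peer        p1 = { "peer-1", &p2 };
        struct local_peer *txn_peers = build_local_xaction_peers (&p1);
        struct local_peer *lp;

        for (lp = txn_peers; lp; lp = lp->next)
                printf ("sending op to %s\n", lp->peerinfo->name);

        cleanup_local_xaction_peers (txn_peers);
        return 0;
}

Keeping the list per transaction means concurrent transactions no longer share, and race on, a single priv->xaction_peers list, mirroring the syncop and mgmt_v3 changes referenced above.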

Change-Id: Idd8484463fed196b3b18c2df7f550a3302c6e138
BUG: 1204727
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: http://review.gluster.org/9972
Reviewed-by: Anand Nekkunti <anekkunt@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
Tested-by: Krishnan Parthasarathi <kparthas@redhat.com>
Author: Atin Mukherjee, 2015-03-24 11:27:52 +05:30; committed by Krishnan Parthasarathi
parent f64666f113
commit 087ad8a001
8 changed files with 62 additions and 47 deletions

View File

@@ -150,6 +150,7 @@ enum gf_common_mem_types_ {
gf_common_mt_nfs_exports = 131,
gf_common_mt_gf_brick_spec_t = 132,
gf_common_mt_gf_timer_entry_t = 133,
gf_common_mt_list_head_t = 134,
gf_common_mt_end
};
#endif

View File

@@ -625,10 +625,21 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
gf_log (this->name, GF_LOG_DEBUG, "Acquired lock on localhost");
local_locking_done:
txn_op_info.local_xaction_peers =
GF_CALLOC (1, sizeof (struct cds_list_head),
gf_common_mt_list_head_t);
if (!txn_op_info.local_xaction_peers) {
ret = -1;
gf_log (this->name, GF_LOG_ERROR, "Out of memory");
goto out;
}
CDS_INIT_LIST_HEAD (txn_op_info.local_xaction_peers);
CDS_INIT_LIST_HEAD (&priv->xaction_peers);
npeers = gd_build_peers_list (&priv->peers, &priv->xaction_peers, op);
/* Maintain xaction_peers on per transaction basis */
npeers = gd_build_local_xaction_peers_list
(&priv->peers,
txn_op_info.local_xaction_peers,
op);
/* If no volname is given as a part of the command, locks will
* not be held, hence sending stage event. */

View File

@@ -313,6 +313,9 @@ glusterd_clear_txn_opinfo (uuid_t *txn_id)
dict_del(priv->glusterd_txn_opinfo, uuid_utoa (*txn_id));
if (txn_op_info.local_xaction_peers)
GF_FREE (txn_op_info.local_xaction_peers);
gf_log ("", GF_LOG_DEBUG,
"Successfully cleared opinfo for transaction ID : %s",
uuid_utoa (*txn_id));
@@ -2915,8 +2918,8 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
priv = this->private;
GF_ASSERT (priv);
cds_list_for_each_entry (peerinfo, &priv->xaction_peers,
op_peers_list) {
list_for_each_local_xaction_peers (peerinfo,
opinfo.local_xaction_peers) {
GF_ASSERT (peerinfo);
if (!peerinfo->connected || !peerinfo->mgmt)
@@ -3005,8 +3008,8 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
priv = this->private;
GF_ASSERT (priv);
cds_list_for_each_entry (peerinfo, &priv->xaction_peers,
op_peers_list) {
list_for_each_local_xaction_peers (peerinfo,
opinfo.local_xaction_peers) {
GF_ASSERT (peerinfo);
if (!peerinfo->connected || !peerinfo->mgmt ||
@@ -3558,8 +3561,8 @@ glusterd_op_ac_send_stage_op (glusterd_op_sm_event_t *event, void *ctx)
if (op == GD_OP_REPLACE_BRICK)
glusterd_rb_use_rsp_dict (NULL, rsp_dict);
cds_list_for_each_entry (peerinfo, &priv->xaction_peers,
op_peers_list) {
list_for_each_local_xaction_peers (peerinfo,
opinfo.local_xaction_peers) {
GF_ASSERT (peerinfo);
if (!peerinfo->connected || !peerinfo->mgmt)
@@ -4208,9 +4211,8 @@ glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
goto out;
}
cds_list_for_each_entry (peerinfo, &priv->xaction_peers,
op_peers_list) {
list_for_each_local_xaction_peers (peerinfo,
opinfo.local_xaction_peers) {
GF_ASSERT (peerinfo);
if (!peerinfo->connected || !peerinfo->mgmt)
@@ -4525,7 +4527,7 @@ glusterd_op_txn_complete (uuid_t *txn_id)
glusterd_op_clear_op ();
glusterd_op_reset_ctx ();
glusterd_op_clear_errstr ();
glusterd_op_clear_xaction_peers ();
gd_cleanup_local_xaction_peers_list (opinfo.local_xaction_peers);
/* Based on the op-version, we release the cluster or mgmt_v3 lock */
if (priv->op_version < GD_OP_VERSION_3_6_0) {

View File

@@ -103,6 +103,8 @@ struct glusterd_op_info_ {
int32_t op_errno;
char *op_errstr;
struct cds_list_head pending_bricks;
struct cds_list_head *local_xaction_peers;
};
typedef struct glusterd_op_info_ glusterd_op_info_t;

View File

@@ -1128,25 +1128,6 @@ unlock:
return npeers;
}
void
gd_cleanup_local_xaction_peers_list (struct cds_list_head *xact_peers)
{
glusterd_local_peers_t *local_peers = NULL;
glusterd_local_peers_t *tmp = NULL;
GF_ASSERT (xact_peers);
if (cds_list_empty (xact_peers))
return;
cds_list_for_each_entry_safe (local_peers, tmp, xact_peers,
op_peers_list) {
GF_FREE (local_peers);
/* local_peers->peerinfo need not be freed because it does not
 * take ownership of peerinfo, but merely refers to it */
}
}
int
gd_lock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
char **op_errstr, int npeers, uuid_t txn_id,
@@ -1615,7 +1596,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
gf_boolean_t is_acquired = _gf_false;
uuid_t *txn_id = NULL;
struct cds_list_head xaction_peers = {0,};
glusterd_op_info_t txn_opinfo;
glusterd_op_info_t txn_opinfo = {{0},};
this = THIS;
GF_ASSERT (this);

View File

@@ -36,17 +36,6 @@
synclock_lock (&conf->big_lock); \
} while (0)
#define list_for_each_local_xaction_peers(xact_peer, xact_peers_head) \
glusterd_local_peers_t *pos = NULL; \
for (pos = cds_list_entry ((xact_peers_head)->next, \
glusterd_local_peers_t, op_peers_list), \
xact_peer = pos->peerinfo; \
&pos->op_peers_list != (xact_peers_head); \
pos = cds_list_entry(pos->op_peers_list.next, \
glusterd_local_peers_t, op_peers_list), \
xact_peer = pos->peerinfo)
int gd_syncop_submit_request (struct rpc_clnt *rpc, void *req, void *local,
void *cookie, rpc_clnt_prog_t *prog, int procnum,
fop_cbk_fn_t cbkfn, xdrproc_t xdrproc);
@@ -78,9 +67,6 @@ gd_build_local_xaction_peers_list (struct cds_list_head *peers,
struct cds_list_head *xact_peers,
glusterd_op_t op);
void
gd_cleanup_local_xaction_peers_list (struct cds_list_head *peers);
int
gd_brick_op_phase (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
char **op_errstr);

View File

@@ -9965,3 +9965,22 @@ glusterd_list_add_order (struct cds_list_head *new, struct cds_list_head *head,
cds_list_add_rcu (new, pos);
}
void
gd_cleanup_local_xaction_peers_list (struct cds_list_head *xact_peers)
{
glusterd_local_peers_t *local_peers = NULL;
glusterd_local_peers_t *tmp = NULL;
GF_ASSERT (xact_peers);
if (cds_list_empty (xact_peers))
return;
cds_list_for_each_entry_safe (local_peers, tmp, xact_peers,
op_peers_list) {
GF_FREE (local_peers);
/* local_peers->peerinfo need not be freed because it does not
 * take ownership of peerinfo, but merely refers to it */
}
}

View File

@@ -46,6 +46,16 @@
*active_count = *active_count + 1;\
} while (0)
#define list_for_each_local_xaction_peers(xact_peer, xact_peers_head) \
glusterd_local_peers_t *pos = NULL; \
for (pos = cds_list_entry ((xact_peers_head)->next, \
glusterd_local_peers_t, op_peers_list), \
xact_peer = pos->peerinfo; \
&pos->op_peers_list != (xact_peers_head); \
pos = cds_list_entry(pos->op_peers_list.next, \
glusterd_local_peers_t, op_peers_list), \
xact_peer = pos->peerinfo)
struct glusterd_lock_ {
uuid_t owner;
time_t timestamp;
@@ -698,4 +708,7 @@ glusterd_list_add_order (struct cds_list_head *new, struct cds_list_head *head,
int (*compare)(struct cds_list_head *,
struct cds_list_head *));
void
gd_cleanup_local_xaction_peers_list (struct cds_list_head *peers);
#endif