rpc: add owner xlator argument to rpc_clnt_new
The @owner argument tells the RPC layer which xlator owns the connection, and to which xlator THIS needs to be set during network notifications such as CONNECT and DISCONNECT.

Code paths that originate from the head of a (volume) graph and use STACK_WIND ensure that the RPC local endpoint has the right xlator saved in the frame of the call (call/callback pair). This guarantees that the callback is executed in the right xlator context.

The client handshake process, which includes fetching brick ports from glusterd and setting the lk-version on the brick for the session, does not have the correct xlator set in its frames. The problem lies with RPC notifications: there is no provision to set THIS to the xlator that is registered with the corresponding RPC programs. For example, the RPC_CLNT_CONNECT event received by protocol/client does not have THIS set to its xlator, which implies that calls (and callbacks) originating from that thread do not have the right xlator set either.

The fix is to save the xlator registered with the RPC connection during rpc_clnt_new. For example, protocol/client's xlator is saved with the RPC connection that it 'owns'. RPC notifications such as CONNECT and DISCONNECT then inherit THIS from the RPC connection's owner xlator.

Change-Id: I9dea2c35378c511d800ef58f7fa2ea5552f2c409
BUG: 1235582
Signed-off-by: Krishnan Parthasarathi <kparthas@redhat.com>
Reviewed-on: http://review.gluster.org/11436
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Tested-by: NetBSD Build System <jenkins@build.gluster.org>
Reviewed-by: Raghavendra G <rgowdapp@redhat.com>
commit f7668938cd
parent 546f66f546
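For illustration only (none of the code below is part of this commit), here is a minimal sketch of how a client-side xlator would use the new signature: it passes itself as @owner, and rpc_clnt_notify() saves/restores THIS around event dispatch so the registered notify callback runs in the owner xlator's context. The names my_clnt_notify and my_clnt_init are hypothetical.

/* Hypothetical usage sketch, not part of this commit. */

#include "xlator.h"
#include "rpc-clnt.h"

static int
my_clnt_notify (struct rpc_clnt *rpc, void *mydata,
                rpc_clnt_event_t event, void *data)
{
        xlator_t *this = mydata;

        switch (event) {
        case RPC_CLNT_CONNECT:
                /* With this change, THIS == the @owner passed to
                 * rpc_clnt_new() below, because rpc_clnt_notify()
                 * does RPC_THIS_SAVE (clnt->owner) before dispatching
                 * the event.  Calls wound from here carry the right
                 * xlator in their frames. */
                gf_log (this->name, GF_LOG_INFO, "connected");
                break;
        case RPC_CLNT_DISCONNECT:
                gf_log (this->name, GF_LOG_INFO, "disconnected");
                break;
        default:
                break;
        }

        return 0;
}

int
my_clnt_init (xlator_t *this)
{
        struct rpc_clnt *rpc = NULL;

        /* Old call: rpc_clnt_new (this->options, this->ctx, this->name, 0);
         * New call: the xlator itself is the connection's owner. */
        rpc = rpc_clnt_new (this->options, this, this->name, 0);
        if (!rpc)
                return -1;

        return rpc_clnt_register_notify (rpc, my_clnt_notify, this);
}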
@@ -249,7 +249,6 @@ int glfs_first_lookup (xlator_t *subvol);
 void glfs_process_upcall_event (struct glfs *fs, void *data)
         GFAPI_PRIVATE(glfs_process_upcall_event, 3.7.0);
 
-#define DECLARE_OLD_THIS        xlator_t *old_THIS = NULL
 
 #define __GLFS_ENTRY_VALIDATE_FS(fs, label)                     \
         do {                                                    \
@@ -863,7 +863,7 @@ glfs_mgmt_init (struct glfs *fs)
         if (ret)
                 goto out;
 
-        rpc = rpc_clnt_new (options, ctx, THIS->name, 8);
+        rpc = rpc_clnt_new (options, THIS, THIS->name, 8);
         if (!rpc) {
                 ret = -1;
                 gf_msg (THIS->name, GF_LOG_WARNING, 0,
@@ -126,7 +126,7 @@ cli_quotad_clnt_init (xlator_t *this, dict_t *options)
         if (ret)
                 goto out;
 
-        rpc = rpc_clnt_new (options, this->ctx, this->name, 16);
+        rpc = rpc_clnt_new (options, this, this->name, 16);
         if (!rpc)
                 goto out;
 
@@ -626,7 +626,7 @@ cli_rpc_init (struct cli_state *state)
                 goto out;
         }
 
-        rpc = rpc_clnt_new (options, this->ctx, this->name, 16);
+        rpc = rpc_clnt_new (options, this, this->name, 16);
         if (!rpc)
                 goto out;
 
@@ -2033,7 +2033,7 @@ glusterfs_mgmt_init (glusterfs_ctx_t *ctx)
         if (ret)
                 goto out;
 
-        rpc = rpc_clnt_new (options, THIS->ctx, THIS->name, 8);
+        rpc = rpc_clnt_new (options, THIS, THIS->name, 8);
         if (!rpc) {
                 ret = -1;
                 gf_log (THIS->name, GF_LOG_WARNING, "failed to create rpc clnt");
@@ -61,6 +61,7 @@
 
 /* THIS */
 #define THIS (*__glusterfs_this_location())
+#define DECLARE_OLD_THIS        xlator_t *old_THIS = THIS
 
 xlator_t **__glusterfs_this_location ();
 xlator_t *glusterfs_this_get ();
@@ -810,6 +810,16 @@ out:
 static void
 rpc_clnt_destroy (struct rpc_clnt *rpc);
 
+#define RPC_THIS_SAVE(xl) do {                                  \
+        old_THIS = THIS ;                                       \
+        if (!old_THIS)                                          \
+                gf_log_callingfn ("rpc", GF_LOG_CRITICAL,       \
+                                  "THIS is not initialised.");  \
+        THIS = xl;                                              \
+} while (0)
+
+#define RPC_THIS_RESTORE        (THIS = old_THIS)
+
 int
 rpc_clnt_notify (rpc_transport_t *trans, void *mydata,
                  rpc_transport_event_t event, void *data, ...)
@@ -821,6 +831,7 @@ rpc_clnt_notify (rpc_transport_t *trans, void *mydata,
         rpc_transport_pollin_t *pollin = NULL;
         struct timespec ts = {0, };
         void *clnt_mydata = NULL;
+        DECLARE_OLD_THIS;
 
         conn = mydata;
         if (conn == NULL) {
@@ -830,6 +841,8 @@ rpc_clnt_notify (rpc_transport_t *trans, void *mydata,
         if (!clnt)
                 goto out;
 
+        RPC_THIS_SAVE (clnt->owner);
+
         switch (event) {
         case RPC_TRANSPORT_DISCONNECT:
         {
@@ -930,6 +943,7 @@ rpc_clnt_notify (rpc_transport_t *trans, void *mydata,
         }
 
 out:
+        RPC_THIS_RESTORE;
         return ret;
 }
 
@@ -1034,11 +1048,13 @@ out:
 }
 
 struct rpc_clnt *
-rpc_clnt_new (dict_t *options, glusterfs_ctx_t *ctx, char *name,
+rpc_clnt_new (dict_t *options, xlator_t *owner, char *name,
               uint32_t reqpool_size)
 {
         int               ret  = -1;
         struct rpc_clnt  *rpc  = NULL;
+        glusterfs_ctx_t  *ctx  = owner->ctx;
+
 
         rpc = GF_CALLOC (1, sizeof (*rpc), gf_common_mt_rpcclnt_t);
         if (!rpc) {
@@ -1047,6 +1063,7 @@ rpc_clnt_new (dict_t *options, glusterfs_ctx_t *ctx, char *name,
         pthread_mutex_init (&rpc->lock, NULL);
         rpc->ctx = ctx;
+        rpc->owner = owner;
 
         if (!reqpool_size)
                 reqpool_size = RPC_CLNT_DEFAULT_REQUEST_COUNT;
 
@@ -188,9 +188,11 @@ typedef struct rpc_clnt {
         int                    refcount;
         int                    auth_null;
         char                   disabled;
+        xlator_t              *owner;
 } rpc_clnt_t;
 
-struct rpc_clnt *rpc_clnt_new (dict_t *options, glusterfs_ctx_t *ctx,
+
+struct rpc_clnt *rpc_clnt_new (dict_t *options, xlator_t *owner,
                                char *name, uint32_t reqpool_size);
 
 int rpc_clnt_start (struct rpc_clnt *rpc);
@@ -53,7 +53,7 @@ changelog_rpc_client_init (xlator_t *this, void *cbkdata,
                 goto dealloc_dict;
         }
 
-        rpc = rpc_clnt_new (options, this->ctx, this->name, 16);
+        rpc = rpc_clnt_new (options, this, this->name, 16);
         if (!rpc)
                 goto dealloc_dict;
 
@@ -448,7 +448,7 @@ quota_enforcer_init (xlator_t *this, dict_t *options)
         if (ret)
                 goto out;
 
-        rpc = rpc_clnt_new (options, this->ctx, this->name, 16);
+        rpc = rpc_clnt_new (options, this, this->name, 16);
         if (!rpc) {
                 ret = -1;
                 goto out;
@@ -80,7 +80,7 @@ svs_mgmt_init (xlator_t *this)
                 goto out;
         }
 
-        priv->rpc = rpc_clnt_new (options, this->ctx, this->name, 8);
+        priv->rpc = rpc_clnt_new (options, this, this->name, 8);
         if (!priv->rpc) {
                 gf_log (this->name, GF_LOG_ERROR, "failed to initialize RPC");
                 goto out;
@@ -46,7 +46,7 @@ glusterd_conn_init (glusterd_conn_t *conn, char *sockpath,
                 goto out;
 
         /* @options is free'd by rpc_transport when destroyed */
-        rpc = rpc_clnt_new (options, this->ctx, (char *)svc->name, 16);
+        rpc = rpc_clnt_new (options, this, (char *)svc->name, 16);
         if (!rpc) {
                 ret = -1;
                 goto out;
@@ -3291,7 +3291,7 @@ glusterd_rpc_create (struct rpc_clnt **rpc,
         GF_ASSERT (options);
 
         /* TODO: is 32 enough? or more ? */
-        new_rpc = rpc_clnt_new (options, this->ctx, this->name, 16);
+        new_rpc = rpc_clnt_new (options, this, this->name, 16);
         if (!new_rpc)
                 goto out;
 
@@ -1050,7 +1050,7 @@ nlm4_establish_callback (void *csarg)
         }
 
         /* TODO: is 32 frames in transit enough ? */
-        rpc_clnt = rpc_clnt_new (options, cs->nfsx->ctx, "NLM-client", 32);
+        rpc_clnt = rpc_clnt_new (options, cs->nfsx, "NLM-client", 32);
         if (rpc_clnt == NULL) {
                 gf_msg (GF_NLM, GF_LOG_ERROR, EINVAL, NFS_MSG_INVALID_ENTRY,
                         "rpc_clnt NULL");
@@ -2281,7 +2281,7 @@ client_init_rpc (xlator_t *this)
                 goto out;
         }
 
-        conf->rpc = rpc_clnt_new (this->options, this->ctx, this->name, 0);
+        conf->rpc = rpc_clnt_new (this->options, this, this->name, 0);
         if (!conf->rpc) {
                 gf_msg (this->name, GF_LOG_ERROR, 0, PC_MSG_RPC_INIT_FAILED,
                         "failed to initialize RPC");