libglusterfs/client_t client_t implementation, phase 1

Implementation of client_t

The feature page for client_t is at
http://www.gluster.org/community/documentation/index.php/Planning34/client_t

In addition to adding libglusterfs/client_t.[ch] it also extracts/moves
the locktable functionality from xlators/protocol/server to libglusterfs,
where it is used; thus it may now be shared by other xlators too.

This patch is large as it is. Hooking up the state dump is left to do
in phase 2 of this patch set.

(N.B. this change/patch-set supersedes previous change 3689, which was
corrupted during a rebase. That change will be abandoned.)

BUG: 849630
Change-Id: I1433743190630a6d8119a72b81439c0c4c990340
Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
Reviewed-on: http://review.gluster.org/3957
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Amar Tumballi <amarts@redhat.com>
This commit is contained in:
Kaleb S. KEITHLEY 2012-09-18 14:07:40 -04:00 committed by Anand Avati
parent c2064eff89
commit 04536e5308
18 changed files with 1873 additions and 1374 deletions

View File

@ -67,6 +67,7 @@
#include <fnmatch.h>
#include "rpc-clnt.h"
#include "syncop.h"
#include "client_t.h"
#include "daemon.h"
@ -1279,6 +1280,8 @@ glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx)
pthread_mutex_init (&(ctx->lock), NULL);
ctx->clienttable = gf_clienttable_alloc();
cmd_args = &ctx->cmd_args;
/* parsing command line arguments */

View File

@ -4,7 +4,7 @@ libglusterfs_la_CFLAGS = -Wall $(GF_CFLAGS) \
libglusterfs_la_CPPFLAGS = $(GF_CPPFLAGS) -D__USE_FILE_OFFSET64 \
-DXLATORDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator\" \
-I$(CONTRIBDIR)/rbtree
-I$(top_srcdir)/rpc/rpc-lib/src/ -I$(CONTRIBDIR)/rbtree
libglusterfs_la_LIBADD = @LEXLIB@
@ -23,7 +23,7 @@ libglusterfs_la_SOURCES = dict.c xlator.c logging.c \
$(CONTRIBDIR)/uuid/uuid_time.c $(CONTRIBDIR)/uuid/compare.c \
$(CONTRIBDIR)/uuid/isnull.c $(CONTRIBDIR)/uuid/unpack.c syncop.c \
graph-print.c trie.c run.c options.c fd-lk.c circ-buff.c \
event-history.c gidcache.c ctx.c \
event-history.c gidcache.c ctx.c client_t.c lock-table.c \
$(CONTRIBDIR)/libgen/basename_r.c $(CONTRIBDIR)/libgen/dirname_r.c \
$(CONTRIBDIR)/stdlib/gf_mkostemp.c \
event-poll.c event-epoll.c
@ -40,8 +40,9 @@ noinst_HEADERS = common-utils.h defaults.h dict.h glusterfs.h hashfn.h \
checksum.h daemon.h $(CONTRIBDIR)/rbtree/rb.h store.h\
rbthash.h iatt.h latency.h mem-types.h $(CONTRIBDIR)/uuid/uuidd.h \
$(CONTRIBDIR)/uuid/uuid.h $(CONTRIBDIR)/uuid/uuidP.h \
$(CONTRIB_BUILDDIR)/uuid/uuid_types.h syncop.h graph-utils.h trie.h run.h \
options.h lkowner.h fd-lk.h circ-buff.h event-history.h gidcache.h
$(CONTRIB_BUILDDIR)/uuid/uuid_types.h syncop.h graph-utils.h trie.h \
run.h options.h lkowner.h fd-lk.h circ-buff.h event-history.h \
gidcache.h client_t.h lock-table.h
EXTRA_DIST = graph.l graph.y

881
libglusterfs/src/client_t.c Normal file
View File

@ -0,0 +1,881 @@
/*
Copyright (c) 2008-2013 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#include "glusterfs.h"
#include "dict.h"
#include "statedump.h"
#include "lock-table.h"
#include "rpcsvc.h"
#include "client_t.h"
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif
/*
 * Link the free-list entries [startidx, endcount-1) so each points at the
 * next, and terminate the chain with GF_CLIENTTABLE_END.
 * Returns 0 on success, -1 on invalid arguments.
 */
static int
gf_client_chain_client_entries (cliententry_t *entries, uint32_t startidx,
                                uint32_t endcount)
{
        uint32_t i = 0;

        if (!entries) {
                gf_log_callingfn ("client_t", GF_LOG_WARNING, "!entries");
                return -1;
        }

        /* Guard against unsigned underflow of (endcount - 1): with
         * endcount == 0 (or startidx >= endcount) the loop below would
         * run off the end of the table and scribble over memory. */
        if (startidx >= endcount) {
                gf_log_callingfn ("client_t", GF_LOG_WARNING,
                                  "startidx >= endcount");
                return -1;
        }

        /* Chain only till the second to last entry because we want to
         * ensure that the last entry has GF_CLIENTTABLE_END.
         */
        for (i = startidx; i < (endcount - 1); i++)
                entries[i].next_free = i + 1;

        /* i has already been incremented upto the last entry. */
        entries[i].next_free = GF_CLIENTTABLE_END;

        return 0;
}
/*
 * Grow the client table to hold at least `nr' entries, rounding up to a
 * power-of-two-derived size (scheme inherited from protocol/server).
 * On success the new tail of the table becomes the free list.
 * Returns 0 on success, EINVAL/ENOMEM on failure; on failure the table
 * is left in its previous, still-usable state.
 */
static int
gf_client_clienttable_expand (clienttable_t *clienttable, uint32_t nr)
{
        cliententry_t *oldclients = NULL;
        uint32_t       oldmax_clients = 0;
        int            ret = -1;

        /* nr is unsigned, so the old "nr < 0" test was always false and
         * has been dropped */
        if (clienttable == NULL) {
                gf_log_callingfn ("client_t", GF_LOG_ERROR, "invalid argument");
                ret = EINVAL;
                goto out;
        }

        /* expand size by power-of-two...
           this originally came from .../xlators/protocol/server/src/server.c
           where it was not commented */
        nr /= (1024 / sizeof (cliententry_t));
        nr = gf_roundup_next_power_of_two (nr + 1);
        nr *= (1024 / sizeof (cliententry_t));

        oldclients = clienttable->cliententries;
        oldmax_clients = clienttable->max_clients;

        clienttable->cliententries = GF_CALLOC (nr, sizeof (cliententry_t),
                                                gf_common_mt_cliententry_t);
        if (!clienttable->cliententries) {
                /* restore the old array so the table stays usable and the
                 * old entries are not leaked */
                clienttable->cliententries = oldclients;
                ret = ENOMEM;
                goto out;
        }
        clienttable->max_clients = nr;

        if (oldclients) {
                uint32_t cpy = oldmax_clients * sizeof (cliententry_t);
                memcpy (clienttable->cliententries, oldclients, cpy);
        }

        gf_client_chain_client_entries (clienttable->cliententries,
                                        oldmax_clients,
                                        clienttable->max_clients);

        /* Now that expansion is done, we must update the client list
         * head pointer so that the client allocation functions can continue
         * using the expanded table.
         */
        clienttable->first_free = oldmax_clients;
        GF_FREE (oldclients);
        ret = 0;
out:
        return ret;
}
/*
 * Allocate and initialize a ctx-wide client table with
 * GF_CLIENTTABLE_INITIAL_SIZE free slots.
 * Returns NULL on allocation failure (previously a table whose initial
 * expansion failed was returned with zero usable entries).
 */
clienttable_t *
gf_clienttable_alloc (void)
{
        clienttable_t *clienttable = NULL;

        clienttable =
                GF_CALLOC (1, sizeof (*clienttable), gf_common_mt_clienttable_t);
        if (!clienttable)
                return NULL;

        LOCK_INIT (&clienttable->lock);

        /* check the expansion result: a table with no entries would make
         * every later gf_client_get() walk off the free list */
        if (gf_client_clienttable_expand (clienttable,
                                          GF_CLIENTTABLE_INITIAL_SIZE) != 0) {
                LOCK_DESTROY (&clienttable->lock);
                GF_FREE (clienttable);
                return NULL;
        }

        return clienttable;
}
/*
 * Tear down a client table: drop one ref on every remaining client, then
 * free the entries array and the table itself.
 */
void
gf_client_clienttable_destroy (clienttable_t *clienttable)
{
        client_t      *client = NULL;
        cliententry_t *cliententries = NULL;
        uint32_t       client_count = 0;
        int32_t        i = 0;

        if (!clienttable) {
                gf_log_callingfn ("client_t", GF_LOG_WARNING, "!clienttable");
                return;
        }

        /* detach the entries under the lock so concurrent users see an
         * empty table, then clean up outside it */
        LOCK (&clienttable->lock);
        {
                client_count = clienttable->max_clients;
                clienttable->max_clients = 0;
                cliententries = clienttable->cliententries;
                clienttable->cliententries = NULL;
        }
        UNLOCK (&clienttable->lock);

        if (cliententries != NULL) {
                for (i = 0; i < client_count; i++) {
                        client = cliententries[i].client;
                        if (client != NULL) {
                                gf_client_unref (client);
                        }
                }
                GF_FREE (cliententries);
        }

        /* BUG FIX: these were inside the "if (cliententries)" block, so a
         * table whose entries were already gone leaked the table struct
         * and never destroyed its lock */
        LOCK_DESTROY (&clienttable->lock);
        GF_FREE (clienttable);
}
/*
 * Look up the client_t matching (client_uid, cred) in the ctx-wide client
 * table, or allocate a new one if there is no match.  On a hit both the
 * bind count and the ref count are incremented; a freshly created client
 * starts with bind == count == 1 and one extra ref taken before return.
 * Returns NULL with errno set (EINVAL/ENOMEM) on failure.
 */
client_t *
gf_client_get (xlator_t *this, rpcsvc_auth_data_t *cred, char *client_uid)
{
        client_t      *client = NULL;
        cliententry_t *cliententry = NULL;
        clienttable_t *clienttable = NULL;
        unsigned int   i = 0;

        if (this == NULL || client_uid == NULL) {
                gf_log_callingfn ("client_t", GF_LOG_ERROR, "invalid argument");
                errno = EINVAL;
                return NULL;
        }

        gf_log (this->name, GF_LOG_INFO, "client_uid=%s", client_uid);

        clienttable = this->ctx->clienttable;

        LOCK (&clienttable->lock);
        {
                for (; i < clienttable->max_clients; i++) {
                        client = clienttable->cliententries[i].client;
                        if (client == NULL)
                                continue;
                        /*
                         * look for matching client_uid, _and_
                         * if auth was used, matching auth flavour and data
                         *
                         * NOTE(review): when cred->flavour == AUTH_NONE this
                         * condition can never be true, so unauthenticated
                         * reconnects always allocate a fresh client_t —
                         * confirm that is intended.
                         */
                        if (strcmp (client_uid,
                                    client->server_ctx.client_uid) == 0 &&
                            (cred->flavour != AUTH_NONE &&
                             (cred->flavour ==
                                      client->server_ctx.auth.flavour &&
                              (size_t) cred->datalen ==
                                      client->server_ctx.auth.len &&
                              memcmp (cred->authdata,
                                      client->server_ctx.auth.data,
                                      client->server_ctx.auth.len) == 0))) {
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
                                __sync_add_and_fetch (&client->ref.bind, 1);
#else
                                LOCK (&client->ref.lock);
                                {
                                        ++client->ref.bind;
                                }
                                UNLOCK (&client->ref.lock);
#endif
                                break;
                        }
                }

                /* BUG FIX: if the scan fell off the end without matching,
                 * "client" still holds whatever the last slot contained;
                 * reset it so an unrelated client is not handed back */
                if (i == clienttable->max_clients)
                        client = NULL;

                if (client) {
                        gf_client_ref (client);
                        goto unlock;
                }

                /* BUG FIX: make sure a free slot exists before building the
                 * new client; previously an exhausted free list made the
                 * slot assignment below index cliententries[-1] */
                if (clienttable->first_free == GF_CLIENTTABLE_END) {
                        if (gf_client_clienttable_expand (
                                    clienttable,
                                    clienttable->max_clients +
                                            GF_CLIENTTABLE_INITIAL_SIZE) != 0) {
                                errno = ENOMEM;
                                goto unlock;
                        }
                }

                client = GF_CALLOC (1, sizeof (client_t), gf_common_mt_client_t);
                if (client == NULL) {
                        errno = ENOMEM;
                        goto unlock;
                }

                client->this = this;
                /* client->server_ctx.lk_version = 0; redundant */

                LOCK_INIT (&client->server_ctx.fdtable_lock);
                LOCK_INIT (&client->locks_ctx.ltable_lock);
                LOCK_INIT (&client->scratch_ctx.lock);
                LOCK_INIT (&client->ref.lock);

                client->server_ctx.client_uid = gf_strdup (client_uid);
                if (client->server_ctx.client_uid == NULL) {
                        errno = ENOMEM;
                        GF_FREE (client);
                        client = NULL;
                        goto unlock;
                }
                client->server_ctx.fdtable = gf_fd_fdtable_alloc ();
                if (client->server_ctx.fdtable == NULL) {
                        errno = ENOMEM;
                        GF_FREE (client->server_ctx.client_uid);
                        GF_FREE (client);
                        client = NULL;
                        goto unlock;
                }
                client->locks_ctx.ltable = gf_lock_table_new ();
                if (client->locks_ctx.ltable == NULL) {
                        errno = ENOMEM;
                        GF_FREE (client->server_ctx.fdtable);
                        GF_FREE (client->server_ctx.client_uid);
                        GF_FREE (client);
                        client = NULL;
                        goto unlock;
                }

                /* no need to do these atomically here */
                client->ref.bind = client->ref.count = 1;

                client->server_ctx.auth.flavour = cred->flavour;
                if (cred->flavour != AUTH_NONE) {
                        client->server_ctx.auth.data =
                                GF_CALLOC (1, cred->datalen,
                                           gf_common_mt_client_t);
                        if (client->server_ctx.auth.data == NULL) {
                                errno = ENOMEM;
                                GF_FREE (client->locks_ctx.ltable);
                                GF_FREE (client->server_ctx.fdtable);
                                GF_FREE (client->server_ctx.client_uid);
                                GF_FREE (client);
                                client = NULL;
                                goto unlock;
                        }
                        memcpy (client->server_ctx.auth.data, cred->authdata,
                                cred->datalen);
                        client->server_ctx.auth.len = cred->datalen;
                }

                /* take a slot off the free list */
                client->tbl_index = clienttable->first_free;
                cliententry = &clienttable->cliententries[client->tbl_index];
                cliententry->client = client;
                clienttable->first_free = cliententry->next_free;
                cliententry->next_free = GF_CLIENTENTRY_ALLOCATED;
                gf_client_ref (client);
        }
unlock:
        UNLOCK (&clienttable->lock);

        return client;
}
/*
 * Drop one bind reference on a client.  When the last bind goes away the
 * connection is logged as shutting down, *detached (if given) is set to
 * true, and one ordinary reference is released as well.
 */
void
gf_client_put (client_t *client, gf_boolean_t *detached)
{
        int remaining = 0;

        if (detached != NULL)
                *detached = _gf_false;

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
        remaining = __sync_sub_and_fetch (&client->ref.bind, 1);
#else
        LOCK (&client->ref.lock);
        {
                remaining = --client->ref.bind;
        }
        UNLOCK (&client->ref.lock);
#endif

        if (remaining != 0)
                return;

        gf_log (THIS->name, GF_LOG_INFO, "Shutting down connection %s",
                client->server_ctx.client_uid);
        if (detached != NULL)
                *detached = _gf_true;
        gf_client_unref (client);
}
/*
 * Take one reference on a client and return it.
 * Returns NULL (after logging) when called with a NULL client.
 */
client_t *
gf_client_ref (client_t *client)
{
        if (client == NULL) {
                gf_log_callingfn ("client_t", GF_LOG_ERROR, "null client");
                return NULL;
        }

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
        __sync_add_and_fetch (&client->ref.count, 1);
#else
        LOCK (&client->ref.lock);
        {
                client->ref.count++;
        }
        UNLOCK (&client->ref.lock);
#endif

        return client;
}
/*
 * Final teardown of a client once its refcount hits zero: give its slot
 * back to the client table's free list, destroy its locks, and release
 * everything it owns.
 */
static void
client_destroy (client_t *client)
{
        clienttable_t *tbl = NULL;

        if (client == NULL) {
                gf_log_callingfn ("xlator", GF_LOG_ERROR, "invalid argument");
                return;
        }

        tbl = client->this->ctx->clienttable;

        /* push this client's slot onto the table's free list */
        LOCK (&tbl->lock);
        {
                tbl->cliententries[client->tbl_index].client = NULL;
                tbl->cliententries[client->tbl_index].next_free =
                        tbl->first_free;
                tbl->first_free = client->tbl_index;
        }
        UNLOCK (&tbl->lock);

        LOCK_DESTROY (&client->server_ctx.fdtable_lock);
        LOCK_DESTROY (&client->locks_ctx.ltable_lock);
        LOCK_DESTROY (&client->scratch_ctx.lock);
        LOCK_DESTROY (&client->ref.lock);

        GF_FREE (client->server_ctx.auth.data);
        GF_FREE (client->scratch_ctx.ctx);
        GF_FREE (client->locks_ctx.ltable);
        GF_FREE (client->server_ctx.fdtable);
        GF_FREE (client->server_ctx.client_uid);
        GF_FREE (client);
}
/*
 * Release one reference on a client; the client is destroyed when the
 * count reaches zero.
 */
void
gf_client_unref (client_t *client)
{
        int remaining = 0;

        if (client == NULL) {
                gf_log_callingfn ("client_t", GF_LOG_ERROR, "client is NULL");
                return;
        }

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
        remaining = __sync_sub_and_fetch (&client->ref.count, 1);
#else
        LOCK (&client->ref.lock);
        {
                remaining = --client->ref.count;
        }
        UNLOCK (&client->ref.lock);
#endif

        if (remaining == 0)
                client_destroy (client);
}
/*
 * Store (xlator -> value) in the client's scratch ctx array.  An existing
 * entry for the xlator is overwritten; otherwise the first free slot found
 * while scanning the whole array is used.  Caller must hold
 * scratch_ctx.lock.  Returns 0 on success, -1 when no slot is available
 * or on bad arguments.
 */
int
__client_ctx_set (client_t *client, xlator_t *xlator, uint64_t value)
{
        int i    = 0;
        int slot = -1;

        if (client == NULL || xlator == NULL)
                return -1;

        /* prefer an existing entry for this xlator; remember the first
         * free slot seen, but keep scanning in case the key shows up
         * later in the array */
        for (i = 0; i < client->scratch_ctx.count; i++) {
                if (client->scratch_ctx.ctx[i].xl_key == xlator) {
                        slot = i;
                        break;
                }
                if (slot == -1 && !client->scratch_ctx.ctx[i].key)
                        slot = i;
        }

        if (slot == -1) {
                gf_log_callingfn ("", GF_LOG_WARNING, "%p %s", client,
                                  xlator->name);
                return -1;
        }

        client->scratch_ctx.ctx[slot].xl_key = xlator;
        client->scratch_ctx.ctx[slot].value  = value;

        return 0;
}
/*
 * Locked wrapper around __client_ctx_set.
 * Returns -1 (after logging) on bad arguments.
 */
int
client_ctx_set (client_t *client, xlator_t *xlator, uint64_t value)
{
        int rc = -1;

        if (client == NULL || xlator == NULL) {
                gf_log_callingfn ("", GF_LOG_WARNING, "%p %p", client, xlator);
                return -1;
        }

        LOCK (&client->scratch_ctx.lock);
        {
                rc = __client_ctx_set (client, xlator, value);
        }
        UNLOCK (&client->scratch_ctx.lock);

        return rc;
}
/*
 * Fetch the value stored for an xlator in the client's scratch ctx.
 * Caller must hold scratch_ctx.lock.  *value is written only on a hit
 * (and only when value is non-NULL).  Returns 0 on hit, -1 otherwise.
 */
int
__client_ctx_get (client_t *client, xlator_t *xlator, uint64_t *value)
{
        int i = 0;

        if (client == NULL || xlator == NULL)
                return -1;

        for (i = 0; i < client->scratch_ctx.count; i++) {
                if (client->scratch_ctx.ctx[i].xl_key == xlator) {
                        if (value != NULL)
                                *value = client->scratch_ctx.ctx[i].value;
                        return 0;
                }
        }

        return -1;
}
/*
 * Locked wrapper around __client_ctx_get.
 */
int
client_ctx_get (client_t *client, xlator_t *xlator, uint64_t *value)
{
        int rc = -1;

        if (client == NULL || xlator == NULL)
                return -1;

        LOCK (&client->scratch_ctx.lock);
        {
                rc = __client_ctx_get (client, xlator, value);
        }
        UNLOCK (&client->scratch_ctx.lock);

        return rc;
}
/*
 * Remove an xlator's entry from the client's scratch ctx, optionally
 * returning the stored value through *value.  Caller must hold
 * scratch_ctx.lock.  Returns 0 on hit, -1 when no entry exists.
 */
int
__client_ctx_del (client_t *client, xlator_t *xlator, uint64_t *value)
{
        int i = 0;

        if (client == NULL || xlator == NULL)
                return -1;

        for (i = 0; i < client->scratch_ctx.count; i++) {
                if (client->scratch_ctx.ctx[i].xl_key != xlator)
                        continue;

                if (value != NULL)
                        *value = client->scratch_ctx.ctx[i].value;

                /* clearing key frees the slot for reuse */
                client->scratch_ctx.ctx[i].key   = 0;
                client->scratch_ctx.ctx[i].value = 0;
                return 0;
        }

        return -1;
}
/*
 * Locked wrapper around __client_ctx_del.
 */
int
client_ctx_del (client_t *client, xlator_t *xlator, uint64_t *value)
{
        int rc = -1;

        if (client == NULL || xlator == NULL)
                return -1;

        LOCK (&client->scratch_ctx.lock);
        {
                rc = __client_ctx_del (client, xlator, value);
        }
        UNLOCK (&client->scratch_ctx.lock);

        return rc;
}
/*
 * Dump one client's refcount to the statedump output.
 * "prefix" is currently unused here (kept for interface symmetry with
 * cliententry_dump / clienttable_dump).
 */
void
client_dump (client_t *client, char *prefix)
{
        if (!client)
                return;

        /* the old "key" buffer was memset and then never used; removed */
        gf_proc_dump_write ("refcount", "%d", client->ref.count);
}
/*
 * Dump a single client-table entry; only entries currently allocated
 * (next_free == GF_CLIENTENTRY_ALLOCATED) are dumped.
 */
void
cliententry_dump (cliententry_t *cliententry, char *prefix)
{
        if (cliententry == NULL)
                return;

        if (cliententry->next_free != GF_CLIENTENTRY_ALLOCATED)
                return;

        if (cliententry->client != NULL)
                client_dump (cliententry->client, prefix);
}
/*
 * Dump the whole client table (size, free-list head, every allocated
 * entry) to the statedump output.  Uses TRY_LOCK so a statedump never
 * blocks; bails out with a warning if the lock is contended.
 */
void
clienttable_dump (clienttable_t *clienttable, char *prefix)
{
        char key[GF_DUMP_MAX_BUF_LEN] = {0};
        int  idx = 0;

        if (clienttable == NULL)
                return;

        if (TRY_LOCK (&clienttable->lock) != 0) {
                gf_log ("client_t", GF_LOG_WARNING, "Unable to acquire lock");
                return;
        }

        memset (key, 0, sizeof key);
        gf_proc_dump_build_key (key, prefix, "maxclients");
        gf_proc_dump_write (key, "%d", clienttable->max_clients);
        gf_proc_dump_build_key (key, prefix, "first_free");
        gf_proc_dump_write (key, "%d", clienttable->first_free);

        for (idx = 0; idx < clienttable->max_clients; idx++) {
                if (clienttable->cliententries[idx].next_free !=
                    GF_CLIENTENTRY_ALLOCATED)
                        continue;

                gf_proc_dump_build_key (key, prefix,
                                        "cliententry[%d]", idx);
                gf_proc_dump_add_section (key);
                cliententry_dump (&clienttable->cliententries[idx], key);
        }

        UNLOCK (&clienttable->lock);
}
/*
 * Dump every registered xlator's per-client ctx via its
 * dumpops->clientctx hook.  The entire implementation is compiled out
 * (#if 0) in phase 1 of this patch set, so this function is currently a
 * no-op stub that callers can already link against; see the commit
 * message ("Hooking up the state dump is left to do in phase 2").
 */
void
client_ctx_dump (client_t *client, char *prefix)
{
#if 0 /* TBD, FIXME */
        struct client_ctx *client_ctx = NULL;
        xlator_t *xl = NULL;
        int i = 0;

        if ((client == NULL) || (client->ctx == NULL)) {
                goto out;
        }

        /* snapshot the ctx array under the lock, then invoke the dump
         * hooks outside it */
        LOCK (&client->ctx_lock);
        if (client->ctx != NULL) {
                client_ctx = GF_CALLOC (client->inode->table->xl->graph->ctx_count,
                                        sizeof (*client_ctx),
                                        gf_common_mt_client_ctx);
                if (client_ctx == NULL) {
                        goto unlock;
                }
                for (i = 0; i < client->inode->table->xl->graph->ctx_count; i++) {
                        client_ctx[i] = client->ctx[i];
                }
        }
unlock:
        UNLOCK (&client->ctx_lock);

        if (client_ctx == NULL) {
                goto out;
        }

        for (i = 0; i < client->inode->table->xl->graph->ctx_count; i++) {
                if (client_ctx[i].xl_key) {
                        xl = (xlator_t *)(long)client_ctx[i].xl_key;
                        if (xl->dumpops && xl->dumpops->clientctx)
                                xl->dumpops->clientctx (xl, client);
                }
        }

out:
        GF_FREE (client_ctx);
#endif
}
/*
* the following functions are here to preserve legacy behavior of the
* protocol/server xlator dump, but perhaps they should just be folded
* into the client dump instead?
*/
/*
 * Serialize every allocated client's fdtable into "dict" under keys
 * conn0, conn1, ..., and record the iteration count as "conncount".
 * Returns 0/-1 following dict_set_int32, or -1 on validation/lock
 * failure.
 */
int
gf_client_dump_fdtables_to_dict (xlator_t *this, dict_t *dict)
{
        client_t      *client = NULL;
        clienttable_t *clienttable = NULL;
        int            count = 0;
        int            ret = -1;
        char           key[GF_DUMP_MAX_BUF_LEN] = {0,};

        GF_VALIDATE_OR_GOTO (THIS->name, this, out);
        GF_VALIDATE_OR_GOTO (this->name, dict, out);

        clienttable = this->ctx->clienttable;
        if (!clienttable)
                return -1;

        /* statedump path: never block on the table lock */
        ret = TRY_LOCK (&clienttable->lock);
        {
                if (ret) {
                        gf_log ("client_t", GF_LOG_WARNING,
                                "Unable to acquire lock");
                        return -1;
                }
                for ( ; count < clienttable->max_clients; count++) {
                        if (GF_CLIENTENTRY_ALLOCATED !=
                            clienttable->cliententries[count].next_free)
                                continue;
                        client = clienttable->cliententries[count].client;
                        memset (key, 0, sizeof key);
                        /* BUG FIX: this used "count++", double-incrementing
                         * the loop counter and skipping every other entry */
                        snprintf (key, sizeof key, "conn%d", count);
                        fdtable_dump_to_dict (client->server_ctx.fdtable,
                                              key, dict);
                }
        }
        UNLOCK (&clienttable->lock);

        ret = dict_set_int32 (dict, "conncount", count);

out:
        return ret;
}
/*
 * Write each allocated client's uid, refcount, bound xlator and fdtable
 * into the process statedump (gf_proc_dump_* output).
 * Returns 0 on success, -1 on validation or lock failure.
 */
int
gf_client_dump_fdtables (xlator_t *this)
{
        client_t *client = NULL;
        clienttable_t *clienttable = NULL;
        /* NOTE(review): count starts at 1, so the entry at index 0 is
         * never dumped here, while gf_client_dump_fdtables_to_dict starts
         * at 0 — confirm which is the intended legacy behavior */
        int count = 1;
        int ret = -1;
        char key[GF_DUMP_MAX_BUF_LEN] = {0,};

        GF_VALIDATE_OR_GOTO (THIS->name, this, out);

        clienttable = this->ctx->clienttable;

        if (!clienttable)
                return -1;

        /* statedump path: use TRY_LOCK so a dump never blocks; bail out
         * if the table lock is contended */
        ret = TRY_LOCK (&clienttable->lock);
        {
        if (ret) {
        gf_log ("client_t", GF_LOG_WARNING,
        "Unable to acquire lock");
        return -1;
        }
        for ( ; count < clienttable->max_clients; count++) {
        /* only slots currently handed out hold a live client */
        if (GF_CLIENTENTRY_ALLOCATED !=
        clienttable->cliententries[count].next_free)
        continue;
        client = clienttable->cliententries[count].client;
        memset(key, 0, sizeof key);
        if (client->server_ctx.client_uid) {
        gf_proc_dump_build_key (key, "conn",
        "%d.id", count);
        gf_proc_dump_write (key, "%s",
        client->server_ctx.client_uid);
        }
        gf_proc_dump_build_key (key, "conn", "%d.ref",
        count);
        gf_proc_dump_write (key, "%d", client->ref.count);
        if (client->bound_xl) {
        gf_proc_dump_build_key (key, "conn",
        "%d.bound_xl", count);
        gf_proc_dump_write (key, "%s",
        client->bound_xl->name);
        }
        /* NOTE(review): the key is rebuilt as "%d.id" again before
         * dumping the fdtable — looks like it was meant to be a
         * distinct key (e.g. "%d.fdtable"); verify against the old
         * protocol/server dump format */
        gf_proc_dump_build_key (key, "conn","%d.id", count);
        fdtable_dump (client->server_ctx.fdtable, key);
        }
        }
        UNLOCK(&clienttable->lock);

        ret = 0;
out:
        return ret;
}
/*
 * Serialize the inode tables reachable from the clients' bound xlators
 * into "dict" (keys conn<N>), de-duplicating consecutive clients that
 * share the same bound_xl, and record "conncount".
 * Returns 0/-1 following dict_set_int32, or -1 on validation/lock
 * failure.
 */
int
gf_client_dump_inodes_to_dict (xlator_t *this, dict_t *dict)
{
        client_t      *client = NULL;
        clienttable_t *clienttable = NULL;
        xlator_t      *prev_bound_xl = NULL;
        char           key[32] = {0,};
        int            count = 0;
        int            ret = -1;

        GF_VALIDATE_OR_GOTO (THIS->name, this, out);
        GF_VALIDATE_OR_GOTO (this->name, dict, out);

        clienttable = this->ctx->clienttable;
        if (!clienttable)
                return -1;

        /* statedump path: never block on the table lock */
        ret = TRY_LOCK (&clienttable->lock);
        {
                if (ret) {
                        gf_log ("client_t", GF_LOG_WARNING,
                                "Unable to acquire lock");
                        return -1;
                }
                for ( ; count < clienttable->max_clients; count++) {
                        if (GF_CLIENTENTRY_ALLOCATED !=
                            clienttable->cliententries[count].next_free)
                                continue;
                        client = clienttable->cliententries[count].client;
                        if (client->bound_xl && client->bound_xl->itable) {
                                /* Presently every brick contains only one
                                 * bound_xl for all connections.  Listing the
                                 * inode table per connection would duplicate
                                 * it, so skip xlators already dumped.  If
                                 * need arises the check can be improved. */
                                if (client->bound_xl == prev_bound_xl)
                                        continue;
                                prev_bound_xl = client->bound_xl;

                                /* (a second, redundant memset of key was
                                 * removed here) */
                                memset (key, 0, sizeof (key));
                                snprintf (key, sizeof (key), "conn%d", count);
                                inode_table_dump_to_dict (client->bound_xl->itable,
                                                          key, dict);
                        }
                }
        }
        UNLOCK (&clienttable->lock);

        ret = dict_set_int32 (dict, "conncount", count);

out:
        /* the old "if (prev_bound_xl) prev_bound_xl = NULL;" tail was dead
         * code (nulling a local at function exit) and has been removed */
        return ret;
}
/*
 * Dump the inode tables reachable from the clients' bound xlators into
 * the process statedump, de-duplicating consecutive clients that share
 * the same bound_xl.  Returns 0 on success, -1 on validation or lock
 * failure.
 */
int
gf_client_dump_inodes (xlator_t *this)
{
        client_t *client = NULL;
        clienttable_t *clienttable = NULL;
        xlator_t *prev_bound_xl = NULL;
        /* NOTE(review): count starts at 1, skipping the entry at index 0;
         * gf_client_dump_inodes_to_dict starts at 0 — confirm which is the
         * intended legacy behavior */
        int count = 1;
        int ret = -1;
        char key[GF_DUMP_MAX_BUF_LEN] = {0,};

        GF_VALIDATE_OR_GOTO (THIS->name, this, out);

        clienttable = this->ctx->clienttable;

        if (!clienttable)
                return -1;

        /* statedump path: use TRY_LOCK so a dump never blocks */
        ret = TRY_LOCK (&clienttable->lock);
        {
        if (ret) {
        gf_log ("client_t", GF_LOG_WARNING,
        "Unable to acquire lock");
        return -1;
        }
        for ( ; count < clienttable->max_clients; count++) {
        if (GF_CLIENTENTRY_ALLOCATED !=
        clienttable->cliententries[count].next_free)
        continue;
        client = clienttable->cliententries[count].client;
        memset(key, 0, sizeof key);
        if (client->bound_xl && client->bound_xl->itable) {
        /* Presently every brick contains only
         * one bound_xl for all connections.
         * This will lead to duplicating of
         * the inode lists, if listing is
         * done for every connection. This
         * simple check prevents duplication
         * in the present case. If need arises
         * the check can be improved.
         */
        if (client->bound_xl == prev_bound_xl)
        continue;
        prev_bound_xl = client->bound_xl;

        gf_proc_dump_build_key(key, "conn",
        "%d.bound_xl.%s", count,
        client->bound_xl->name);
        inode_table_dump(client->bound_xl->itable,key);
        }
        }
        }
        UNLOCK(&clienttable->lock);

        ret = 0;
out:
        return ret;
}

168
libglusterfs/src/client_t.h Normal file
View File

@ -0,0 +1,168 @@
/*
  Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
  This file is part of GlusterFS.

  This file is licensed to you under your choice of the GNU Lesser
  General Public License, version 3 or any later version (LGPLv3 or
  later), or the GNU General Public License, version 2 (GPLv2), in all
  cases as published by the Free Software Foundation.
*/

#ifndef _CLIENT_T_H
#define _CLIENT_T_H

#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif

#include "glusterfs.h"
#include "locking.h"  /* for gf_lock_t, not included by glusterfs.h */

/* One per-xlator scratch slot on a client; key/xl_key and value/ptr1 are
 * overlaid so the slot can hold either integer or pointer data. */
struct client_ctx {
        union {
                uint64_t  key;
                void     *xl_key;
        };
        union {
                uint64_t  value;
                void     *ptr1;
        };
};

struct _client_t {
        struct {
                /* ctx for .../xlators/protocol/server */
                gf_lock_t            fdtable_lock;
                fdtable_t           *fdtable;
                char                *client_uid;
                struct _gf_timer    *grace_timer;
                uint32_t             lk_version;
                struct {
                        int      flavour;
                        size_t   len;
                        char    *data;
                } auth;
        } server_ctx;
        struct {
                /* ctx for .../xlators/features/locks */
                gf_lock_t            ltable_lock;
                struct _lock_table  *ltable;
        } locks_ctx;
        struct {
                /* e.g. hekafs uidmap can stash stuff here */
                gf_lock_t            lock;
                unsigned short       count;
                struct client_ctx   *ctx;
        } scratch_ctx;
        struct {
                gf_lock_t            lock;
                volatile int         bind;   /* transport/bind references */
                volatile int         count;  /* ordinary references */
        } ref;
        xlator_t        *bound_xl;
        xlator_t        *this;
        int              tbl_index;  /* slot in the ctx-wide clienttable */
};
typedef struct _client_t client_t;

struct client_table_entry {
        client_t        *client;
        int              next_free;
};
typedef struct client_table_entry cliententry_t;

struct _clienttable {
        unsigned int     max_clients;
        gf_lock_t        lock;
        cliententry_t   *cliententries;
        int              first_free;
};
typedef struct _clienttable clienttable_t;

#define GF_CLIENTTABLE_INITIAL_SIZE 32

/* Signifies no more entries in the client table. */
#define GF_CLIENTTABLE_END  -1

/* This is used to invalidate
 * the next_free value in an cliententry that has been allocated
 */
#define GF_CLIENTENTRY_ALLOCATED    -2

client_t *
gf_client_get (xlator_t *this, rpcsvc_auth_data_t *cred, char *client_uid);

void
gf_client_put (client_t *client, gf_boolean_t *detached);

clienttable_t *
gf_clienttable_alloc (void);

void
gf_client_clienttable_destroy (clienttable_t *clienttable);

client_t *
gf_client_ref (client_t *client);

void
gf_client_unref (client_t *client);

int
client_ctx_set (client_t *client, xlator_t *xlator, uint64_t value);

int
client_ctx_get (client_t *client, xlator_t *xlator, uint64_t *value);

int
client_ctx_del (client_t *client, xlator_t *xlator, uint64_t *value);

/* unlocked variants; caller must hold client->scratch_ctx.lock.
 * (previously declared with a single leading underscore, which did not
 * match the __client_ctx_* definitions in client_t.c) */
int
__client_ctx_set (client_t *client, xlator_t *xlator, uint64_t value);

int
__client_ctx_get (client_t *client, xlator_t *xlator, uint64_t *value);

int
__client_ctx_del (client_t *client, xlator_t *xlator, uint64_t *value);

void
client_ctx_dump (client_t *client, char *prefix);

/* statedump helpers (legacy protocol/server behavior); the old duplicate
 * declarations and the never-defined gf_client_dump_fdtable(_to_dict)
 * prototypes were removed */
int
gf_client_dump_fdtables_to_dict (xlator_t *this, dict_t *dict);

int
gf_client_dump_fdtables (xlator_t *this);

int
gf_client_dump_inodes_to_dict (xlator_t *this, dict_t *dict);

int
gf_client_dump_inodes (xlator_t *this);

#endif /* _CLIENT_T_H */

View File

@ -140,7 +140,7 @@
#define GLUSTERFS_RPC_REPLY_SIZE 24
#define ZR_FILE_CONTENT_REQUEST(key) (!strncmp(key, ZR_FILE_CONTENT_STR, \
ZR_FILE_CONTENT_STRLEN))
ZR_FILE_CONTENT_STRLEN))
#define DEFAULT_VAR_RUN_DIRECTORY DATADIR "/run/gluster"
#define GF_REPLICATE_TRASH_DIR ".landfill"
@ -192,8 +192,8 @@ typedef enum {
GF_FOP_READDIR,
GF_FOP_INODELK,
GF_FOP_FINODELK,
GF_FOP_ENTRYLK,
GF_FOP_FENTRYLK,
GF_FOP_ENTRYLK,
GF_FOP_FENTRYLK,
GF_FOP_XATTROP,
GF_FOP_FXATTROP,
GF_FOP_FGETXATTR,
@ -258,20 +258,20 @@ typedef enum {
typedef enum {
ENTRYLK_LOCK,
ENTRYLK_UNLOCK,
ENTRYLK_LOCK_NB
ENTRYLK_LOCK,
ENTRYLK_UNLOCK,
ENTRYLK_LOCK_NB
} entrylk_cmd;
typedef enum {
ENTRYLK_RDLCK,
ENTRYLK_WRLCK
ENTRYLK_RDLCK,
ENTRYLK_WRLCK
} entrylk_type;
typedef enum {
GF_XATTROP_ADD_ARRAY,
GF_XATTROP_ADD_ARRAY,
GF_XATTROP_ADD_ARRAY64,
GF_XATTROP_OR_ARRAY,
GF_XATTROP_AND_ARRAY
@ -287,10 +287,10 @@ typedef enum {
#define GF_CONTENT_KEY "glusterfs.content"
struct _xlator_cmdline_option {
struct list_head cmd_args;
char *volume;
char *key;
char *value;
struct list_head cmd_args;
char *volume;
char *key;
char *value;
};
typedef struct _xlator_cmdline_option xlator_cmdline_option_t;
@ -300,22 +300,22 @@ typedef struct _xlator_cmdline_option xlator_cmdline_option_t;
#define GF_OPTION_DEFERRED 2
struct _cmd_args {
/* basic options */
char *volfile_server;
char *volfile;
/* basic options */
char *volfile_server;
char *volfile;
char *log_server;
gf_loglevel_t log_level;
char *log_file;
gf_loglevel_t log_level;
char *log_file;
int32_t max_connect_attempts;
/* advanced options */
uint32_t volfile_server_port;
char *volfile_server_transport;
/* advanced options */
uint32_t volfile_server_port;
char *volfile_server_transport;
uint32_t log_server_port;
char *pid_file;
char *pid_file;
char *sock_file;
int no_daemon_mode;
char *run_id;
int debug_mode;
int no_daemon_mode;
char *run_id;
int debug_mode;
int read_only;
int acl;
int selinux;
@ -331,13 +331,13 @@ struct _cmd_args {
int fuse_direct_io_mode;
char *use_readdirp;
int volfile_check;
double fuse_entry_timeout;
double fuse_negative_timeout;
double fuse_attribute_timeout;
char *volume_name;
int fuse_nodev;
int fuse_nosuid;
char *dump_fuse;
double fuse_entry_timeout;
double fuse_negative_timeout;
double fuse_attribute_timeout;
char *volume_name;
int fuse_nodev;
int fuse_nosuid;
char *dump_fuse;
pid_t client_pid;
int client_pid_set;
unsigned uid_map_root;
@ -345,9 +345,9 @@ struct _cmd_args {
int congestion_threshold;
char *fuse_mountopts;
/* key args */
char *mount_point;
char *volfile_id;
/* key args */
char *mount_point;
char *volfile_id;
/* required for portmap */
int brick_port;
@ -376,16 +376,16 @@ typedef struct _glusterfs_graph glusterfs_graph_t;
typedef int32_t (*glusterfsd_mgmt_event_notify_fn_t) (int32_t event, void *data,
...);
struct _glusterfs_ctx {
cmd_args_t cmd_args;
char *process_uuid;
FILE *pidfp;
char fin;
void *timer;
void *ib;
void *pool;
void *event_pool;
cmd_args_t cmd_args;
char *process_uuid;
FILE *pidfp;
char fin;
void *timer;
void *ib;
void *pool;
void *event_pool;
void *iobuf_pool;
pthread_mutex_t lock;
pthread_mutex_t lock;
size_t page_size;
struct list_head graphs; /* double linked list of graphs - one per volfile parse */
glusterfs_graph_t *active; /* the latest graph in use */
@ -401,12 +401,12 @@ struct _glusterfs_ctx {
got changed */
pid_t mnt_pid; /* pid of the mount agent */
int process_mode; /*mode in which process is runninng*/
struct syncenv *env; /* The env pointer to the synctasks */
struct syncenv *env; /* The env pointer to the synctasks */
struct list_head mempool_list; /* used to keep a global list of
mempools, used to log details of
mempool in statedump */
char *statedump_path;
char *statedump_path;
struct mem_pool *dict_pool;
struct mem_pool *dict_pair_pool;
@ -416,9 +416,11 @@ struct _glusterfs_ctx {
call to fsd-mgmt */
gf_log_handle_t log; /* all logging related variables */
int mem_acct_enable;
int mem_acct_enable;
int daemon_pipe[2];
struct _clienttable *clienttable;
};
typedef struct _glusterfs_ctx glusterfs_ctx_t;

View File

@ -0,0 +1,128 @@
/*
Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#include "lock-table.h"
#include "common-utils.h"
/*
 * Allocate and initialize an empty lock table (both locker lists empty,
 * lock initialized).  Returns NULL on allocation failure.
 */
struct _lock_table *
gf_lock_table_new (void)
{
        struct _lock_table *table = NULL;

        table = GF_CALLOC (1, sizeof (struct _lock_table),
                           gf_common_mt_lock_table);
        if (table != NULL) {
                INIT_LIST_HEAD (&table->entrylk_lockers);
                INIT_LIST_HEAD (&table->inodelk_lockers);
                LOCK_INIT (&table->lock);
        }

        return table;
}
/*
 * Record a new held lock in the table: identified by fd (ref'd) when fd
 * is non-NULL, otherwise by a copy of loc.  The entry goes on the
 * entrylk or inodelk list depending on "type".
 * Returns 0 on success, -1 on bad arguments or allocation failure.
 */
int
gf_add_locker (struct _lock_table *table, const char *volume,
               loc_t *loc, fd_t *fd, pid_t pid, gf_lkowner_t *owner,
               glusterfs_fop_t type)
{
        int32_t         ret = -1;
        struct _locker *new = NULL;

        GF_VALIDATE_OR_GOTO ("lock-table", table, out);
        GF_VALIDATE_OR_GOTO ("lock-table", volume, out);

        new = GF_CALLOC (1, sizeof (struct _locker), gf_common_mt_locker);
        if (new == NULL) {
                goto out;
        }
        INIT_LIST_HEAD (&new->lockers);

        new->volume = gf_strdup (volume);
        /* BUG FIX: the strdup result was previously unchecked */
        if (new->volume == NULL) {
                GF_FREE (new);
                goto out;
        }

        if (fd == NULL) {
                loc_copy (&new->loc, loc);
        } else {
                new->fd = fd_ref (fd);
        }

        new->pid   = pid;
        new->owner = *owner;

        LOCK (&table->lock);
        {
                if (type == GF_FOP_ENTRYLK)
                        list_add_tail (&new->lockers, &table->entrylk_lockers);
                else
                        list_add_tail (&new->lockers, &table->inodelk_lockers);
        }
        UNLOCK (&table->lock);

        /* BUG FIX: ret was never set on the success path, so this
         * function always returned -1 */
        ret = 0;
out:
        return ret;
}
/*
 * Remove all lock records in "table" that match (volume, owner) and
 * either the given fd or the given loc's inode.  Matching entries are
 * unlinked under the table lock and released (fd_unref / loc_wipe,
 * volume string and record freed) outside it.
 * Returns 0 on success, -1 on bad arguments.
 */
int
gf_del_locker (struct _lock_table *table, const char *volume,
               loc_t *loc, fd_t *fd, gf_lkowner_t *owner, glusterfs_fop_t type)
{
        int32_t           ret = -1;
        struct list_head *head = NULL;
        struct list_head  unlinked;
        struct _locker   *entry = NULL;
        struct _locker   *next = NULL;

        GF_VALIDATE_OR_GOTO ("lock-table", table, out);
        GF_VALIDATE_OR_GOTO ("lock-table", volume, out);

        INIT_LIST_HEAD (&unlinked);

        LOCK (&table->lock);
        {
                head = (type == GF_FOP_ENTRYLK) ? &table->entrylk_lockers
                                                : &table->inodelk_lockers;

                list_for_each_entry_safe (entry, next, head, lockers) {
                        if (!is_same_lkowner (&entry->owner, owner) ||
                            strcmp (entry->volume, volume))
                                continue;

                        if (entry->fd && fd && (entry->fd == fd))
                                list_move_tail (&entry->lockers, &unlinked);
                        else if (entry->loc.inode && loc &&
                                 (entry->loc.inode == loc->inode))
                                list_move_tail (&entry->lockers, &unlinked);
                }
        }
        UNLOCK (&table->lock);

        /* now tear down the collected entries without holding the lock */
        entry = NULL;
        next = NULL;
        list_for_each_entry_safe (entry, next, &unlinked, lockers) {
                list_del_init (&entry->lockers);
                if (entry->fd)
                        fd_unref (entry->fd);
                else
                        loc_wipe (&entry->loc);
                GF_FREE (entry->volume);
                GF_FREE (entry);
        }

        ret = 0;
out:
        return ret;
}

View File

@ -0,0 +1,54 @@
/*
  Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
  This file is part of GlusterFS.

  This file is licensed to you under your choice of the GNU Lesser
  General Public License, version 3 or any later version (LGPLv3 or
  later), or the GNU General Public License, version 2 (GPLv2), in all
  cases as published by the Free Software Foundation.
*/

#ifndef _LOCK_TABLE_H
#define _LOCK_TABLE_H

#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif

#include "xlator.h"

/* One held lock (entrylk or inodelk), identified either by an fd or by a
 * loc (see gf_add_locker in lock-table.c). */
struct _locker {
        struct list_head  lockers;  /* linked on one of _lock_table's lists */
        char             *volume;   /* owning volume name (strdup'd copy) */
        loc_t             loc;      /* set when the lock was taken by loc */
        fd_t             *fd;       /* set (ref'd) when taken by fd */
        gf_lkowner_t      owner;    /* lock owner used for matching on delete */
        pid_t             pid;      /* pid recorded when the lock was taken */
};

/* Per-connection table of outstanding locks, guarded by `lock'. */
struct _lock_table {
        struct list_head  inodelk_lockers;
        struct list_head  entrylk_lockers;
        gf_lock_t         lock;
};

/* NOTE(review): these prototypes declare int32_t returns while the
 * definitions in lock-table.c use plain int — equivalent where int is
 * 32 bits, but the two should be made identical; confirm and align. */
int32_t
gf_add_locker (struct _lock_table *table, const char *volume,
               loc_t *loc,
               fd_t *fd,
               pid_t pid,
               gf_lkowner_t *owner,
               glusterfs_fop_t type);

int32_t
gf_del_locker (struct _lock_table *table, const char *volume,
               loc_t *loc,
               fd_t *fd,
               gf_lkowner_t *owner,
               glusterfs_fop_t type);

struct _lock_table *
gf_lock_table_new (void);

#endif /* _LOCK_TABLE_H */

View File

@ -109,6 +109,12 @@ enum gf_common_mem_types_ {
gf_common_mt_drc_rbtree_node_t = 93,
gf_common_mt_iov_base_t = 94,
gf_common_mt_groups_t = 95,
gf_common_mt_end = 96
gf_common_mt_cliententry_t = 96,
gf_common_mt_clienttable_t = 97,
gf_common_mt_client_t = 98,
gf_common_mt_client_ctx = 99,
gf_common_mt_lock_table = 100,
gf_common_mt_locker = 101,
gf_common_mt_end = 102
};
#endif

View File

@ -186,7 +186,7 @@ struct rpc_transport {
*/
void *private;
void *xl_private;
struct _client_t *xl_private;
void *xl; /* Used for THIS */
void *mydata;
pthread_mutex_t lock;
@ -211,7 +211,7 @@ struct rpc_transport {
struct list_head list;
int bind_insecure;
void *dl_handle; /* handle of dlopen() */
void *dl_handle; /* handle of dlopen() */
};
struct rpc_transport_ops {

View File

@ -34,6 +34,6 @@ exec 4>&-
EXPECT "1" get_fd_count $V0 $H0 $B0/${V0}0 a
exec 5>&-
EXPECT "" get_fd_count $V0 $H0 $B0/${V0}0 a
EXPECT "0" get_fd_count $V0 $H0 $B0/${V0}0 a
cleanup

View File

@ -135,10 +135,10 @@ __pl_inode_is_empty (pl_inode_t *pl_inode)
void
pl_print_locker (char *str, int size, xlator_t *this, call_frame_t *frame)
{
snprintf (str, size, "Pid=%llu, lk-owner=%s, Transport=%p, Frame=%llu",
snprintf (str, size, "Pid=%llu, lk-owner=%s, Client=%p, Frame=%llu",
(unsigned long long) frame->root->pid,
lkowner_utoa (&frame->root->lk_owner),
(void *)frame->root->trans,
frame->root->trans,
(unsigned long long) frame->root->unique);
}

View File

@ -20,6 +20,7 @@
#include "compat-errno.h"
#include "glusterfs3.h"
#include "authenticate.h"
#include "client_t.h"
struct __get_xl_struct {
const char *name;
@ -94,9 +95,9 @@ _volfile_update_checksum (xlator_t *this, char *key, uint32_t checksum)
if (temp_volfile->checksum != checksum) {
gf_log (this->name, GF_LOG_INFO,
"the volume file got modified between earlier access "
"and now, this may lead to inconsistency between "
"clients, advised to remount client");
"the volume file was modified between a prior access "
"and now. This may lead to inconsistency between "
"clients, you are advised to remount client");
temp_volfile->checksum = checksum;
}
@ -109,10 +110,10 @@ static size_t
getspec_build_volfile_path (xlator_t *this, const char *key, char *path,
size_t path_len)
{
int ret = -1;
char *filename = NULL;
server_conf_t *conf = NULL;
int ret = -1;
int free_filename = 0;
char *filename = NULL;
server_conf_t *conf = NULL;
char data_key[256] = {0,};
conf = this->private;
@ -329,14 +330,14 @@ server_setvolume (rpcsvc_request_t *req)
{
gf_setvolume_req args = {{0,},};
gf_setvolume_rsp rsp = {0,};
server_connection_t *conn = NULL;
client_t *client = NULL;
server_conf_t *conf = NULL;
peer_info_t *peerinfo = NULL;
dict_t *reply = NULL;
dict_t *config_params = NULL;
dict_t *params = NULL;
char *name = NULL;
char *process_uuid = NULL;
char *client_uid = NULL;
char *clnt_version = NULL;
xlator_t *xl = NULL;
char *msg = NULL;
@ -393,7 +394,7 @@ server_setvolume (rpcsvc_request_t *req)
params->extra_free = buf;
buf = NULL;
ret = dict_get_str (params, "process-uuid", &process_uuid);
ret = dict_get_str (params, "process-uuid", &client_uid);
if (ret < 0) {
ret = dict_set_str (reply, "ERROR",
"UUID not specified");
@ -420,25 +421,26 @@ server_setvolume (rpcsvc_request_t *req)
goto fail;
}
conn = server_connection_get (this, process_uuid);
if (!conn) {
client = gf_client_get (this, &req->cred, client_uid);
if (client == NULL) {
op_ret = -1;
op_errno = ENOMEM;
goto fail;
}
gf_log (this->name, GF_LOG_DEBUG, "Connected to %s", conn->id);
cancelled = server_cancel_conn_timer (this, conn);
if (cancelled)//Do connection_put on behalf of grace-timer-handler.
server_connection_put (this, conn, NULL);
if (conn->lk_version != 0 &&
conn->lk_version != lk_version) {
(void) server_connection_cleanup (this, conn,
gf_log (this->name, GF_LOG_DEBUG, "Connected to %s",
client->server_ctx.client_uid);
cancelled = server_cancel_grace_timer (this, client);
if (cancelled)//Do gf_client_put on behalf of grace-timer-handler.
gf_client_put (client, NULL);
if (client->server_ctx.lk_version != 0 &&
client->server_ctx.lk_version != lk_version) {
(void) server_connection_cleanup (this, client,
INTERNAL_LOCKS | POSIX_LOCKS);
}
if (req->trans->xl_private != conn)
req->trans->xl_private = conn;
if (req->trans->xl_private != client)
req->trans->xl_private = client;
ret = dict_get_int32 (params, "fops-version", &fop_version);
if (ret < 0) {
@ -563,10 +565,10 @@ server_setvolume (rpcsvc_request_t *req)
gf_log (this->name, GF_LOG_INFO,
"accepted client from %s (version: %s)",
conn->id,
client->server_ctx.client_uid,
(clnt_version) ? clnt_version : "old");
op_ret = 0;
conn->bound_xl = xl;
client->bound_xl = xl;
ret = dict_set_str (reply, "ERROR", "Success");
if (ret < 0)
gf_log (this->name, GF_LOG_DEBUG,
@ -574,7 +576,7 @@ server_setvolume (rpcsvc_request_t *req)
} else {
gf_log (this->name, GF_LOG_ERROR,
"Cannot authenticate client from %s %s",
conn->id,
client->server_ctx.client_uid,
(clnt_version) ? clnt_version : "old");
op_ret = -1;
@ -586,7 +588,7 @@ server_setvolume (rpcsvc_request_t *req)
goto fail;
}
if (conn->bound_xl == NULL) {
if (client->bound_xl == NULL) {
ret = dict_set_str (reply, "ERROR",
"Check volfile and handshake "
"options in protocol/client");
@ -599,20 +601,21 @@ server_setvolume (rpcsvc_request_t *req)
goto fail;
}
if ((conn->bound_xl != NULL) &&
if ((client->bound_xl != NULL) &&
(ret >= 0) &&
(conn->bound_xl->itable == NULL)) {
(client->bound_xl->itable == NULL)) {
/* create inode table for this bound_xl, if one doesn't
already exist */
gf_log (this->name, GF_LOG_TRACE,
"creating inode table with lru_limit=%"PRId32", "
"xlator=%s", conf->inode_lru_limit,
conn->bound_xl->name);
client->bound_xl->name);
/* TODO: what is this ? */
conn->bound_xl->itable = inode_table_new (conf->inode_lru_limit,
conn->bound_xl);
client->bound_xl->itable =
inode_table_new (conf->inode_lru_limit,
client->bound_xl);
}
ret = dict_set_str (reply, "process-uuid",
@ -622,7 +625,7 @@ server_setvolume (rpcsvc_request_t *req)
"failed to set 'process-uuid'");
ret = dict_set_uint32 (reply, "clnt-lk-version",
conn->lk_version);
client->server_ctx.lk_version);
if (ret)
gf_log (this->name, GF_LOG_WARNING,
"failed to set 'clnt-lk-version'");
@ -664,15 +667,15 @@ fail:
* list of connections the server is maintaining and might segfault
* during statedump when bound_xl of the connection is accessed.
*/
if (op_ret && conn && !xl) {
if (op_ret && !xl) {
/* We would have set the xl_private of the transport to the
* @conn. But if we have put the connection i.e shutting down
* the connection, then we should set xl_private to NULL as it
* would be pointing to a freed memory and would segfault when
* accessed upon getting DISCONNECT.
*/
if (server_connection_put (this, conn, NULL) == NULL)
req->trans->xl_private = NULL;
gf_client_put (client, NULL);
req->trans->xl_private = NULL;
}
server_submit_reply (NULL, req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gf_setvolume_rsp);
@ -709,12 +712,12 @@ server_ping (rpcsvc_request_t *req)
int
server_set_lk_version (rpcsvc_request_t *req)
{
int op_ret = -1;
int op_errno = EINVAL;
gf_set_lk_ver_req args = {0, };
gf_set_lk_ver_rsp rsp = {0,};
server_connection_t *conn = NULL;
xlator_t *this = NULL;
int op_ret = -1;
int op_errno = EINVAL;
gf_set_lk_ver_req args = {0,};
gf_set_lk_ver_rsp rsp = {0,};
client_t *client = NULL;
xlator_t *this = NULL;
this = req->svc->mydata;
//TODO: Decide on an appropriate errno for the error-path
@ -730,9 +733,9 @@ server_set_lk_version (rpcsvc_request_t *req)
goto fail;
}
conn = server_connection_get (this, args.uid);
conn->lk_version = args.lk_ver;
server_connection_put (this, conn, NULL);
client = gf_client_get (this, &req->cred, args.uid);
client->server_ctx.lk_version = args.lk_ver;
gf_client_put (client, NULL);
rsp.lk_ver = args.lk_ver;

View File

@ -15,6 +15,8 @@
#include "server.h"
#include "server-helpers.h"
#include "client_t.h"
#include "lock-table.h"
#include <fnmatch.h>
@ -26,8 +28,8 @@ server_decode_groups (call_frame_t *frame, rpcsvc_request_t *req)
GF_VALIDATE_OR_GOTO ("server", frame, out);
GF_VALIDATE_OR_GOTO ("server", req, out);
if (call_stack_alloc_groups (frame->root, req->auxgidcount) != 0)
return -1;
if (call_stack_alloc_groups (frame->root, req->auxgidcount) != 0)
return -1;
frame->root->ngrps = req->auxgidcount;
if (frame->root->ngrps == 0)
@ -42,6 +44,7 @@ out:
return 0;
}
void
server_loc_wipe (loc_t *loc)
{
@ -73,9 +76,9 @@ server_resolve_wipe (server_resolve_t *resolve)
void
free_state (server_state_t *state)
{
if (state->conn) {
//xprt_svc_unref (state->conn);
state->conn = NULL;
if (state->client) {
/* should we gf_client_unref(state->client) here? */
state->client = NULL;
}
if (state->xprt) {
@ -126,121 +129,6 @@ free_state (server_state_t *state)
}
/*
 * Record that @owner (on behalf of @pid) holds a lock on @volume, so it
 * can be force-released when the connection dies.  Exactly one of
 * @fd/@loc is recorded: @fd (referenced) when non-NULL, else a copy of
 * @loc.  @type selects the entrylk vs inodelk list of conn->ltable.
 *
 * Returns 0 on success, -1 on allocation failure or bad arguments.
 * (Previously this always returned -1: `ret' was never set to 0 on the
 * success path; fixed to match the lock-table.c replacement.)
 */
int
gf_add_locker (server_connection_t *conn, const char *volume,
               loc_t *loc, fd_t *fd, pid_t pid, gf_lkowner_t *owner,
               glusterfs_fop_t type)
{
        int32_t             ret   = -1;
        struct _locker     *new   = NULL;
        struct _lock_table *table = NULL;

        GF_VALIDATE_OR_GOTO ("server", volume, out);

        new = GF_CALLOC (1, sizeof (struct _locker), gf_server_mt_locker_t);
        if (new == NULL)
                goto out;

        INIT_LIST_HEAD (&new->lockers);

        new->volume = gf_strdup (volume);
        if (new->volume == NULL) {
                /* don't enlist a locker we can't identify later */
                GF_FREE (new);
                goto out;
        }

        if (fd == NULL)
                loc_copy (&new->loc, loc);
        else
                new->fd = fd_ref (fd);

        new->pid   = pid;
        new->owner = *owner;

        pthread_mutex_lock (&conn->lock);
        {
                table = conn->ltable;
                if (type == GF_FOP_ENTRYLK)
                        list_add_tail (&new->lockers, &table->entrylk_lockers);
                else
                        list_add_tail (&new->lockers, &table->inodelk_lockers);
        }
        pthread_mutex_unlock (&conn->lock);

        ret = 0;
out:
        return ret;
}
/*
 * Remove at most one locker entry matching @owner and @volume (and, for
 * identity, either the same @fd or the same @loc->inode) from the
 * @type-selected list of conn->ltable, then release its resources.
 *
 * Returns 0 (even when nothing matched), -1 only on bad arguments.
 */
int
gf_del_locker (server_connection_t *conn, const char *volume,
               loc_t *loc, fd_t *fd, gf_lkowner_t *owner,
               glusterfs_fop_t type)
{
        struct _locker     *locker = NULL;
        struct _locker     *tmp    = NULL;
        int32_t             ret    = -1;
        struct list_head   *head   = NULL;
        struct _lock_table *table  = NULL;
        int                 found  = 0;

        GF_VALIDATE_OR_GOTO ("server", volume, out);

        pthread_mutex_lock (&conn->lock);
        {
                table = conn->ltable;
                if (type == GF_FOP_ENTRYLK) {
                        head = &table->entrylk_lockers;
                } else {
                        head = &table->inodelk_lockers;
                }

                list_for_each_entry_safe (locker, tmp, head, lockers) {
                        if (!is_same_lkowner (&locker->owner, owner) ||
                            strcmp (locker->volume, volume))
                                continue;

                        if (locker->fd && fd && (locker->fd == fd))
                                found = 1;
                        else if (locker->loc.inode && loc &&
                                 (locker->loc.inode == loc->inode))
                                found = 1;

                        /* only unlink inside the lock; the entry is
                         * freed below, after the mutex is dropped */
                        if (found) {
                                list_del_init (&locker->lockers);
                                break;
                        }
                }
                /* if the loop ran to completion, `locker' points at the
                 * list head container, not a real entry — reset it */
                if (!found)
                        locker = NULL;
        }
        pthread_mutex_unlock (&conn->lock);

        if (locker) {
                /* release whichever identity was recorded by gf_add_locker */
                if (locker->fd)
                        fd_unref (locker->fd);
                else
                        loc_wipe (&locker->loc);

                GF_FREE (locker->volume);
                GF_FREE (locker);
        }

        ret = 0;
out:
        return ret;
}
/*
 * Allocate an empty lock table with both locker lists initialised.
 * Returns NULL when the allocation fails.
 */
static struct _lock_table *
gf_lock_table_new (void)
{
        struct _lock_table *table = NULL;

        table = GF_CALLOC (1, sizeof (*table), gf_server_mt_lock_table_t);
        if (table != NULL) {
                INIT_LIST_HEAD (&table->entrylk_lockers);
                INIT_LIST_HEAD (&table->inodelk_lockers);
        }

        return table;
}
static int
server_nop_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
int32_t op_ret, int32_t op_errno, dict_t *xdata)
@ -252,12 +140,13 @@ server_nop_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
GF_VALIDATE_OR_GOTO ("server", cookie, out);
GF_VALIDATE_OR_GOTO ("server", this, out);
if (frame->root->trans)
server_conn_unref (frame->root->trans);
state = CALL_STATE(frame);
if (state)
if (state) {
gf_client_unref (state->client);
free_state (state);
}
STACK_DESTROY (frame->root);
ret = 0;
@ -265,23 +154,24 @@ out:
return ret;
}
int
do_lock_table_cleanup (xlator_t *this, server_connection_t *conn,
call_frame_t *frame, struct _lock_table *ltable)
static int
do_lock_table_cleanup (xlator_t *this, client_t *client, call_frame_t *frame,
struct _lock_table *ltable)
{
struct list_head inodelk_lockers, entrylk_lockers;
call_frame_t *tmp_frame = NULL;
struct gf_flock flock = {0, };
xlator_t *bound_xl = NULL;
struct _locker *locker = NULL, *tmp = NULL;
int ret = -1;
call_frame_t *tmp_frame = NULL;
xlator_t *bound_xl = NULL;
struct _locker *locker = NULL, *tmp = NULL;
char *path = NULL;
int ret = -1;
struct gf_flock flock = {0, };
struct list_head inodelk_lockers, entrylk_lockers;
GF_VALIDATE_OR_GOTO ("server", this, out);
GF_VALIDATE_OR_GOTO ("server", conn, out);
GF_VALIDATE_OR_GOTO ("server", frame, out);
GF_VALIDATE_OR_GOTO ("server", ltable, out);
bound_xl = conn->bound_xl;
bound_xl = client->bound_xl;
INIT_LIST_HEAD (&inodelk_lockers);
INIT_LIST_HEAD (&entrylk_lockers);
@ -294,8 +184,7 @@ do_lock_table_cleanup (xlator_t *this, server_connection_t *conn,
flock.l_type = F_UNLCK;
flock.l_start = 0;
flock.l_len = 0;
list_for_each_entry_safe (locker,
tmp, &inodelk_lockers, lockers) {
list_for_each_entry_safe (locker, tmp, &inodelk_lockers, lockers) {
tmp_frame = copy_frame (frame);
if (tmp_frame == NULL) {
goto out;
@ -304,14 +193,27 @@ do_lock_table_cleanup (xlator_t *this, server_connection_t *conn,
lock owner = 0 is a special case that tells posix-locks
to release all locks from this transport
*/
tmp_frame->root->pid = 0;
tmp_frame->root->trans = server_conn_ref (conn);
tmp_frame->root->pid = 0;
gf_client_ref (client);
tmp_frame->root->trans = client;
memset (&tmp_frame->root->lk_owner, 0, sizeof (gf_lkowner_t));
if (locker->fd) {
gf_log (this->name, GF_LOG_DEBUG, "finodelk "
"released on inode with gfid %s",
uuid_utoa (locker->fd->inode->gfid));
GF_ASSERT (locker->fd->inode);
ret = inode_path (locker->fd->inode, NULL, &path);
if (ret > 0) {
gf_log (this->name, GF_LOG_INFO,
"finodelk released on %s", path);
GF_FREE (path);
} else {
gf_log (this->name, GF_LOG_INFO,
"finodelk released on inode with gfid %s",
uuid_utoa (locker->fd->inode->gfid));
}
STACK_WIND (tmp_frame, server_nop_cbk, bound_xl,
bound_xl->fops->finodelk,
@ -319,8 +221,8 @@ do_lock_table_cleanup (xlator_t *this, server_connection_t *conn,
locker->fd, F_SETLK, &flock, NULL);
fd_unref (locker->fd);
} else {
gf_log (this->name, GF_LOG_DEBUG, "inodelk released "
"on %s", locker->loc.path);
gf_log (this->name, GF_LOG_INFO,
"inodelk released on %s", locker->loc.path);
STACK_WIND (tmp_frame, server_nop_cbk, bound_xl,
bound_xl->fops->inodelk,
@ -340,16 +242,26 @@ do_lock_table_cleanup (xlator_t *this, server_connection_t *conn,
list_for_each_entry_safe (locker, tmp, &entrylk_lockers, lockers) {
tmp_frame = copy_frame (frame);
tmp_frame->root->pid = 0;
tmp_frame->root->trans = server_conn_ref (conn);
tmp_frame->root->pid = 0;
gf_client_ref (client);
tmp_frame->root->trans = client;
memset (&tmp_frame->root->lk_owner, 0, sizeof (gf_lkowner_t));
if (locker->fd) {
GF_ASSERT (locker->fd->inode);
gf_log (this->name, GF_LOG_DEBUG, "fentrylk "
"released on inode with gfid %s",
uuid_utoa (locker->fd->inode->gfid));
ret = inode_path (locker->fd->inode, NULL, &path);
if (ret > 0) {
gf_log (this->name, GF_LOG_INFO,
"fentrylk released on %s", path);
GF_FREE (path);
} else {
gf_log (this->name, GF_LOG_INFO,
"fentrylk released on inode with gfid %s",
uuid_utoa (locker->fd->inode->gfid));
}
STACK_WIND (tmp_frame, server_nop_cbk, bound_xl,
bound_xl->fops->fentrylk,
@ -358,8 +270,8 @@ do_lock_table_cleanup (xlator_t *this, server_connection_t *conn,
ENTRYLK_UNLOCK, ENTRYLK_WRLCK, NULL);
fd_unref (locker->fd);
} else {
gf_log (this->name, GF_LOG_DEBUG, "entrylk released "
"on %s", locker->loc.path);
gf_log (this->name, GF_LOG_INFO,
"entrylk released on %s", locker->loc.path);
STACK_WIND (tmp_frame, server_nop_cbk, bound_xl,
bound_xl->fops->entrylk,
@ -386,20 +298,21 @@ server_connection_cleanup_flush_cbk (call_frame_t *frame, void *cookie,
xlator_t *this, int32_t op_ret,
int32_t op_errno, dict_t *xdata)
{
int32_t ret = -1;
fd_t *fd = NULL;
int32_t ret = -1;
fd_t *fd = NULL;
client_t *client = NULL;
GF_VALIDATE_OR_GOTO ("server", this, out);
GF_VALIDATE_OR_GOTO ("server", cookie, out);
GF_VALIDATE_OR_GOTO ("server", frame, out);
fd = frame->local;
client = frame->root->trans;
fd_unref (fd);
frame->local = NULL;
if (frame->root->trans)
server_conn_unref (frame->root->trans);
gf_client_unref (client);
STACK_DESTROY (frame->root);
ret = 0;
@ -408,8 +321,8 @@ out:
}
int
do_fd_cleanup (xlator_t *this, server_connection_t *conn, call_frame_t *frame,
static int
do_fd_cleanup (xlator_t *this, client_t* client, call_frame_t *frame,
fdentry_t *fdentries, int fd_count)
{
fd_t *fd = NULL;
@ -419,11 +332,10 @@ do_fd_cleanup (xlator_t *this, server_connection_t *conn, call_frame_t *frame,
char *path = NULL;
GF_VALIDATE_OR_GOTO ("server", this, out);
GF_VALIDATE_OR_GOTO ("server", conn, out);
GF_VALIDATE_OR_GOTO ("server", frame, out);
GF_VALIDATE_OR_GOTO ("server", fdentries, out);
bound_xl = conn->bound_xl;
bound_xl = client->bound_xl;
for (i = 0;i < fd_count; i++) {
fd = fdentries[i].fd;
@ -438,20 +350,21 @@ do_fd_cleanup (xlator_t *this, server_connection_t *conn, call_frame_t *frame,
ret = inode_path (fd->inode, NULL, &path);
if (ret > 0) {
gf_log (this->name, GF_LOG_INFO, "fd cleanup on "
"%s", path);
gf_log (this->name, GF_LOG_INFO,
"fd cleanup on %s", path);
GF_FREE (path);
} else {
gf_log (this->name, GF_LOG_INFO, "fd cleanup on"
" inode with gfid %s",
gf_log (this->name, GF_LOG_INFO,
"fd cleanup on inode with gfid %s",
uuid_utoa (fd->inode->gfid));
}
tmp_frame->local = fd;
tmp_frame->root->pid = 0;
tmp_frame->root->trans = server_conn_ref (conn);
gf_client_ref (client);
tmp_frame->root->trans = client;
memset (&tmp_frame->root->lk_owner, 0,
sizeof (gf_lkowner_t));
@ -468,17 +381,18 @@ out:
return ret;
}
int
do_connection_cleanup (xlator_t *this, server_connection_t *conn,
struct _lock_table *ltable, fdentry_t *fdentries, int fd_count)
static int
do_connection_cleanup (xlator_t *this, client_t *client,
struct _lock_table *ltable,
fdentry_t *fdentries, int fd_count)
{
int ret = 0;
int saved_ret = 0;
call_frame_t *frame = NULL;
server_state_t *state = NULL;
int ret = 0;
int saved_ret = 0;
call_frame_t *frame = NULL;
server_state_t *state = NULL;
GF_VALIDATE_OR_GOTO ("server", this, out);
GF_VALIDATE_OR_GOTO ("server", conn, out);
if (!ltable && !fdentries)
goto out;
@ -489,10 +403,10 @@ do_connection_cleanup (xlator_t *this, server_connection_t *conn,
}
if (ltable)
saved_ret = do_lock_table_cleanup (this, conn, frame, ltable);
saved_ret = do_lock_table_cleanup (this, client, frame, ltable);
if (fdentries != NULL) {
ret = do_fd_cleanup (this, conn, frame, fdentries, fd_count);
ret = do_fd_cleanup (this, client, frame, fdentries, fd_count);
}
state = CALL_STATE (frame);
@ -509,271 +423,60 @@ out:
}
int
server_connection_cleanup (xlator_t *this, server_connection_t *conn,
server_connection_cleanup (xlator_t *this, client_t *client,
int32_t flags)
{
struct _lock_table *ltable = NULL;
fdentry_t *fdentries = NULL;
uint32_t fd_count = 0;
int ret = 0;
struct _lock_table *ltable = NULL;
fdentry_t *fdentries = NULL;
uint32_t fd_count = 0;
int ret = 0;
GF_VALIDATE_OR_GOTO (this->name, this, out);
GF_VALIDATE_OR_GOTO (this->name, conn, out);
GF_VALIDATE_OR_GOTO (this->name, client, out);
GF_VALIDATE_OR_GOTO (this->name, flags, out);
pthread_mutex_lock (&conn->lock);
LOCK (&client->locks_ctx.ltable_lock);
{
if (conn->ltable && (flags & INTERNAL_LOCKS)) {
ltable = conn->ltable;
conn->ltable = gf_lock_table_new ();
if (client->locks_ctx.ltable && (flags & INTERNAL_LOCKS)) {
ltable = client->locks_ctx.ltable;
client->locks_ctx.ltable = gf_lock_table_new ();
}
}
UNLOCK (&client->locks_ctx.ltable_lock);
if (conn->fdtable && (flags & POSIX_LOCKS))
fdentries = gf_fd_fdtable_get_all_fds (conn->fdtable,
LOCK (&client->server_ctx.fdtable_lock);
{
if (client->server_ctx.fdtable && (flags & POSIX_LOCKS))
fdentries = gf_fd_fdtable_get_all_fds (client->server_ctx.fdtable,
&fd_count);
}
pthread_mutex_unlock (&conn->lock);
UNLOCK (&client->server_ctx.fdtable_lock);
if (conn->bound_xl)
ret = do_connection_cleanup (this, conn, ltable,
fdentries, fd_count);
if (client->bound_xl)
ret = do_connection_cleanup (this, client, ltable, fdentries,
fd_count);
out:
return ret;
}
/*
 * Log (INFO) that @conn is being destroyed, appending a per-fop summary
 * of replies this connection failed to send (conn->rsp_failure_fops).
 * Best-effort: if the summary buffer cannot be allocated, only the
 * plain "destroyed connection" line is logged.
 */
void
server_log_conn_destroy (server_connection_t *conn)
{
        int i = 0;
        char *rsp_failures_msg = NULL;
        char *free_ptr = NULL;       /* set only when we own the buffer */
        char *msg = NULL;
        char *failed_to_rsp = "";
        char *sep1 = " - ";
        char *sep2 = ", ";
        int msg_len = 0;

        /* worst case: every fop name plus a count and both separators */
        for (i = GF_FOP_NULL + 1; i < GF_FOP_MAXVALUE; i++) {
                msg_len += strlen (gf_fop_list[i]);
                msg_len += 20; //Max len of uint64_t is 20
                //Separators for fop-string, count
                msg_len += strlen (sep1) + strlen (sep2);
        }

        rsp_failures_msg = GF_CALLOC (msg_len + 1, 1, gf_common_mt_char);
        if (rsp_failures_msg == NULL) {
                rsp_failures_msg = "";   /* string literal; free_ptr stays NULL */
                goto out;
        } else {
                free_ptr = rsp_failures_msg;
        }

        msg = rsp_failures_msg;
        for (i = GF_FOP_NULL + 1; i < GF_FOP_MAXVALUE; i++) {
                if (!conn->rsp_failure_fops[i])
                        continue;
                //Note: Please make sure the size is calculated appropriately
                //if you plan to change the format string.
                msg += sprintf (msg, "%s%s%"PRIu64"%s", gf_fop_list[i], sep1,
                                conn->rsp_failure_fops[i], sep2);
        }

        if (rsp_failures_msg[0]) {
                failed_to_rsp = " - Failed to respond to following operations:";
                //Remove last comma
                rsp_failures_msg[strlen (rsp_failures_msg) - 2] = '\0';
        }
out:
        gf_log (conn->this->name, GF_LOG_INFO, "destroyed connection of "
                "%s %s %s", conn->id, failed_to_rsp, rsp_failures_msg);
        GF_FREE (free_ptr);   /* NULL when allocation failed above */
}
/*
 * Final teardown of a connection whose reference count reached zero:
 * detach and free its lock table and fd table (only when a volume was
 * bound), log the destruction, destroy the mutex and free the object.
 *
 * Returns 0 on success, -1 on bad arguments.
 */
int
server_connection_destroy (xlator_t *this, server_connection_t *conn)
{
        xlator_t *bound_xl = NULL;
        int32_t ret = -1;
        struct list_head inodelk_lockers;
        struct list_head entrylk_lockers;
        struct _lock_table *ltable = NULL;
        fdtable_t *fdtable = NULL;

        GF_VALIDATE_OR_GOTO ("server", this, out);
        GF_VALIDATE_OR_GOTO ("server", conn, out);

        bound_xl = (xlator_t *) (conn->bound_xl);

        if (bound_xl) {
                /* detach the tables under the lock; dispose of them
                 * after dropping it */
                pthread_mutex_lock (&(conn->lock));
                {
                        if (conn->ltable) {
                                ltable = conn->ltable;
                                conn->ltable = NULL;
                        }
                        if (conn->fdtable) {
                                fdtable = conn->fdtable;
                                conn->fdtable = NULL;
                        }
                }
                pthread_mutex_unlock (&conn->lock);

                INIT_LIST_HEAD (&inodelk_lockers);
                INIT_LIST_HEAD (&entrylk_lockers);

                if (ltable) {
                        list_splice_init (&ltable->inodelk_lockers,
                                          &inodelk_lockers);
                        list_splice_init (&ltable->entrylk_lockers,
                                          &entrylk_lockers);
                        GF_FREE (ltable);
                }

                /* both lists are expected to be empty by now — any
                 * surviving locker entries would simply be leaked */
                GF_ASSERT (list_empty (&inodelk_lockers));
                GF_ASSERT (list_empty (&entrylk_lockers));

                if (fdtable)
                        gf_fd_fdtable_destroy (fdtable);
        }

        server_log_conn_destroy (conn);

        pthread_mutex_destroy (&conn->lock);
        GF_FREE (conn->id);
        GF_FREE (conn);
        ret = 0;
out:
        return ret;
}
/*
 * Drop one operation reference on @conn.  When the count hits zero the
 * connection is destroyed and NULL is returned; otherwise @conn is
 * returned unchanged.  The decision is made under conn->lock, but the
 * actual destruction happens after the unlock, since
 * server_connection_destroy () destroys that very mutex.
 */
server_connection_t*
server_conn_unref (server_connection_t *conn)
{
        server_connection_t *todel = NULL;
        xlator_t *this = NULL;

        pthread_mutex_lock (&conn->lock);
        {
                conn->ref--;

                if (!conn->ref) {
                        todel = conn;
                }
        }
        pthread_mutex_unlock (&conn->lock);

        if (todel) {
                this = THIS;
                server_connection_destroy (this, todel);
                conn = NULL;   /* signal to the caller that conn is gone */
        }
        return conn;
}
/*
 * Take one operation reference on @conn under its lock and hand the
 * connection back so the call can be used inline by callers.
 */
server_connection_t*
server_conn_ref (server_connection_t *conn)
{
        pthread_mutex_lock (&conn->lock);
        conn->ref++;
        pthread_mutex_unlock (&conn->lock);

        return conn;
}
/*
 * Look up the connection identified by @id in conf->conns, taking an
 * extra bind reference; when no match exists, allocate, initialise and
 * enlist a fresh connection (bind_ref = ref = 1).
 *
 * Returns the connection, or NULL on bad arguments / allocation
 * failure.  (Previously, failures of gf_strdup/gf_fd_fdtable_alloc/
 * gf_lock_table_new were ignored and a half-initialised connection was
 * enlisted, crashing every later user of its NULL fdtable/ltable.)
 */
server_connection_t *
server_connection_get (xlator_t *this, const char *id)
{
        server_connection_t *conn = NULL;
        server_connection_t *trav = NULL;
        server_conf_t       *conf = NULL;

        GF_VALIDATE_OR_GOTO ("server", this, out);
        GF_VALIDATE_OR_GOTO ("server", id, out);

        conf = this->private;

        pthread_mutex_lock (&conf->mutex);
        {
                list_for_each_entry (trav, &conf->conns, list) {
                        if (!strcmp (trav->id, id)) {
                                conn = trav;
                                conn->bind_ref++;
                                goto unlock;
                        }
                }

                conn = GF_CALLOC (1, sizeof (*conn), gf_server_mt_conn_t);
                if (!conn)
                        goto unlock;

                conn->id      = gf_strdup (id);
                conn->fdtable = gf_fd_fdtable_alloc ();
                conn->ltable  = gf_lock_table_new ();

                /* bail out instead of enlisting a half-initialised
                 * connection */
                if (!conn->id || !conn->fdtable || !conn->ltable) {
                        if (conn->fdtable)
                                gf_fd_fdtable_destroy (conn->fdtable);
                        GF_FREE (conn->ltable);
                        GF_FREE (conn->id);
                        GF_FREE (conn);
                        conn = NULL;
                        goto unlock;
                }

                /*'0' denotes uninitialised lock state*/
                conn->lk_version = 0;
                conn->this       = this;
                conn->bind_ref   = 1;
                conn->ref        = 1;//when bind_ref becomes 0 it calls conn_unref

                pthread_mutex_init (&conn->lock, NULL);

                list_add (&conn->list, &conf->conns);
        }
unlock:
        pthread_mutex_unlock (&conf->mutex);
out:
        return conn;
}
/*
 * Drop one bind reference (the counterpart of server_connection_get).
 * When the last bind reference goes away: unlink the connection from
 * conf->conns, set *detached to _gf_true (if @detached was supplied),
 * and release the base operation reference via server_conn_unref,
 * which may destroy the connection entirely.
 *
 * Returns @conn while it remains bound, NULL once it was detached.
 */
server_connection_t*
server_connection_put (xlator_t *this, server_connection_t *conn,
                       gf_boolean_t *detached)
{
        server_conf_t *conf = NULL;
        gf_boolean_t unref = _gf_false;

        if (detached)
                *detached = _gf_false;
        conf = this->private;
        pthread_mutex_lock (&conf->mutex);
        {
                conn->bind_ref--;
                if (!conn->bind_ref) {
                        list_del_init (&conn->list);
                        /* unref outside the conf->mutex */
                        unref = _gf_true;
                }
        }
        pthread_mutex_unlock (&conf->mutex);
        if (unref) {
                gf_log (this->name, GF_LOG_INFO, "Shutting down connection %s",
                        conn->id);
                if (detached)
                        *detached = _gf_true;
                server_conn_unref (conn);
                conn = NULL;   /* conn may already be freed at this point */
        }
        return conn;
}
static call_frame_t *
server_alloc_frame (rpcsvc_request_t *req)
{
call_frame_t *frame = NULL;
server_state_t *state = NULL;
server_connection_t *conn = NULL;
call_frame_t *frame = NULL;
server_state_t *state = NULL;
client_t *client = NULL;
GF_VALIDATE_OR_GOTO ("server", req, out);
GF_VALIDATE_OR_GOTO ("server", req->trans, out);
GF_VALIDATE_OR_GOTO ("server", req->svc, out);
GF_VALIDATE_OR_GOTO ("server", req->svc->ctx, out);
conn = (server_connection_t *)req->trans->xl_private;
GF_VALIDATE_OR_GOTO ("server", conn, out);
client = req->trans->xl_private;
GF_VALIDATE_OR_GOTO ("server", client, out);
frame = create_frame (conn->this, req->svc->ctx->pool);
frame = create_frame (client->this, req->svc->ctx->pool);
if (!frame)
goto out;
@ -781,11 +484,11 @@ server_alloc_frame (rpcsvc_request_t *req)
if (!state)
goto out;
if (conn->bound_xl)
state->itable = conn->bound_xl->itable;
if (client->bound_xl)
state->itable = client->bound_xl->itable;
state->xprt = rpc_transport_ref (req->trans);
state->conn = conn;
state->client = client;
state->resolve.fd_no = -1;
state->resolve2.fd_no = -1;
@ -793,20 +496,22 @@ server_alloc_frame (rpcsvc_request_t *req)
frame->root->state = state; /* which socket */
frame->root->unique = 0; /* which call */
frame->this = conn->this;
frame->this = client->this;
out:
return frame;
}
call_frame_t *
get_frame_from_request (rpcsvc_request_t *req)
{
call_frame_t *frame = NULL;
call_frame_t *frame = NULL;
client_t *client = NULL;
GF_VALIDATE_OR_GOTO ("server", req, out);
client = req->trans->xl_private;
frame = server_alloc_frame (req);
if (!frame)
goto out;
@ -818,7 +523,8 @@ get_frame_from_request (rpcsvc_request_t *req)
frame->root->uid = req->uid;
frame->root->gid = req->gid;
frame->root->pid = req->pid;
frame->root->trans = server_conn_ref (req->trans->xl_private);
gf_client_ref (client);
frame->root->trans = client;
frame->root->lk_owner = req->lk_owner;
server_decode_groups (frame, req);
@ -903,84 +609,6 @@ out:
return ret;
}
/*
 * Detach the connection state from @xprt by clearing its xl_private
 * back-pointer.  Does not free the connection itself.
 */
void
put_server_conn_state (xlator_t *this, rpc_transport_t *xprt)
{
        GF_VALIDATE_OR_GOTO ("server", this, out);
        GF_VALIDATE_OR_GOTO ("server", xprt, out);

        xprt->xl_private = NULL;
out:
        return;
}
/*
 * Fetch the connection state previously attached to @xprt->xl_private
 * (by create_server_conn_state).  Returns NULL on bad arguments, or
 * whatever is stored there (possibly NULL) otherwise.
 */
server_connection_t *
get_server_conn_state (xlator_t *this, rpc_transport_t *xprt)
{
        GF_VALIDATE_OR_GOTO ("server", this, out);
        GF_VALIDATE_OR_GOTO ("server", xprt, out);

        return (server_connection_t *)xprt->xl_private;
out:
        return NULL;
}
/*
 * Allocate and initialise per-transport connection state (mutex, fd
 * table, lock table) and attach it to @xprt->xl_private.
 *
 * Returns the new connection, or NULL on failure.  (Previously, a
 * partial-initialisation failure called destroy_server_conn_state —
 * which frees conn — and then still returned the freed pointer,
 * handing the caller a dangling connection.)
 */
server_connection_t *
create_server_conn_state (xlator_t *this, rpc_transport_t *xprt)
{
        server_connection_t *conn = NULL;
        int                  ret = -1;

        GF_VALIDATE_OR_GOTO ("server", this, out);
        GF_VALIDATE_OR_GOTO ("server", xprt, out);

        conn = GF_CALLOC (1, sizeof (*conn), gf_server_mt_conn_t);
        if (!conn)
                goto out;

        pthread_mutex_init (&conn->lock, NULL);

        conn->fdtable = gf_fd_fdtable_alloc ();
        if (!conn->fdtable)
                goto out;

        conn->ltable = gf_lock_table_new ();
        if (!conn->ltable)
                goto out;

        conn->this = this;

        xprt->xl_private = conn;

        ret = 0;
out:
        if (ret) {
                /* destroy_server_conn_state frees conn (and tolerates
                 * conn == NULL); never hand back the dangling pointer */
                destroy_server_conn_state (conn);
                conn = NULL;
        }

        return conn;
}
/*
 * Free per-transport connection state: destroy the fd table and the
 * mutex, then free the container.  Safe to call with a partially
 * initialised conn (NULL tables); a NULL conn just logs and returns.
 *
 * NOTE(review): conn->ltable is intentionally NOT freed here (see the
 * TODO below), so a non-NULL ltable leaks — presumably it is expected
 * to be NULL/empty by this point; confirm against callers.
 */
void
destroy_server_conn_state (server_connection_t *conn)
{
        GF_VALIDATE_OR_GOTO ("server", conn, out);

        if (conn->ltable) {
                /* TODO */
                //FREE (conn->ltable);
                ;
        }

        if (conn->fdtable)
                gf_fd_fdtable_destroy (conn->fdtable);

        pthread_mutex_destroy (&conn->lock);

        GF_FREE (conn);
out:
        return;
}
void
print_caller (char *str, int size, call_frame_t *frame)
@ -1108,11 +736,12 @@ server_print_params (char *str, int size, server_state_t *state)
"volume=%s,", state->volume);
snprintf (str + filled, size - filled,
"bound_xl=%s}", state->conn->bound_xl->name);
"bound_xl=%s}", state->client->bound_xl->name);
out:
return;
}
int
server_resolve_is_empty (server_resolve_t *resolve)
{
@ -1128,6 +757,7 @@ server_resolve_is_empty (server_resolve_t *resolve)
return 1;
}
void
server_print_reply (call_frame_t *frame, int op_ret, int op_errno)
{
@ -1173,16 +803,16 @@ out:
void
server_print_request (call_frame_t *frame)
{
server_conf_t *conf = NULL;
xlator_t *this = NULL;
server_conf_t *conf = NULL;
xlator_t *this = NULL;
server_state_t *state = NULL;
char *op = "UNKNOWN";
char resolve_vars[256];
char resolve2_vars[256];
char loc_vars[256];
char loc2_vars[256];
char other_vars[512];
char caller[512];
char *op = "UNKNOWN";
GF_VALIDATE_OR_GOTO ("server", frame, out);
@ -1233,13 +863,14 @@ out:
return;
}
int
serialize_rsp_direntp (gf_dirent_t *entries, gfs3_readdirp_rsp *rsp)
{
gf_dirent_t *entry = NULL;
gfs3_dirplist *trav = NULL;
gfs3_dirplist *prev = NULL;
int ret = -1;
gfs3_dirplist *trav = NULL;
gfs3_dirplist *prev = NULL;
int ret = -1;
GF_VALIDATE_OR_GOTO ("server", entries, out);
GF_VALIDATE_OR_GOTO ("server", rsp, out);
@ -1307,10 +938,10 @@ out:
int
serialize_rsp_dirent (gf_dirent_t *entries, gfs3_readdir_rsp *rsp)
{
gf_dirent_t *entry = NULL;
gfs3_dirlist *trav = NULL;
gfs3_dirlist *prev = NULL;
int ret = -1;
gf_dirent_t *entry = NULL;
gfs3_dirlist *trav = NULL;
gfs3_dirlist *prev = NULL;
int ret = -1;
GF_VALIDATE_OR_GOTO ("server", entries, out);
GF_VALIDATE_OR_GOTO ("server", rsp, out);
@ -1337,11 +968,12 @@ out:
return ret;
}
int
readdir_rsp_cleanup (gfs3_readdir_rsp *rsp)
{
gfs3_dirlist *prev = NULL;
gfs3_dirlist *trav = NULL;
gfs3_dirlist *prev = NULL;
gfs3_dirlist *trav = NULL;
trav = rsp->reply;
prev = trav;
@ -1354,6 +986,7 @@ readdir_rsp_cleanup (gfs3_readdir_rsp *rsp)
return 0;
}
int
readdirp_rsp_cleanup (gfs3_readdirp_rsp *rsp)
{
@ -1372,6 +1005,7 @@ readdirp_rsp_cleanup (gfs3_readdirp_rsp *rsp)
return 0;
}
int
gf_server_check_getxattr_cmd (call_frame_t *frame, const char *key)
{
@ -1400,13 +1034,14 @@ gf_server_check_getxattr_cmd (call_frame_t *frame, const char *key)
return 0;
}
int
gf_server_check_setxattr_cmd (call_frame_t *frame, dict_t *dict)
{
server_conf_t *conf = NULL;
rpc_transport_t *xprt = NULL;
uint64_t total_read = 0;
server_conf_t *conf = NULL;
rpc_transport_t *xprt = NULL;
uint64_t total_read = 0;
uint64_t total_write = 0;
conf = frame->this->private;
@ -1427,28 +1062,27 @@ gf_server_check_setxattr_cmd (call_frame_t *frame, dict_t *dict)
return 0;
}
gf_boolean_t
server_cancel_conn_timer (xlator_t *this, server_connection_t *conn)
{
gf_timer_t *timer = NULL;
gf_boolean_t cancelled = _gf_false;
if (!this || !conn) {
gf_log (THIS->name, GF_LOG_ERROR, "Invalid arguments to "
"cancel connection timer");
gf_boolean_t
server_cancel_grace_timer (xlator_t *this, client_t *client)
{
gf_timer_t *timer = NULL;
gf_boolean_t cancelled = _gf_false;
if (!this || !client) {
gf_log (THIS->name, GF_LOG_ERROR,
"Invalid arguments to cancel connection timer");
return cancelled;
}
pthread_mutex_lock (&conn->lock);
LOCK (&client->server_ctx.fdtable_lock);
{
if (!conn->timer)
goto unlock;
timer = conn->timer;
conn->timer = NULL;
if (client->server_ctx.grace_timer) {
timer = client->server_ctx.grace_timer;
client->server_ctx.grace_timer = NULL;
}
}
unlock:
pthread_mutex_unlock (&conn->lock);
UNLOCK (&client->server_ctx.fdtable_lock);
if (timer) {
gf_timer_call_cancel (this->ctx, timer);

View File

@ -15,13 +15,10 @@
#define CALL_STATE(frame) ((server_state_t *)frame->root->state)
#define BOUND_XL(frame) ((xlator_t *) CALL_STATE(frame)->conn->bound_xl)
#define BOUND_XL(frame) ((xlator_t *) CALL_STATE(frame)->client->bound_xl)
#define XPRT_FROM_FRAME(frame) ((rpc_transport_t *) CALL_STATE(frame)->xprt)
#define SERVER_CONNECTION(frame) \
((server_connection_t *) CALL_STATE(frame)->conn)
#define SERVER_CONF(frame) \
((server_conf_t *)XPRT_FROM_FRAME(frame)->this->private)
@ -38,41 +35,18 @@ void free_state (server_state_t *state);
void server_loc_wipe (loc_t *loc);
int32_t
gf_add_locker (server_connection_t *conn, const char *volume,
loc_t *loc,
fd_t *fd,
pid_t pid,
gf_lkowner_t *owner,
glusterfs_fop_t type);
int32_t
gf_del_locker (server_connection_t *conn, const char *volume,
loc_t *loc,
fd_t *fd,
gf_lkowner_t *owner,
glusterfs_fop_t type);
void
server_print_request (call_frame_t *frame);
call_frame_t *
get_frame_from_request (rpcsvc_request_t *req);
int
server_connection_cleanup (xlator_t *this, struct _client_t *client,
int32_t flags);
gf_boolean_t
server_cancel_conn_timer (xlator_t *this, server_connection_t *conn);
void
put_server_conn_state (xlator_t *this, rpc_transport_t *xprt);
server_connection_t *
get_server_conn_state (xlator_t *this, rpc_transport_t *xptr);
server_connection_t *
create_server_conn_state (xlator_t *this, rpc_transport_t *xptr);
void
destroy_server_conn_state (server_connection_t *conn);
server_cancel_grace_timer (xlator_t *this, struct _client_t *client);
int
server_build_config (xlator_t *this, server_conf_t *conf);

View File

@ -15,6 +15,7 @@
#include "server.h"
#include "server-helpers.h"
#include "client_t.h"
int
@ -451,12 +452,10 @@ server_resolve_fd (call_frame_t *frame)
{
server_state_t *state = NULL;
server_resolve_t *resolve = NULL;
server_connection_t *conn = NULL;
uint64_t fd_no = -1;
state = CALL_STATE (frame);
resolve = state->resolve_now;
conn = SERVER_CONNECTION (frame);
fd_no = resolve->fd_no;
@ -465,7 +464,7 @@ server_resolve_fd (call_frame_t *frame)
return 0;
}
state->fd = gf_fd_fdptr_get (conn->fdtable, fd_no);
state->fd = gf_fd_fdptr_get (state->client->server_ctx.fdtable, fd_no);
if (!state->fd) {
gf_log ("", GF_LOG_INFO, "fd not found in context");

File diff suppressed because it is too large Load Diff

View File

@ -26,32 +26,49 @@
#include "defaults.h"
#include "authenticate.h"
#include "rpcsvc.h"
#include "client_t.h"
void
grace_time_handler (void *data)
{
server_connection_t *conn = NULL;
xlator_t *this = NULL;
gf_boolean_t cancelled = _gf_false;
gf_boolean_t detached = _gf_false;
client_t *client = NULL;
xlator_t *this = NULL;
gf_timer_t *timer = NULL;
gf_boolean_t cancelled = _gf_false;
gf_boolean_t detached = _gf_false;
conn = data;
this = conn->this;
client = data;
this = client->this;
GF_VALIDATE_OR_GOTO (THIS->name, conn, out);
GF_VALIDATE_OR_GOTO (THIS->name, this, out);
gf_log (this->name, GF_LOG_INFO, "grace timer expired for %s", conn->id);
gf_log (this->name, GF_LOG_INFO, "grace timer expired for %s",
client->server_ctx.client_uid);
cancelled = server_cancel_conn_timer (this, conn);
LOCK (&client->server_ctx.fdtable_lock);
{
if (client->server_ctx.grace_timer) {
timer = client->server_ctx.grace_timer;
client->server_ctx.grace_timer = NULL;
}
}
UNLOCK (&client->server_ctx.fdtable_lock);
if (timer) {
gf_timer_call_cancel (this->ctx, timer);
cancelled = _gf_true;
}
if (cancelled) {
//conn should not be destroyed in conn_put, so take a ref.
server_conn_ref (conn);
server_connection_put (this, conn, &detached);
/*
* client must not be destroyed in gf_client_put(),
* so take a ref.
*/
gf_client_ref (client);
gf_client_put (client, &detached);
if (detached)//reconnection did not happen :-(
server_connection_cleanup (this, conn,
server_connection_cleanup (this, client,
INTERNAL_LOCKS | POSIX_LOCKS);
server_conn_unref (conn);
gf_client_unref (client);
}
out:
return;
@ -119,20 +136,19 @@ server_submit_reply (call_frame_t *frame, rpcsvc_request_t *req, void *arg,
struct iovec rsp = {0,};
server_state_t *state = NULL;
char new_iobref = 0;
server_connection_t *conn = NULL;
client_t *client = NULL;
gf_boolean_t lk_heal = _gf_false;
glusterfs_fop_t fop = GF_FOP_NULL;
GF_VALIDATE_OR_GOTO ("server", req, ret);
if (frame) {
state = CALL_STATE (frame);
frame->local = NULL;
conn = SERVER_CONNECTION(frame);
client = state->client;
}
if (conn)
lk_heal = ((server_conf_t *) conn->this->private)->lk_heal;
if (client)
lk_heal = ((server_conf_t *) client->this->private)->lk_heal;
if (!iobref) {
iobref = iobref_new ();
@ -165,17 +181,9 @@ server_submit_reply (call_frame_t *frame, rpcsvc_request_t *req, void *arg,
*/
iobuf_unref (iob);
if (ret == -1) {
if (frame && conn && !lk_heal) {
fop = frame->root->op;
if ((GF_FOP_NULL < fop) &&
(fop < GF_FOP_MAXVALUE)) {
pthread_mutex_lock (&conn->lock);
{
conn->rsp_failure_fops[fop]++;
}
pthread_mutex_unlock (&conn->lock);
}
server_connection_cleanup (frame->this, conn,
gf_log_callingfn ("", GF_LOG_ERROR, "Reply submission failed");
if (frame && client && !lk_heal) {
server_connection_cleanup (frame->this, client,
INTERNAL_LOCKS | POSIX_LOCKS);
} else {
gf_log_callingfn ("", GF_LOG_ERROR,
@ -193,8 +201,7 @@ ret:
}
if (frame) {
if (frame->root->trans)
server_conn_unref (frame->root->trans);
gf_client_unref (client);
STACK_DESTROY (frame->root);
}
@ -205,173 +212,6 @@ ret:
return ret;
}
/* */
int
server_fd_to_dict (xlator_t *this, dict_t *dict)
{
server_conf_t *conf = NULL;
server_connection_t *trav = NULL;
char key[GF_DUMP_MAX_BUF_LEN] = {0,};
int count = 0;
int ret = -1;
GF_VALIDATE_OR_GOTO (THIS->name, this, out);
GF_VALIDATE_OR_GOTO (this->name, dict, out);
conf = this->private;
if (!conf)
return -1;
ret = pthread_mutex_trylock (&conf->mutex);
if (ret)
return -1;
list_for_each_entry (trav, &conf->conns, list) {
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "conn%d", count++);
fdtable_dump_to_dict (trav->fdtable, key, dict);
}
pthread_mutex_unlock (&conf->mutex);
ret = dict_set_int32 (dict, "conncount", count);
out:
return ret;
}
int
server_fd (xlator_t *this)
{
server_conf_t *conf = NULL;
server_connection_t *trav = NULL;
char key[GF_DUMP_MAX_BUF_LEN];
int i = 1;
int ret = -1;
gf_boolean_t section_added = _gf_false;
GF_VALIDATE_OR_GOTO ("server", this, out);
conf = this->private;
if (!conf) {
gf_log (this->name, GF_LOG_WARNING,
"conf null in xlator");
return -1;
}
gf_proc_dump_add_section("xlator.protocol.server.conn");
section_added = _gf_true;
ret = pthread_mutex_trylock (&conf->mutex);
if (ret)
goto out;
list_for_each_entry (trav, &conf->conns, list) {
if (trav->id) {
gf_proc_dump_build_key(key,
"conn","%d.id", i);
gf_proc_dump_write(key, "%s", trav->id);
}
gf_proc_dump_build_key(key,"conn","%d.ref",i)
gf_proc_dump_write(key, "%d", trav->ref);
if (trav->bound_xl) {
gf_proc_dump_build_key(key,
"conn","%d.bound_xl", i);
gf_proc_dump_write(key, "%s", trav->bound_xl->name);
}
gf_proc_dump_build_key(key,
"conn","%d.id", i);
fdtable_dump(trav->fdtable,key);
i++;
}
pthread_mutex_unlock (&conf->mutex);
ret = 0;
out:
if (ret) {
if (section_added == _gf_false)
gf_proc_dump_add_section("xlator.protocol.server.conn");
gf_proc_dump_write ("Unable to dump the list of connections",
"(Lock acquisition failed) %s",
this?this->name:"server");
}
return ret;
}
void
ltable_dump (server_connection_t *trav)
{
char key[GF_DUMP_MAX_BUF_LEN] = {0,};
struct _locker *locker = NULL;
char locker_data[GF_MAX_LOCK_OWNER_LEN] = {0,};
int count = 0;
gf_proc_dump_build_key(key,
"conn","bound_xl.ltable.inodelk.%s",
trav->bound_xl?trav->bound_xl->name:"");
gf_proc_dump_add_section(key);
list_for_each_entry (locker, &trav->ltable->inodelk_lockers, lockers) {
count++;
gf_proc_dump_write("volume", "%s", locker->volume);
if (locker->fd) {
gf_proc_dump_write("fd", "%p", locker->fd);
gf_proc_dump_write("gfid", "%s",
uuid_utoa (locker->fd->inode->gfid));
} else {
gf_proc_dump_write("fd", "%s", locker->loc.path);
gf_proc_dump_write("gfid", "%s",
uuid_utoa (locker->loc.inode->gfid));
}
gf_proc_dump_write("pid", "%d", locker->pid);
gf_proc_dump_write("lock length", "%d", locker->owner.len);
lkowner_unparse (&locker->owner, locker_data,
locker->owner.len);
gf_proc_dump_write("lock owner", "%s", locker_data);
memset (locker_data, 0, sizeof (locker_data));
gf_proc_dump_build_key (key, "inode", "%d", count);
gf_proc_dump_add_section (key);
if (locker->fd)
inode_dump (locker->fd->inode, key);
else
inode_dump (locker->loc.inode, key);
}
count = 0;
locker = NULL;
gf_proc_dump_build_key(key,
"conn","bound_xl.ltable.entrylk.%s",
trav->bound_xl?trav->bound_xl->name:"");
gf_proc_dump_add_section(key);
list_for_each_entry (locker, &trav->ltable->entrylk_lockers,
lockers) {
count++;
gf_proc_dump_write("volume", "%s", locker->volume);
if (locker->fd) {
gf_proc_dump_write("fd", "%p", locker->fd);
gf_proc_dump_write("gfid", "%s",
uuid_utoa (locker->fd->inode->gfid));
} else {
gf_proc_dump_write("fd", "%s", locker->loc.path);
gf_proc_dump_write("gfid", "%s",
uuid_utoa (locker->loc.inode->gfid));
}
gf_proc_dump_write("pid", "%d", locker->pid);
gf_proc_dump_write("lock length", "%d", locker->owner.len);
lkowner_unparse (&locker->owner, locker_data, locker->owner.len);
gf_proc_dump_write("lock data", "%s", locker_data);
memset (locker_data, 0, sizeof (locker_data));
gf_proc_dump_build_key (key, "inode", "%d", count);
gf_proc_dump_add_section (key);
if (locker->fd)
inode_dump (locker->fd->inode, key);
else
inode_dump (locker->loc.inode, key);
}
}
int
server_priv_to_dict (xlator_t *this, dict_t *dict)
@ -478,104 +318,6 @@ out:
return ret;
}
int
server_inode_to_dict (xlator_t *this, dict_t *dict)
{
server_conf_t *conf = NULL;
server_connection_t *trav = NULL;
char key[32] = {0,};
int count = 0;
int ret = -1;
xlator_t *prev_bound_xl = NULL;
GF_VALIDATE_OR_GOTO (THIS->name, this, out);
GF_VALIDATE_OR_GOTO (this->name, dict, out);
conf = this->private;
if (!conf)
return -1;
ret = pthread_mutex_trylock (&conf->mutex);
if (ret)
return -1;
list_for_each_entry (trav, &conf->conns, list) {
if (trav->bound_xl && trav->bound_xl->itable) {
/* Presently every brick contains only one
* bound_xl for all connections. This will lead
* to duplicating of the inode lists, if listing
* is done for every connection. This simple check
* prevents duplication in the present case. If
* need arises the check can be improved.
*/
if (trav->bound_xl == prev_bound_xl)
continue;
prev_bound_xl = trav->bound_xl;
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "conn%d", count);
inode_table_dump_to_dict (trav->bound_xl->itable,
key, dict);
count++;
}
}
pthread_mutex_unlock (&conf->mutex);
ret = dict_set_int32 (dict, "conncount", count);
out:
if (prev_bound_xl)
prev_bound_xl = NULL;
return ret;
}
int
server_inode (xlator_t *this)
{
server_conf_t *conf = NULL;
server_connection_t *trav = NULL;
char key[GF_DUMP_MAX_BUF_LEN];
int i = 1;
int ret = -1;
GF_VALIDATE_OR_GOTO ("server", this, out);
conf = this->private;
if (!conf) {
gf_log (this->name, GF_LOG_WARNING,
"conf null in xlator");
return -1;
}
ret = pthread_mutex_trylock (&conf->mutex);
if (ret)
goto out;
list_for_each_entry (trav, &conf->conns, list) {
ret = pthread_mutex_trylock (&trav->lock);
if (!ret)
{
gf_proc_dump_build_key(key,
"conn","%d.ltable", i);
gf_proc_dump_add_section(key);
ltable_dump (trav);
i++;
pthread_mutex_unlock (&trav->lock);
}else
continue;
}
pthread_mutex_unlock (&conf->mutex);
ret = 0;
out:
if (ret)
gf_proc_dump_write ("Unable to dump the lock table",
"(Lock acquisition failed) %s",
this?this->name:"server");
return ret;
}
static int
get_auth_types (dict_t *this, char *key, data_t *value, void *data)
@ -718,9 +460,9 @@ server_rpc_notify (rpcsvc_t *rpc, void *xl, rpcsvc_event_t event,
{
gf_boolean_t detached = _gf_false;
xlator_t *this = NULL;
rpc_transport_t *xprt = NULL;
server_connection_t *conn = NULL;
rpc_transport_t *trans = NULL;
server_conf_t *conf = NULL;
client_t *client = NULL;
if (!xl || !data) {
gf_log_callingfn ("server", GF_LOG_WARNING,
@ -729,7 +471,7 @@ server_rpc_notify (rpcsvc_t *rpc, void *xl, rpcsvc_event_t event,
}
this = xl;
xprt = data;
trans= data;
conf = this->private;
switch (event) {
@ -737,17 +479,17 @@ server_rpc_notify (rpcsvc_t *rpc, void *xl, rpcsvc_event_t event,
{
/* Have a structure per new connection */
/* TODO: Should we create anything here at all ? * /
conn = create_server_conn_state (this, xprt);
if (!conn)
client->conn = create_server_conn_state (this, trans);
if (!client->conn)
goto out;
xprt->protocol_private = conn;
trans->protocol_private = client->conn;
*/
INIT_LIST_HEAD (&xprt->list);
INIT_LIST_HEAD (&trans->list);
pthread_mutex_lock (&conf->mutex);
{
list_add_tail (&xprt->list, &conf->xprt_list);
list_add_tail (&trans->list, &conf->xprt_list);
}
pthread_mutex_unlock (&conf->mutex);
@ -760,51 +502,50 @@ server_rpc_notify (rpcsvc_t *rpc, void *xl, rpcsvc_event_t event,
*/
pthread_mutex_lock (&conf->mutex);
{
list_del_init (&xprt->list);
list_del_init (&trans->list);
}
pthread_mutex_unlock (&conf->mutex);
conn = get_server_conn_state (this, xprt);
if (!conn)
client = trans->xl_private;
if (!client)
break;
gf_log (this->name, GF_LOG_INFO, "disconnecting connection"
" from %s, Number of pending operations: %"PRIu64,
conn->id, conn->ref);
"from %s", client->server_ctx.client_uid);
/* If lock self heal is off, then destroy the
conn object, else register a grace timer event */
if (!conf->lk_heal) {
server_conn_ref (conn);
server_connection_put (this, conn, &detached);
gf_client_ref (client);
gf_client_put (client, &detached);
if (detached)
server_connection_cleanup (this, conn,
INTERNAL_LOCKS |
POSIX_LOCKS);
server_conn_unref (conn);
} else {
put_server_conn_state (this, xprt);
server_connection_cleanup (this, conn, INTERNAL_LOCKS);
pthread_mutex_lock (&conn->lock);
{
if (conn->timer)
goto unlock;
gf_log (this->name, GF_LOG_INFO, "starting a grace "
"timer for %s", conn->id);
conn->timer = gf_timer_call_after (this->ctx,
conf->grace_tv,
grace_time_handler,
conn);
}
unlock:
pthread_mutex_unlock (&conn->lock);
server_connection_cleanup (this, client,
INTERNAL_LOCKS | POSIX_LOCKS);
gf_client_unref (client);
break;
}
trans->xl_private = NULL;
server_connection_cleanup (this, client, INTERNAL_LOCKS);
LOCK (&client->server_ctx.fdtable_lock);
{
if (!client->server_ctx.grace_timer) {
gf_log (this->name, GF_LOG_INFO,
"starting a grace timer for %s",
client->server_ctx.client_uid);
client->server_ctx.grace_timer =
gf_timer_call_after (this->ctx,
conf->grace_tv,
grace_time_handler,
client);
}
}
UNLOCK (&client->server_ctx.fdtable_lock);
break;
case RPCSVC_EVENT_TRANSPORT_DESTROY:
/*- conn obj has been disassociated from xprt on first
/*- conn obj has been disassociated from trans on first
* disconnect.
* conn cleanup and destruction is handed over to
* grace_time_handler or the subsequent handler that 'owns'
@ -1038,7 +779,6 @@ init (xlator_t *this)
GF_VALIDATE_OR_GOTO(this->name, conf, out);
INIT_LIST_HEAD (&conf->conns);
INIT_LIST_HEAD (&conf->xprt_list);
pthread_mutex_init (&conf->mutex, NULL);
@ -1229,11 +969,11 @@ struct xlator_cbks cbks;
struct xlator_dumpops dumpops = {
.priv = server_priv,
.fd = server_fd,
.inode = server_inode,
.fd = gf_client_dump_fdtables,
.inode = gf_client_dump_inodes,
.priv_to_dict = server_priv_to_dict,
.fd_to_dict = server_fd_to_dict,
.inode_to_dict = server_inode_to_dict,
.fd_to_dict = gf_client_dump_fdtables_to_dict,
.inode_to_dict = gf_client_dump_inodes_to_dict,
};

View File

@ -33,58 +33,6 @@ typedef enum {
typedef struct _server_state server_state_t;
struct _locker {
struct list_head lockers;
char *volume;
loc_t loc;
fd_t *fd;
gf_lkowner_t owner;
pid_t pid;
};
struct _lock_table {
struct list_head inodelk_lockers;
struct list_head entrylk_lockers;
};
/* private structure per connection (transport object)
* used as transport_t->xl_private
*/
struct _server_connection {
struct list_head list;
char *id;
uint64_t ref;
int bind_ref;
pthread_mutex_t lock;
fdtable_t *fdtable;
struct _lock_table *ltable;
gf_timer_t *timer;
xlator_t *bound_xl;
xlator_t *this;
uint32_t lk_version;
uint64_t rsp_failure_fops[GF_FOP_MAXVALUE];
};
typedef struct _server_connection server_connection_t;
server_connection_t *
server_connection_get (xlator_t *this, const char *id);
server_connection_t *
server_connection_put (xlator_t *this, server_connection_t *conn,
gf_boolean_t *detached);
server_connection_t*
server_conn_unref (server_connection_t *conn);
server_connection_t*
server_conn_ref (server_connection_t *conn);
int
server_connection_cleanup (xlator_t *this, server_connection_t *conn,
int32_t flags);
int server_null (rpcsvc_request_t *req);
struct _volfile_ctx {
@ -106,7 +54,6 @@ struct server_conf {
struct timeval grace_tv;
dict_t *auth_modules;
pthread_mutex_t mutex;
struct list_head conns;
struct list_head xprt_list;
};
typedef struct server_conf server_conf_t;
@ -145,7 +92,7 @@ int
resolve_and_resume (call_frame_t *frame, server_resume_fn_t fn);
struct _server_state {
server_connection_t *conn;
struct _client_t *client;
rpc_transport_t *xprt;
inode_table_t *itable;
@ -205,6 +152,4 @@ server_submit_reply (call_frame_t *frame, rpcsvc_request_t *req, void *arg,
int gf_server_check_setxattr_cmd (call_frame_t *frame, dict_t *dict);
int gf_server_check_getxattr_cmd (call_frame_t *frame, const char *name);
void ltable_dump (server_connection_t *conn);
#endif /* !_SERVER_H */