core: change lk-owner to a 1k buffer
With this, NLM can send the lk-owner field directly to the locks translators. As part of the same effort, sending a maximum of 500 aux GIDs over the protocol is also enabled.

Change-Id: I87c2514392748416f7ffe21d5154faad2e413969
Signed-off-by: Amar Tumballi <amar@gluster.com>
BUG: 767229
Reviewed-on: http://review.gluster.com/779
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Anand Avati <avati@gluster.com>
commit b02afc6d00
parent 0694749c3e
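In short, the patch replaces the fixed 8-byte lk-owner with a variable-length owner of at most 1 KB (the NLM limit), carried over RPC as an XDR opaque inside a new AUTH_GLUSTERFS_v2 credential. As a hedged orientation aid before the diff, here is a usage sketch of the gf_lkowner_t type introduced in libglusterfs/src/lkowner.h below; the function and parameter names (store_nlm_owner, oh, oh_len) are illustrative, not part of the patch:

    #include <string.h>
    #include "lkowner.h"            /* gf_lkowner_t, added by this patch */

    /* Copy an NLM owner cookie into the new owner type as-is, instead of
     * squeezing it into the old uint64_t lk_owner field. */
    static int
    store_nlm_owner (gf_lkowner_t *owner, const char *oh, int oh_len)
    {
            if (oh_len <= 0 || oh_len > GF_MAX_LOCK_OWNER_LEN)
                    return -1;  /* the RPC layer now rejects owners over 1 KB */

            owner->len = oh_len;
            memcpy (owner->data, oh, oh_len);
            return 0;
    }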
@@ -50,5 +50,5 @@ convert_fuse_file_lock (struct fuse_file_lock *fl, struct gf_flock *flock,
         else
                 flock->l_len = fl->end - fl->start + 1;
         flock->l_pid = fl->pid;
-        flock->l_owner = lk_owner;
+        set_lk_owner_from_uint64 (&flock->l_owner, lk_owner);
 }
@@ -82,7 +82,7 @@ main ()
         rpcgen -h -o $hfile $xfile;

         # the '#ifdef' part of file should be fixed
-        sed -i -e 's:\(.*\)-\(.*\)_H_RPCGEN:\1_\2_H_RPCGEN:g' $hfile;
+        sed -i -e 's/-/_/g' $hfile;

         echo "OK";

@@ -37,7 +37,7 @@ noinst_HEADERS = common-utils.h defaults.h dict.h glusterfs.h hashfn.h \
         rbthash.h iatt.h latency.h mem-types.h $(CONTRIBDIR)/uuid/uuidd.h \
         $(CONTRIBDIR)/uuid/uuid.h $(CONTRIBDIR)/uuid/uuidP.h \
         $(CONTRIB_BUILDDIR)/uuid/uuid_types.h syncop.h graph-utils.h trie.h run.h \
-        options.h
+        options.h lkowner.h

 EXTRA_DIST = graph.l graph.y

@@ -40,6 +40,7 @@
 #include <netinet/in.h>
 #include <arpa/inet.h>
 #include <signal.h>
 #include <stdlib.h>

 #include "logging.h"
 #include "common-utils.h"
@@ -48,6 +49,7 @@
 #include "stack.h"
 #include "globals.h"
 #include "md5.h"
+#include "lkowner.h"

 #ifndef AI_ADDRCONFIG
 #define AI_ADDRCONFIG 0
@@ -1656,6 +1658,25 @@ uuid_utoa_r (uuid_t uuid, char *dst)
         return dst;
 }

+/*Thread safe conversion function*/
+char *
+lkowner_utoa (gf_lkowner_t *lkowner)
+{
+        char *lkowner_buffer = glusterfs_lkowner_buf_get();
+        lkowner_unparse (lkowner, lkowner_buffer, GF_LKOWNER_BUF_SIZE);
+        return lkowner_buffer;
+}
+
+/*Re-entrant conversion function*/
+char *
+lkowner_utoa_r (gf_lkowner_t *lkowner, char *dst, int len)
+{
+        if(!dst)
+                return NULL;
+        lkowner_unparse (lkowner, dst, len);
+        return dst;
+}
+
 void _get_md5_str (char *out_str, size_t outlen,
                    const uint8_t *input, int n)
 {
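lkowner_utoa() above renders the owner as hex for logging (two hex digits per byte, with a '-' separator after every eight bytes) into a per-thread buffer, so the result can be used directly inside a single log statement; lkowner_utoa_r() is the caller-supplied-buffer variant. A small hedged sketch; the wrapper function and log tag are illustrative only:

    #include "common-utils.h"
    #include "lkowner.h"
    #include "logging.h"

    static void
    log_owner_example (gf_lkowner_t *owner)
    {
            char buf[GF_LKOWNER_BUF_SIZE] = {0,};

            /* thread-local buffer: valid until the next lkowner_utoa()
             * call on this thread, which is fine for one log message */
            gf_log ("example", GF_LOG_TRACE, "lk-owner=%s",
                    lkowner_utoa (owner));

            /* re-entrant variant when the string must outlive the call */
            lkowner_utoa_r (owner, buf, GF_LKOWNER_BUF_SIZE);
    }

For instance, an owner set with set_lk_owner_from_uint64 (&owner, 0xcafe) renders as "feca000000000000": least-significant byte first, and no separator appears within the first eight bytes.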
@@ -444,6 +444,9 @@ char valid_internet_address (char *address);

 char *uuid_utoa (uuid_t uuid);
 char *uuid_utoa_r (uuid_t uuid, char *dst);
+char *lkowner_utoa (gf_lkowner_t *lkowner);
+char *lkowner_utoa_r (gf_lkowner_t *lkowner, char *dst, int len);

 void _get_md5_str (char *out_str, size_t outlen,
                    const uint8_t *input, int n);
 void gf_array_insertionsort (void *a, int l, int r, size_t elem_size,
@@ -293,6 +293,43 @@ glusterfs_uuid_buf_get ()
         return buf;
 }

+/* LKOWNER_BUFFER */
+
+static pthread_key_t lkowner_buf_key;
+static char global_lkowner_buf[GF_LKOWNER_BUF_SIZE];
+void
+glusterfs_lkowner_buf_destroy (void *ptr)
+{
+        if (ptr)
+                FREE (ptr);
+}
+
+int
+glusterfs_lkowner_buf_init ()
+{
+        int ret = 0;
+
+        ret = pthread_key_create (&lkowner_buf_key,
+                                  glusterfs_lkowner_buf_destroy);
+        return ret;
+}
+
+char *
+glusterfs_lkowner_buf_get ()
+{
+        char *buf;
+        int ret = 0;
+
+        buf = pthread_getspecific (lkowner_buf_key);
+        if(!buf) {
+                buf = MALLOC (GF_LKOWNER_BUF_SIZE);
+                ret = pthread_setspecific (lkowner_buf_key, (void *) buf);
+                if(ret)
+                        buf = global_lkowner_buf;
+        }
+        return buf;
+}
+
 int
 glusterfs_globals_init ()
 {
@@ -323,6 +360,13 @@ glusterfs_globals_init ()
                 goto out;
         }

+        ret = glusterfs_lkowner_buf_init ();
+        if(ret) {
+                gf_log ("", GF_LOG_CRITICAL,
+                        "ERROR: glusterfs lkowner buffer init failed");
+                goto out;
+        }
+
         gf_mem_acct_enable_set ();

         ret = synctask_init ();
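glusterfs_lkowner_buf_get() above reuses the lazy thread-specific-buffer pattern already used for the uuid buffer: one pthread_key_t is created during glusterfs_globals_init(), each thread allocates its buffer on first use, and a process-wide static buffer is the fallback when pthread_setspecific() fails. A generic, self-contained sketch of that pattern; the names here are illustrative, not GlusterFS APIs:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_key_t example_key;
    static char          example_fallback[256];

    static int
    example_init (void)
    {
            /* free() runs on each thread's buffer when that thread exits */
            return pthread_key_create (&example_key, free);
    }

    static char *
    example_buf_get (void)
    {
            char *buf = pthread_getspecific (example_key);

            if (!buf) {
                    buf = malloc (256);
                    if (!buf || pthread_setspecific (example_key, buf))
                            buf = example_fallback; /* degrade to shared buffer */
            }
            return buf;
    }

The fallback avoids failing the caller when thread-local storage cannot be set up, at the cost of the shared buffer not being thread-safe.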
@@ -22,12 +22,6 @@

 #define GF_DEFAULT_BASE_PORT 24007

-/* This corresponds to the max 16 number of group IDs that are sent through an
- * RPC request. Since NFS is the only one going to set this, we can be safe
- * in keeping this size hardcoded.
- */
-#define GF_REQUEST_MAXGROUPS    16
-
 #include "glusterfs.h"

 /* CTX */
@@ -52,6 +46,7 @@ int synctask_set (void *);

 /* uuid_buf */
 char *glusterfs_uuid_buf_get();
+char *glusterfs_lkowner_buf_get();

 /* init */
 int glusterfs_globals_init (void);
@@ -45,7 +45,6 @@
 #include "list.h"
 #include "logging.h"


 #define GF_YES 1
 #define GF_NO  0

@@ -114,6 +113,12 @@
 /* TODO: Should we use PATH-MAX? On some systems it may save space */
 #define ZR_PATH_MAX 4096

+/* GlusterFS's maximum supported Auxilary GIDs */
+/* TODO: Keeping it to 200, so that we can fit in 2KB buffer for auth data
+ * in RPC server code, if there is ever need for having more aux-gids, then
+ * we have to add aux-gid in payload of actors */
+#define GF_MAX_AUX_GROUPS   200
+
 /* NOTE: add members ONLY at the end (just before _MAXVALUE) */
 typedef enum {
         GF_FOP_NULL = 0,
@@ -177,15 +182,6 @@ typedef enum {
         GF_OP_TYPE_MAX,
 } gf_op_type_t;

-struct gf_flock {
-        short    l_type;
-        short    l_whence;
-        off_t    l_start;
-        off_t    l_len;
-        pid_t    l_pid;
-        uint64_t l_owner;
-};
-
 /* NOTE: all the miscellaneous flags used by GlusterFS should be listed here */
 typedef enum {
         GF_LK_GETLK = 0,
@@ -385,6 +381,19 @@ typedef enum {
         GF_EVENT_MAXVAL,
 } glusterfs_event_t;

+/* gf_lkowner_t is defined in lkowner.h */
+#include "lkowner.h"
+
+struct gf_flock {
+        short        l_type;
+        short        l_whence;
+        off_t        l_start;
+        off_t        l_len;
+        pid_t        l_pid;
+        gf_lkowner_t l_owner;
+};
+
+
 extern char *glusterfs_strevent (glusterfs_event_t ev);

 #define GF_MUST_CHECK __attribute__((warn_unused_result))
libglusterfs/src/lkowner.h (new file, 92 lines)
@@ -0,0 +1,92 @@
/*
  Copyright (c) 2012 Red Hat <http://www.redhat.com/>
  This file is part of GlusterFS.

  GlusterFS is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published
  by the Free Software Foundation; either version 3 of the License,
  or (at your option) any later version.

  GlusterFS is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see
  <http://www.gnu.org/licenses/>.
*/

#ifndef _LK_OWNER_H
#define _LK_OWNER_H

#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif

#define GF_MAX_LOCK_OWNER_LEN 1024 /* 1kB as per NLM */

/* 16strings-16strings-... */
#define GF_LKOWNER_BUF_SIZE  ((GF_MAX_LOCK_OWNER_LEN * 2) + \
                              (GF_MAX_LOCK_OWNER_LEN / 8))

typedef struct gf_lkowner_ {
        int  len;
        char data[GF_MAX_LOCK_OWNER_LEN];
} gf_lkowner_t;


/* LKOWNER to string functions */
static inline void
lkowner_unparse (gf_lkowner_t *lkowner, char *buf, int buf_len)
{
        int i = 0;
        int j = 0;

        for (i = 0; i < lkowner->len; i++) {
                if (i && !(i % 8)) {
                        buf[j] = '-';
                        j++;
                }
                sprintf (&buf[j], "%02x", lkowner->data[i]);
                j += 2;
                if (j == buf_len)
                        break;
        }
        if (j < buf_len)
                buf[j] = '\0';
}

static inline void
set_lk_owner_from_ptr (gf_lkowner_t *lkowner, void *data)
{
        int i = 0;
        int j = 0;

        lkowner->len = sizeof (unsigned long);
        for (i = 0, j = 0; i < lkowner->len; i++, j += 8) {
                lkowner->data[i] = (char)((((unsigned long)data) >> j) & 0xff);
        }
}

static inline void
set_lk_owner_from_uint64 (gf_lkowner_t *lkowner, uint64_t data)
{
        int i = 0;
        int j = 0;

        lkowner->len = 8;
        for (i = 0, j = 0; i < lkowner->len; i++, j += 8) {
                lkowner->data[i] = (char)((data >> j) & 0xff);
        }
}

/* Return true if the locks have the same owner */
static inline int
is_same_lkowner (gf_lkowner_t *l1, gf_lkowner_t *l2)
{
        return ((l1->len == l2->len) && !memcmp(l1->data, l2->data, l1->len));
}

#endif /* _LK_OWNER_H */
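A short usage sketch of the helpers above; the standalone main() harness is illustrative and assumes the standard headers are pulled in before lkowner.h (the header itself relies on its includers for sprintf, memcmp and uint64_t):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include "lkowner.h"

    int
    main (void)
    {
            gf_lkowner_t a = {0,};
            gf_lkowner_t b = {0,};
            char         buf[GF_LKOWNER_BUF_SIZE] = {0,};

            /* elsewhere in the patch, frame pointers and FUSE-supplied
             * owners funnel through these same setters */
            set_lk_owner_from_uint64 (&a, 0xcafe);
            set_lk_owner_from_uint64 (&b, 0xcafe);

            assert (is_same_lkowner (&a, &b));

            lkowner_unparse (&a, buf, GF_LKOWNER_BUF_SIZE);
            printf ("lk-owner=%s\n", buf); /* prints feca000000000000 */
            return 0;
    }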
@@ -44,6 +44,7 @@ typedef struct _call_pool_t call_pool_t;
 #include "list.h"
 #include "common-utils.h"
 #include "globals.h"
+#include "lkowner.h"

 #define NFS_PID 1
 #define LOW_PRIO_PROC_PID -1
@@ -106,9 +107,9 @@ struct _call_stack_t {
         uid_t                         uid;
         gid_t                         gid;
         pid_t                         pid;
-        uint32_t                      ngrps;
-        uint32_t                      groups[GF_REQUEST_MAXGROUPS];
-        uint64_t                      lk_owner;
+        uint16_t                      ngrps;
+        uint32_t                      groups[GF_MAX_AUX_GROUPS];
+        gf_lkowner_t                  lk_owner;

         call_frame_t                  frames;

@@ -360,7 +361,7 @@ copy_frame (call_frame_t *frame)
         newstack->op   = oldstack->op;
         newstack->type = oldstack->type;
         memcpy (newstack->groups, oldstack->groups,
-                sizeof (uint32_t) * GF_REQUEST_MAXGROUPS);
+                sizeof (gid_t) * GF_MAX_AUX_GROUPS);
         newstack->unique = oldstack->unique;

         newstack->frames.this = frame->this;
@@ -1,8 +1,8 @@
 lib_LTLIBRARIES = libgfrpc.la

 libgfrpc_la_SOURCES = auth-unix.c rpcsvc-auth.c rpcsvc.c auth-null.c \
-        rpc-transport.c xdr-rpc.c xdr-rpcclnt.c rpc-clnt.c auth-glusterfs.c \
-        rpc-common.c
+        rpc-transport.c xdr-rpc.c xdr-rpcclnt.c rpc-clnt.c auth-glusterfs.c

 libgfrpc_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la

 noinst_HEADERS = rpcsvc.h rpc-transport.h xdr-common.h xdr-rpc.h xdr-rpcclnt.h \
@ -29,94 +29,9 @@
|
||||
#include "dict.h"
|
||||
#include "xdr-rpc.h"
|
||||
#include "xdr-common.h"
|
||||
#include "rpc-common-xdr.h"
|
||||
|
||||
bool_t
|
||||
xdr_auth_glusterfs_parms (XDR *xdrs, auth_glusterfs_parms *objp)
|
||||
{
|
||||
register int32_t *buf;
|
||||
|
||||
int i;
|
||||
|
||||
if (xdrs->x_op == XDR_ENCODE) {
|
||||
if (!xdr_u_quad_t (xdrs, &objp->lk_owner))
|
||||
return FALSE;
|
||||
buf = XDR_INLINE (xdrs, (4 + 16 )* BYTES_PER_XDR_UNIT);
|
||||
if (buf == NULL) {
|
||||
if (!xdr_u_int (xdrs, &objp->pid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->uid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->gid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->ngrps))
|
||||
return FALSE;
|
||||
if (!xdr_vector (xdrs, (char *)objp->groups, 16,
|
||||
sizeof (u_int), (xdrproc_t) xdr_u_int))
|
||||
return FALSE;
|
||||
} else {
|
||||
IXDR_PUT_U_LONG(buf, objp->pid);
|
||||
IXDR_PUT_U_LONG(buf, objp->uid);
|
||||
IXDR_PUT_U_LONG(buf, objp->gid);
|
||||
IXDR_PUT_U_LONG(buf, objp->ngrps);
|
||||
{
|
||||
register u_int *genp;
|
||||
|
||||
for (i = 0, genp = objp->groups;
|
||||
i < 16; ++i) {
|
||||
IXDR_PUT_U_LONG(buf, *genp++);
|
||||
}
|
||||
}
|
||||
}
|
||||
return TRUE;
|
||||
} else if (xdrs->x_op == XDR_DECODE) {
|
||||
if (!xdr_u_quad_t (xdrs, &objp->lk_owner))
|
||||
return FALSE;
|
||||
buf = XDR_INLINE (xdrs, (4 + 16 )* BYTES_PER_XDR_UNIT);
|
||||
if (buf == NULL) {
|
||||
if (!xdr_u_int (xdrs, &objp->pid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->uid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->gid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->ngrps))
|
||||
return FALSE;
|
||||
if (!xdr_vector (xdrs, (char *)objp->groups, 16,
|
||||
sizeof (u_int), (xdrproc_t) xdr_u_int))
|
||||
return FALSE;
|
||||
} else {
|
||||
objp->pid = IXDR_GET_U_LONG(buf);
|
||||
objp->uid = IXDR_GET_U_LONG(buf);
|
||||
objp->gid = IXDR_GET_U_LONG(buf);
|
||||
objp->ngrps = IXDR_GET_U_LONG(buf);
|
||||
{
|
||||
register u_int *genp;
|
||||
|
||||
for (i = 0, genp = objp->groups;
|
||||
i < 16; ++i) {
|
||||
*genp++ = IXDR_GET_U_LONG(buf);
|
||||
}
|
||||
}
|
||||
}
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
if (!xdr_u_quad_t (xdrs, &objp->lk_owner))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->pid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->uid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->gid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->ngrps))
|
||||
return FALSE;
|
||||
if (!xdr_vector (xdrs, (char *)objp->groups, 16,
|
||||
sizeof (u_int), (xdrproc_t) xdr_u_int))
|
||||
return FALSE;
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
/* V1 */
|
||||
|
||||
ssize_t
|
||||
xdr_to_glusterfs_auth (char *buf, struct auth_glusterfs_parms *req)
|
||||
@ -146,7 +61,7 @@ auth_glusterfs_request_init (rpcsvc_request_t *req, void *priv)
|
||||
{
|
||||
if (!req)
|
||||
return -1;
|
||||
memset (req->verf.authdata, 0, RPCSVC_MAX_AUTH_BYTES);
|
||||
memset (req->verf.authdata, 0, GF_MAX_AUTH_BYTES);
|
||||
req->verf.datalen = 0;
|
||||
req->verf.flavour = AUTH_NULL;
|
||||
|
||||
@ -155,9 +70,12 @@ auth_glusterfs_request_init (rpcsvc_request_t *req, void *priv)
|
||||
|
||||
int auth_glusterfs_authenticate (rpcsvc_request_t *req, void *priv)
|
||||
{
|
||||
int ret = RPCSVC_AUTH_REJECT;
|
||||
struct auth_glusterfs_parms au = {0,};
|
||||
int gidcount = 0;
|
||||
|
||||
int ret = RPCSVC_AUTH_REJECT;
|
||||
int gidcount = 0;
|
||||
int j = 0;
|
||||
int i = 0;
|
||||
|
||||
if (!req)
|
||||
return ret;
|
||||
@ -173,7 +91,11 @@ int auth_glusterfs_authenticate (rpcsvc_request_t *req, void *priv)
|
||||
req->pid = au.pid;
|
||||
req->uid = au.uid;
|
||||
req->gid = au.gid;
|
||||
req->lk_owner = au.lk_owner;
|
||||
req->lk_owner.len = 8;
|
||||
{
|
||||
for (i = 0; i < req->lk_owner.len; i++, j += 8)
|
||||
req->lk_owner.data[i] = (char)((au.lk_owner >> j) & 0xff);
|
||||
}
|
||||
req->auxgidcount = au.ngrps;
|
||||
|
||||
if (req->auxgidcount > 16) {
|
||||
@ -187,8 +109,8 @@ int auth_glusterfs_authenticate (rpcsvc_request_t *req, void *priv)
|
||||
req->auxgids[gidcount] = au.groups[gidcount];
|
||||
|
||||
gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth Info: pid: %u, uid: %d"
|
||||
", gid: %d, owner: %"PRId64,
|
||||
req->pid, req->uid, req->gid, req->lk_owner);
|
||||
", gid: %d, owner: %s",
|
||||
req->pid, req->uid, req->gid, lkowner_utoa (&req->lk_owner));
|
||||
ret = RPCSVC_AUTH_ACCEPT;
|
||||
err:
|
||||
return ret;
|
||||
@ -213,3 +135,111 @@ rpcsvc_auth_glusterfs_init (rpcsvc_t *svc, dict_t *options)
|
||||
{
|
||||
return &rpcsvc_auth_glusterfs;
|
||||
}
|
||||
|
||||
/* V2 */
|
||||
|
||||
ssize_t
|
||||
xdr_to_glusterfs_auth_v2 (char *buf, struct auth_glusterfs_parms_v2 *req)
|
||||
{
|
||||
XDR xdr;
|
||||
ssize_t ret = -1;
|
||||
|
||||
if ((!buf) || (!req))
|
||||
return -1;
|
||||
|
||||
xdrmem_create (&xdr, buf, GF_MAX_AUTH_BYTES, XDR_DECODE);
|
||||
if (!xdr_auth_glusterfs_parms_v2 (&xdr, req)) {
|
||||
gf_log ("", GF_LOG_WARNING,
|
||||
"failed to decode glusterfs v2 parameters");
|
||||
ret = -1;
|
||||
goto ret;
|
||||
}
|
||||
|
||||
ret = (((size_t)(&xdr)->x_private) - ((size_t)(&xdr)->x_base));
|
||||
ret:
|
||||
return ret;
|
||||
|
||||
}
|
||||
int
|
||||
auth_glusterfs_v2_request_init (rpcsvc_request_t *req, void *priv)
|
||||
{
|
||||
if (!req)
|
||||
return -1;
|
||||
memset (req->verf.authdata, 0, GF_MAX_AUTH_BYTES);
|
||||
req->verf.datalen = 0;
|
||||
req->verf.flavour = AUTH_NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int auth_glusterfs_v2_authenticate (rpcsvc_request_t *req, void *priv)
|
||||
{
|
||||
struct auth_glusterfs_parms_v2 au = {0,};
|
||||
int ret = RPCSVC_AUTH_REJECT;
|
||||
int i = 0;
|
||||
|
||||
if (!req)
|
||||
return ret;
|
||||
|
||||
ret = xdr_to_glusterfs_auth_v2 (req->cred.authdata, &au);
|
||||
if (ret == -1) {
|
||||
gf_log ("", GF_LOG_WARNING,
|
||||
"failed to decode glusterfs credentials");
|
||||
ret = RPCSVC_AUTH_REJECT;
|
||||
goto err;
|
||||
}
|
||||
|
||||
req->pid = au.pid;
|
||||
req->uid = au.uid;
|
||||
req->gid = au.gid;
|
||||
req->lk_owner.len = au.lk_owner.lk_owner_len;
|
||||
req->auxgidcount = au.groups.groups_len;
|
||||
|
||||
if (req->auxgidcount > GF_MAX_AUX_GROUPS) {
|
||||
gf_log ("", GF_LOG_WARNING,
|
||||
"more than max aux gids found (%d) , truncating it "
|
||||
"to %d and continuing", au.groups.groups_len,
|
||||
GF_MAX_AUX_GROUPS);
|
||||
req->auxgidcount = GF_MAX_AUX_GROUPS;
|
||||
}
|
||||
|
||||
if (req->lk_owner.len > GF_MAX_LOCK_OWNER_LEN) {
|
||||
gf_log ("", GF_LOG_WARNING,
|
||||
"lkowner field > 1k, failing authentication");
|
||||
ret = RPCSVC_AUTH_REJECT;
|
||||
goto err;
|
||||
}
|
||||
|
||||
for (i = 0; i < req->auxgidcount; ++i)
|
||||
req->auxgids[i] = au.groups.groups_val[i];
|
||||
|
||||
for (i = 0; i < au.lk_owner.lk_owner_len; ++i)
|
||||
req->lk_owner.data[i] = au.lk_owner.lk_owner_val[i];
|
||||
|
||||
gf_log (GF_RPCSVC, GF_LOG_TRACE, "Auth Info: pid: %u, uid: %d"
|
||||
", gid: %d, owner: %s",
|
||||
req->pid, req->uid, req->gid, lkowner_utoa (&req->lk_owner));
|
||||
ret = RPCSVC_AUTH_ACCEPT;
|
||||
err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
rpcsvc_auth_ops_t auth_glusterfs_ops_v2 = {
|
||||
.transport_init = NULL,
|
||||
.request_init = auth_glusterfs_v2_request_init,
|
||||
.authenticate = auth_glusterfs_v2_authenticate
|
||||
};
|
||||
|
||||
rpcsvc_auth_t rpcsvc_auth_glusterfs_v2 = {
|
||||
.authname = "AUTH_GLUSTERFS-v2",
|
||||
.authnum = AUTH_GLUSTERFS_v2,
|
||||
.authops = &auth_glusterfs_ops_v2,
|
||||
.authprivate = NULL
|
||||
};
|
||||
|
||||
|
||||
rpcsvc_auth_t *
|
||||
rpcsvc_auth_glusterfs_v2_init (rpcsvc_t *svc, dict_t *options)
|
||||
{
|
||||
return &rpcsvc_auth_glusterfs_v2;
|
||||
}
|
||||
|
@ -34,10 +34,10 @@ auth_null_request_init (rpcsvc_request_t *req, void *priv)
|
||||
if (!req)
|
||||
return -1;
|
||||
|
||||
memset (req->cred.authdata, 0, RPCSVC_MAX_AUTH_BYTES);
|
||||
memset (req->cred.authdata, 0, GF_MAX_AUTH_BYTES);
|
||||
req->cred.datalen = 0;
|
||||
|
||||
memset (req->verf.authdata, 0, RPCSVC_MAX_AUTH_BYTES);
|
||||
memset (req->verf.authdata, 0, GF_MAX_AUTH_BYTES);
|
||||
req->verf.datalen = 0;
|
||||
|
||||
return 0;
|
||||
|
@ -35,7 +35,7 @@ auth_unix_request_init (rpcsvc_request_t *req, void *priv)
|
||||
{
|
||||
if (!req)
|
||||
return -1;
|
||||
memset (req->verf.authdata, 0, RPCSVC_MAX_AUTH_BYTES);
|
||||
memset (req->verf.authdata, 0, GF_MAX_AUTH_BYTES);
|
||||
req->verf.datalen = 0;
|
||||
req->verf.flavour = AUTH_NULL;
|
||||
|
||||
|
@ -32,6 +32,7 @@
|
||||
#include "protocol-common.h"
|
||||
#include "mem-pool.h"
|
||||
#include "xdr-rpc.h"
|
||||
#include "rpc-common-xdr.h"
|
||||
|
||||
void
|
||||
rpc_clnt_reply_deinit (struct rpc_req *req, struct mem_pool *pool);
|
||||
@ -1091,7 +1092,7 @@ rpc_clnt_register_notify (struct rpc_clnt *rpc, rpc_clnt_notify_t fn,
|
||||
}
|
||||
|
||||
ssize_t
|
||||
xdr_serialize_glusterfs_auth (char *dest, struct auth_glusterfs_parms *au)
|
||||
xdr_serialize_glusterfs_auth (char *dest, struct auth_glusterfs_parms_v2 *au)
|
||||
{
|
||||
ssize_t ret = -1;
|
||||
XDR xdr;
|
||||
@ -1099,10 +1100,9 @@ xdr_serialize_glusterfs_auth (char *dest, struct auth_glusterfs_parms *au)
|
||||
if ((!dest) || (!au))
|
||||
return -1;
|
||||
|
||||
xdrmem_create (&xdr, dest, 1024,
|
||||
XDR_ENCODE);
|
||||
xdrmem_create (&xdr, dest, GF_MAX_AUTH_BYTES, XDR_ENCODE);
|
||||
|
||||
if (!xdr_auth_glusterfs_parms (&xdr, au)) {
|
||||
if (!xdr_auth_glusterfs_parms_v2 (&xdr, au)) {
|
||||
gf_log (THIS->name, GF_LOG_WARNING,
|
||||
"failed to encode auth glusterfs elements");
|
||||
ret = -1;
|
||||
@ -1118,7 +1118,7 @@ ret:
|
||||
|
||||
int
|
||||
rpc_clnt_fill_request (int prognum, int progver, int procnum, int payload,
|
||||
uint64_t xid, struct auth_glusterfs_parms *au,
|
||||
uint64_t xid, struct auth_glusterfs_parms_v2 *au,
|
||||
struct rpc_msg *request, char *auth_data)
|
||||
{
|
||||
int ret = -1;
|
||||
@ -1146,7 +1146,7 @@ rpc_clnt_fill_request (int prognum, int progver, int procnum, int payload,
|
||||
goto out;
|
||||
}
|
||||
|
||||
request->rm_call.cb_cred.oa_flavor = AUTH_GLUSTERFS;
|
||||
request->rm_call.cb_cred.oa_flavor = AUTH_GLUSTERFS_v2;
|
||||
request->rm_call.cb_cred.oa_base = auth_data;
|
||||
request->rm_call.cb_cred.oa_length = ret;
|
||||
|
||||
@ -1198,16 +1198,16 @@ out:
|
||||
struct iobuf *
|
||||
rpc_clnt_record_build_record (struct rpc_clnt *clnt, int prognum, int progver,
|
||||
int procnum, size_t payload, uint64_t xid,
|
||||
struct auth_glusterfs_parms *au,
|
||||
struct auth_glusterfs_parms_v2 *au,
|
||||
struct iovec *recbuf)
|
||||
{
|
||||
struct rpc_msg request = {0, };
|
||||
struct iobuf *request_iob = NULL;
|
||||
char *record = NULL;
|
||||
struct iovec recordhdr = {0, };
|
||||
size_t pagesize = 0;
|
||||
int ret = -1;
|
||||
char auth_data[RPC_CLNT_MAX_AUTH_BYTES] = {0, };
|
||||
struct rpc_msg request = {0, };
|
||||
struct iobuf *request_iob = NULL;
|
||||
char *record = NULL;
|
||||
struct iovec recordhdr = {0, };
|
||||
size_t pagesize = 0;
|
||||
int ret = -1;
|
||||
char auth_data[GF_MAX_AUTH_BYTES] = {0, };
|
||||
|
||||
if ((!clnt) || (!recbuf) || (!au)) {
|
||||
goto out;
|
||||
@ -1237,8 +1237,6 @@ rpc_clnt_record_build_record (struct rpc_clnt *clnt, int prognum, int progver,
|
||||
recordhdr = rpc_clnt_record_build_header (record, pagesize, &request,
|
||||
payload);
|
||||
|
||||
//GF_FREE (request.rm_call.cb_cred.oa_base);
|
||||
|
||||
if (!recordhdr.iov_base) {
|
||||
gf_log (clnt->conn.trans->name, GF_LOG_ERROR,
|
||||
"Failed to build record header");
|
||||
@ -1261,29 +1259,38 @@ rpc_clnt_record (struct rpc_clnt *clnt, call_frame_t *call_frame,
|
||||
rpc_clnt_prog_t *prog,int procnum, size_t payload_len,
|
||||
struct iovec *rpchdr, uint64_t callid)
|
||||
{
|
||||
struct auth_glusterfs_parms au = {0, };
|
||||
struct iobuf *request_iob = NULL;
|
||||
struct auth_glusterfs_parms_v2 au = {0, };
|
||||
struct iobuf *request_iob = NULL;
|
||||
char owner[4] = {0,};
|
||||
|
||||
if (!prog || !rpchdr || !call_frame) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
au.pid = call_frame->root->pid;
|
||||
au.uid = call_frame->root->uid;
|
||||
au.gid = call_frame->root->gid;
|
||||
au.ngrps = call_frame->root->ngrps;
|
||||
au.lk_owner = call_frame->root->lk_owner;
|
||||
if (!au.lk_owner)
|
||||
au.lk_owner = au.pid;
|
||||
au.pid = call_frame->root->pid;
|
||||
au.uid = call_frame->root->uid;
|
||||
au.gid = call_frame->root->gid;
|
||||
au.groups.groups_len = call_frame->root->ngrps;
|
||||
au.lk_owner.lk_owner_len = call_frame->root->lk_owner.len;
|
||||
|
||||
if (au.groups.groups_len)
|
||||
au.groups.groups_val = call_frame->root->groups;
|
||||
|
||||
if (call_frame->root->lk_owner.len)
|
||||
au.lk_owner.lk_owner_val = call_frame->root->lk_owner.data;
|
||||
else {
|
||||
owner[0] = (char)(au.pid & 0xff);
|
||||
owner[1] = (char)((au.pid >> 8) & 0xff);
|
||||
owner[2] = (char)((au.pid >> 16) & 0xff);
|
||||
owner[3] = (char)((au.pid >> 24) & 0xff);
|
||||
|
||||
au.lk_owner.lk_owner_val = owner;
|
||||
au.lk_owner.lk_owner_len = 4;
|
||||
}
|
||||
|
||||
gf_log (clnt->conn.trans->name, GF_LOG_TRACE, "Auth Info: pid: %u, uid: %d"
|
||||
", gid: %d, owner: %"PRId64,
|
||||
au.pid, au.uid, au.gid, au.lk_owner);
|
||||
|
||||
memcpy (au.groups, call_frame->root->groups, sizeof (au.groups));
|
||||
|
||||
//rpc_transport_get_myname (clnt->conn.trans, myname, UNIX_PATH_MAX);
|
||||
//au.aup_machname = myname;
|
||||
", gid: %d, owner: %s", au.pid, au.uid, au.gid,
|
||||
lkowner_utoa (&call_frame->root->lk_owner));
|
||||
|
||||
/* Assuming the client program would like to speak to the same version of
|
||||
* program on server.
|
||||
|
@ -31,8 +31,6 @@ typedef enum {
|
||||
RPC_CLNT_MSG
|
||||
} rpc_clnt_event_t;
|
||||
|
||||
#define AUTH_GLUSTERFS 5
|
||||
#define RPC_CLNT_MAX_AUTH_BYTES 1024
|
||||
|
||||
#define SFRAME_GET_PROGNUM(sframe) (sframe->rpcreq->prog->prognum)
|
||||
#define SFRAME_GET_PROGVER(sframe) (sframe->rpcreq->prog->progver)
|
||||
@ -121,11 +119,10 @@ typedef struct rpcclnt_cb_program {
|
||||
|
||||
|
||||
|
||||
#define RPC_MAX_AUTH_BYTES 400
|
||||
typedef struct rpc_auth_data {
|
||||
int flavour;
|
||||
int datalen;
|
||||
char authdata[RPC_MAX_AUTH_BYTES];
|
||||
int flavour;
|
||||
int datalen;
|
||||
char authdata[GF_MAX_AUTH_BYTES];
|
||||
} rpc_auth_data_t;
|
||||
|
||||
|
||||
|
@ -1,141 +0,0 @@
|
||||
/*
|
||||
Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
|
||||
This file is part of GlusterFS.
|
||||
|
||||
GlusterFS is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published
|
||||
by the Free Software Foundation; either version 3 of the License,
|
||||
or (at your option) any later version.
|
||||
|
||||
GlusterFS is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see
|
||||
<http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
#include "logging.h"
|
||||
#include "xdr-common.h"
|
||||
|
||||
ssize_t
|
||||
xdr_serialize_generic (struct iovec outmsg, void *res, xdrproc_t proc)
|
||||
{
|
||||
ssize_t ret = -1;
|
||||
XDR xdr;
|
||||
|
||||
if ((!outmsg.iov_base) || (!res) || (!proc))
|
||||
return -1;
|
||||
|
||||
xdrmem_create (&xdr, outmsg.iov_base, (unsigned int)outmsg.iov_len,
|
||||
XDR_ENCODE);
|
||||
|
||||
if (!proc (&xdr, res)) {
|
||||
gf_log_callingfn ("xdr", GF_LOG_WARNING,
|
||||
"XDR encoding failed");
|
||||
ret = -1;
|
||||
goto ret;
|
||||
}
|
||||
|
||||
ret = xdr_encoded_length (xdr);
|
||||
|
||||
ret:
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
ssize_t
|
||||
xdr_to_generic (struct iovec inmsg, void *args, xdrproc_t proc)
|
||||
{
|
||||
XDR xdr;
|
||||
ssize_t ret = -1;
|
||||
|
||||
if ((!inmsg.iov_base) || (!args) || (!proc))
|
||||
return -1;
|
||||
|
||||
xdrmem_create (&xdr, inmsg.iov_base, (unsigned int)inmsg.iov_len,
|
||||
XDR_DECODE);
|
||||
|
||||
if (!proc (&xdr, args)) {
|
||||
gf_log_callingfn ("xdr", GF_LOG_WARNING,
|
||||
"XDR decoding failed");
|
||||
ret = -1;
|
||||
goto ret;
|
||||
}
|
||||
|
||||
ret = xdr_decoded_length (xdr);
|
||||
ret:
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
bool_t
|
||||
xdr_gf_dump_req (XDR *xdrs, gf_dump_req *objp)
|
||||
{
|
||||
if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
|
||||
return FALSE;
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
bool_t
|
||||
xdr_gf_prog_detail (XDR *xdrs, gf_prog_detail *objp)
|
||||
{
|
||||
if (!xdr_string (xdrs, &objp->progname, ~0))
|
||||
return FALSE;
|
||||
if (!xdr_u_quad_t (xdrs, &objp->prognum))
|
||||
return FALSE;
|
||||
if (!xdr_u_quad_t (xdrs, &objp->progver))
|
||||
return FALSE;
|
||||
if (!xdr_pointer (xdrs, (char **)&objp->next, sizeof (gf_prog_detail),
|
||||
(xdrproc_t) xdr_gf_prog_detail))
|
||||
return FALSE;
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
bool_t
|
||||
xdr_gf_dump_rsp (XDR *xdrs, gf_dump_rsp *objp)
|
||||
{
|
||||
if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
|
||||
return FALSE;
|
||||
if (!xdr_int (xdrs, &objp->op_ret))
|
||||
return FALSE;
|
||||
if (!xdr_int (xdrs, &objp->op_errno))
|
||||
return FALSE;
|
||||
if (!xdr_pointer (xdrs, (char **)&objp->prog, sizeof (gf_prog_detail),
|
||||
(xdrproc_t) xdr_gf_prog_detail))
|
||||
return FALSE;
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
|
||||
ssize_t
|
||||
xdr_serialize_dump_rsp (struct iovec outmsg, void *rsp)
|
||||
{
|
||||
return xdr_serialize_generic (outmsg, (void *)rsp,
|
||||
(xdrproc_t)xdr_gf_dump_rsp);
|
||||
}
|
||||
|
||||
ssize_t
|
||||
xdr_to_dump_req (struct iovec inmsg, void *args)
|
||||
{
|
||||
return xdr_to_generic (inmsg, (void *)args,
|
||||
(xdrproc_t)xdr_gf_dump_req);
|
||||
}
|
||||
|
||||
|
||||
ssize_t
|
||||
xdr_from_dump_req (struct iovec outmsg, void *rsp)
|
||||
{
|
||||
return xdr_serialize_generic (outmsg, (void *)rsp,
|
||||
(xdrproc_t)xdr_gf_dump_req);
|
||||
}
|
||||
|
||||
ssize_t
|
||||
xdr_to_dump_rsp (struct iovec inmsg, void *args)
|
||||
{
|
||||
return xdr_to_generic (inmsg, (void *)args,
|
||||
(xdrproc_t)xdr_gf_dump_rsp);
|
||||
}
|
@ -29,6 +29,8 @@ rpcsvc_auth_unix_init (rpcsvc_t *svc, dict_t *options);
|
||||
|
||||
extern rpcsvc_auth_t *
|
||||
rpcsvc_auth_glusterfs_init (rpcsvc_t *svc, dict_t *options);
|
||||
extern rpcsvc_auth_t *
|
||||
rpcsvc_auth_glusterfs_v2_init (rpcsvc_t *svc, dict_t *options);
|
||||
|
||||
int
|
||||
rpcsvc_auth_add_initer (struct list_head *list, char *idfier,
|
||||
@ -66,6 +68,16 @@ rpcsvc_auth_add_initers (rpcsvc_t *svc)
|
||||
goto err;
|
||||
}
|
||||
|
||||
|
||||
ret = rpcsvc_auth_add_initer (&svc->authschemes, "auth-glusterfs-v2",
|
||||
(rpcsvc_auth_initer_t)
|
||||
rpcsvc_auth_glusterfs_v2_init);
|
||||
if (ret == -1) {
|
||||
gf_log (GF_RPCSVC, GF_LOG_ERROR,
|
||||
"Failed to add AUTH_GLUSTERFS-v2");
|
||||
goto err;
|
||||
}
|
||||
|
||||
ret = rpcsvc_auth_add_initer (&svc->authschemes, "auth-unix",
|
||||
(rpcsvc_auth_initer_t)
|
||||
rpcsvc_auth_unix_init);
|
||||
@ -434,8 +446,12 @@ rpcsvc_auth_unix_auxgids (rpcsvc_request_t *req, int *arrlen)
|
||||
return NULL;
|
||||
|
||||
/* In case of AUTH_NULL auxgids are not used */
|
||||
if ((req->cred.flavour != AUTH_UNIX) &&
|
||||
(req->cred.flavour != AUTH_GLUSTERFS)) {
|
||||
switch (req->cred.flavour) {
|
||||
case AUTH_UNIX:
|
||||
case AUTH_GLUSTERFS:
|
||||
case AUTH_GLUSTERFS_v2:
|
||||
break;
|
||||
default:
|
||||
gf_log ("rpc", GF_LOG_DEBUG, "auth type not unix or glusterfs");
|
||||
return NULL;
|
||||
}
|
||||
|
@ -34,6 +34,8 @@
|
||||
#include "iobuf.h"
|
||||
#include "globals.h"
|
||||
#include "xdr-common.h"
|
||||
#include "xdr-generic.h"
|
||||
#include "rpc-common-xdr.h"
|
||||
|
||||
#include <errno.h>
|
||||
#include <pthread.h>
|
||||
@ -1687,7 +1689,7 @@ fail:
|
||||
iov.iov_base = rsp_buf;
|
||||
iov.iov_len = dump_rsp_len;
|
||||
|
||||
ret = xdr_serialize_dump_rsp (iov, &rsp);
|
||||
ret = xdr_serialize_generic (iov, &rsp, (xdrproc_t)xdr_gf_dump_rsp);
|
||||
if (ret < 0) {
|
||||
if (req)
|
||||
req->rpc_err = GARBAGE_ARGS;
|
||||
|
@ -43,10 +43,6 @@
|
||||
#include <rpc/rpc_msg.h>
|
||||
#include "compat.h"
|
||||
|
||||
#ifndef NGRPS
|
||||
#define NGRPS 16
|
||||
#endif /* !NGRPS */
|
||||
|
||||
#ifndef MAX_IOVEC
|
||||
#define MAX_IOVEC 16
|
||||
#endif
|
||||
@ -115,8 +111,6 @@
|
||||
#define AUTH_KERB 4 /* kerberos style */
|
||||
#endif /* */
|
||||
|
||||
#define AUTH_GLUSTERFS 5
|
||||
|
||||
typedef struct rpcsvc_program rpcsvc_program_t;
|
||||
|
||||
struct rpcsvc_notify_wrapper {
|
||||
@ -143,11 +137,10 @@ struct rpcsvc_config {
|
||||
int max_block_size;
|
||||
};
|
||||
|
||||
#define RPCSVC_MAX_AUTH_BYTES 400
|
||||
typedef struct rpcsvc_auth_data {
|
||||
int flavour;
|
||||
int datalen;
|
||||
char authdata[RPCSVC_MAX_AUTH_BYTES];
|
||||
char authdata[GF_MAX_AUTH_BYTES];
|
||||
} rpcsvc_auth_data_t;
|
||||
|
||||
#define rpcsvc_auth_flavour(au) ((au).flavour)
|
||||
@ -184,13 +177,13 @@ struct rpcsvc_request {
|
||||
gid_t gid;
|
||||
pid_t pid;
|
||||
|
||||
uint64_t lk_owner;
|
||||
gf_lkowner_t lk_owner;
|
||||
uint64_t gfs_id;
|
||||
|
||||
/* Might want to move this to AUTH_UNIX specific state since this array
|
||||
* is not available for every authentication scheme.
|
||||
*/
|
||||
gid_t auxgids[NGRPS];
|
||||
gid_t auxgids[GF_MAX_AUX_GROUPS];
|
||||
int auxgidcount;
|
||||
|
||||
|
||||
|
@ -34,7 +34,6 @@
|
||||
#include <dirent.h>
|
||||
#endif /* __NetBSD__ */
|
||||
|
||||
|
||||
enum gf_dump_procnum {
|
||||
GF_DUMP_NULL,
|
||||
GF_DUMP_DUMP,
|
||||
@ -44,6 +43,7 @@ enum gf_dump_procnum {
|
||||
#define GLUSTER_DUMP_PROGRAM 123451501 /* Completely random */
|
||||
#define GLUSTER_DUMP_VERSION 1
|
||||
|
||||
#define GF_MAX_AUTH_BYTES 2048
|
||||
|
||||
#if GF_DARWIN_HOST_OS
|
||||
#define xdr_u_quad_t xdr_u_int64_t
|
||||
@ -67,52 +67,6 @@ enum gf_dump_procnum {
|
||||
#define xdr_uint32_t xdr_uint32_t
|
||||
#endif
|
||||
|
||||
struct auth_glusterfs_parms {
|
||||
uint64_t lk_owner;
|
||||
u_int pid;
|
||||
u_int uid;
|
||||
u_int gid;
|
||||
u_int ngrps;
|
||||
u_int groups[16];
|
||||
} __attribute__((packed));
|
||||
typedef struct auth_glusterfs_parms auth_glusterfs_parms;
|
||||
|
||||
struct gf_dump_req {
|
||||
uint64_t gfs_id;
|
||||
} __attribute__((packed));
|
||||
typedef struct gf_dump_req gf_dump_req;
|
||||
|
||||
struct gf_prog_detail {
|
||||
char *progname;
|
||||
uint64_t prognum;
|
||||
uint64_t progver;
|
||||
struct gf_prog_detail *next;
|
||||
} __attribute__((packed));
|
||||
typedef struct gf_prog_detail gf_prog_detail;
|
||||
|
||||
struct gf_dump_rsp {
|
||||
uint64_t gfs_id;
|
||||
int op_ret;
|
||||
int op_errno;
|
||||
struct gf_prog_detail *prog;
|
||||
}__attribute__((packed));
|
||||
typedef struct gf_dump_rsp gf_dump_rsp;
|
||||
|
||||
extern bool_t
|
||||
xdr_auth_glusterfs_parms (XDR *xdrs, auth_glusterfs_parms *objp);
|
||||
extern bool_t xdr_gf_dump_req (XDR *, gf_dump_req*);
|
||||
extern bool_t xdr_gf_prog_detail (XDR *, gf_prog_detail*);
|
||||
extern bool_t xdr_gf_dump_rsp (XDR *, gf_dump_rsp*);
|
||||
|
||||
ssize_t
|
||||
xdr_serialize_dump_rsp (struct iovec outmsg, void *rsp);
|
||||
ssize_t
|
||||
xdr_to_dump_req (struct iovec inmsg, void *args);
|
||||
ssize_t
|
||||
xdr_from_dump_req (struct iovec outmsg, void *rsp);
|
||||
ssize_t
|
||||
xdr_to_dump_rsp (struct iovec inmsg, void *args);
|
||||
|
||||
/* Returns the address of the byte that follows the
|
||||
* last byte used for decoding the previous xdr component.
|
||||
* E.g. once the RPC call for NFS has been decoded, the macro will return
|
||||
|
@ -17,7 +17,7 @@
|
||||
<http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef _XDR_RPC_H
|
||||
#ifndef _XDR_RPC_H_
|
||||
#define _XDR_RPC_H_
|
||||
|
||||
#ifndef _CONFIG_H
|
||||
@ -39,6 +39,13 @@
|
||||
#include <rpc/xdr.h>
|
||||
#include <sys/uio.h>
|
||||
|
||||
#include "xdr-common.h"
|
||||
|
||||
typedef enum {
|
||||
AUTH_GLUSTERFS = 5,
|
||||
AUTH_GLUSTERFS_v2 = 6,
|
||||
} gf_rpc_authtype_t;
|
||||
|
||||
/* Converts a given network buffer from its XDR format to a structure
|
||||
* that contains everything an RPC call needs to work.
|
||||
*/
|
||||
|
@ -15,6 +15,6 @@ noinst_HEADERS = rdma.h name.h
|
||||
|
||||
AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS) \
|
||||
-I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/rpc-lib/src/ \
|
||||
-I$(top_srcdir)/xlators/protocol/lib/src -shared -nostartfiles $(GF_CFLAGS)
|
||||
-I$(top_srcdir)/rpc/xdr/src -shared -nostartfiles $(GF_CFLAGS)
|
||||
|
||||
CLEANFILES = *~
|
||||
|
@ -9,14 +9,14 @@ libgfxdr_la_CPPFLAGS = -D_FILE_OFFSET_BITS=64 -D__USE_FILE_OFFSET64 \
|
||||
libgfxdr_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
|
||||
$(top_builddir)/rpc/rpc-lib/src/libgfrpc.la
|
||||
|
||||
libgfxdr_la_SOURCES = xdr-generic.c \
|
||||
libgfxdr_la_SOURCES = xdr-generic.c rpc-common-xdr.c \
|
||||
glusterfs3-xdr.c \
|
||||
cli1-xdr.c \
|
||||
glusterd1-xdr.c \
|
||||
portmap-xdr.c \
|
||||
xdr-nfs3.c msg-nfs3.c
|
||||
|
||||
noinst_HEADERS = xdr-generic.h \
|
||||
noinst_HEADERS = xdr-generic.h rpc-common-xdr.h \
|
||||
glusterfs3-xdr.h glusterfs3.h \
|
||||
cli1-xdr.h \
|
||||
glusterd1-xdr.h \
|
||||
|
@ -80,7 +80,7 @@ xdr_gf_proto_flock (XDR *xdrs, gf_proto_flock *objp)
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->pid))
|
||||
return FALSE;
|
||||
if (!xdr_u_quad_t (xdrs, &objp->owner))
|
||||
if (!xdr_bytes (xdrs, (char **)&objp->lk_owner.lk_owner_val, (u_int *) &objp->lk_owner.lk_owner_len, ~0))
|
||||
return FALSE;
|
||||
return TRUE;
|
||||
}
|
||||
|
@ -63,7 +63,10 @@ struct gf_proto_flock {
|
||||
u_quad_t start;
|
||||
u_quad_t len;
|
||||
u_int pid;
|
||||
u_quad_t owner;
|
||||
struct {
|
||||
u_int lk_owner_len;
|
||||
char *lk_owner_val;
|
||||
} lk_owner;
|
||||
};
|
||||
typedef struct gf_proto_flock gf_proto_flock;
|
||||
|
||||
|
@ -19,7 +19,7 @@ struct gf_proto_flock {
|
||||
unsigned hyper start;
|
||||
unsigned hyper len;
|
||||
unsigned int pid;
|
||||
unsigned hyper owner;
|
||||
opaque lk_owner<>;
|
||||
} ;
|
||||
|
||||
|
||||
|
@ -197,7 +197,11 @@ gf_proto_flock_to_flock (struct gf_proto_flock *gf_proto_flock, struct gf_flock
|
||||
gf_flock->l_start = gf_proto_flock->start;
|
||||
gf_flock->l_len = gf_proto_flock->len;
|
||||
gf_flock->l_pid = gf_proto_flock->pid;
|
||||
gf_flock->l_owner = gf_proto_flock->owner;
|
||||
gf_flock->l_owner.len = gf_proto_flock->lk_owner.lk_owner_len;
|
||||
if (gf_flock->l_owner.len &&
|
||||
(gf_flock->l_owner.len < GF_MAX_LOCK_OWNER_LEN))
|
||||
memcpy (gf_flock->l_owner.data, gf_proto_flock->lk_owner.lk_owner_val,
|
||||
gf_flock->l_owner.len);
|
||||
}
|
||||
|
||||
|
||||
@ -212,7 +216,9 @@ gf_proto_flock_from_flock (struct gf_proto_flock *gf_proto_flock, struct gf_floc
|
||||
gf_proto_flock->start = (gf_flock->l_start);
|
||||
gf_proto_flock->len = (gf_flock->l_len);
|
||||
gf_proto_flock->pid = (gf_flock->l_pid);
|
||||
gf_proto_flock->owner = (gf_flock->l_owner);
|
||||
gf_proto_flock->lk_owner.lk_owner_len = gf_flock->l_owner.len;
|
||||
if (gf_flock->l_owner.len)
|
||||
gf_proto_flock->lk_owner.lk_owner_val = gf_flock->l_owner.data;
|
||||
}
|
||||
|
||||
static inline void
|
||||
|
@ -63,84 +63,6 @@ ret:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
ssize_t
|
||||
xdr_serialize_generic (struct iovec outmsg, void *res, xdrproc_t proc)
|
||||
{
|
||||
ssize_t ret = -1;
|
||||
XDR xdr;
|
||||
|
||||
if ((!outmsg.iov_base) || (!res) || (!proc))
|
||||
return -1;
|
||||
|
||||
xdrmem_create (&xdr, outmsg.iov_base, (unsigned int)outmsg.iov_len,
|
||||
XDR_ENCODE);
|
||||
|
||||
if (!proc (&xdr, res)) {
|
||||
ret = -1;
|
||||
goto ret;
|
||||
}
|
||||
|
||||
ret = xdr_encoded_length (xdr);
|
||||
|
||||
ret:
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
ssize_t
|
||||
xdr_to_generic (struct iovec inmsg, void *args, xdrproc_t proc)
|
||||
{
|
||||
XDR xdr;
|
||||
ssize_t ret = -1;
|
||||
|
||||
if ((!inmsg.iov_base) || (!args) || (!proc))
|
||||
return -1;
|
||||
|
||||
xdrmem_create (&xdr, inmsg.iov_base, (unsigned int)inmsg.iov_len,
|
||||
XDR_DECODE);
|
||||
|
||||
if (!proc (&xdr, args)) {
|
||||
ret = -1;
|
||||
goto ret;
|
||||
}
|
||||
|
||||
ret = xdr_decoded_length (xdr);
|
||||
ret:
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
ssize_t
|
||||
xdr_to_generic_payload (struct iovec inmsg, void *args, xdrproc_t proc,
|
||||
struct iovec *pendingpayload)
|
||||
{
|
||||
XDR xdr;
|
||||
ssize_t ret = -1;
|
||||
|
||||
if ((!inmsg.iov_base) || (!args) || (!proc))
|
||||
return -1;
|
||||
|
||||
xdrmem_create (&xdr, inmsg.iov_base, (unsigned int)inmsg.iov_len,
|
||||
XDR_DECODE);
|
||||
|
||||
if (!proc (&xdr, args)) {
|
||||
ret = -1;
|
||||
goto ret;
|
||||
}
|
||||
|
||||
ret = xdr_decoded_length (xdr);
|
||||
|
||||
if (pendingpayload) {
|
||||
pendingpayload->iov_base = xdr_decoded_remaining_addr (xdr);
|
||||
pendingpayload->iov_len = xdr_decoded_remaining_len (xdr);
|
||||
}
|
||||
|
||||
ret:
|
||||
return ret;
|
||||
}
|
||||
*/
|
||||
|
||||
/* Translate the mountres3 structure in res into XDR format into memory
|
||||
* referenced by outmsg.iov_base.
|
||||
* Returns the number of bytes used in encoding into XDR format.
|
||||
|
rpc/xdr/src/rpc-common-xdr.c (new file, 232 lines)
@@ -0,0 +1,232 @@
|
||||
/*
|
||||
Copyright (c) 2007-2011 Gluster, Inc. <http://www.gluster.com>
|
||||
This file is part of GlusterFS.
|
||||
|
||||
GlusterFS is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published
|
||||
by the Free Software Foundation; either version 3 of the License,
|
||||
or (at your option) any later version.
|
||||
|
||||
GlusterFS is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see
|
||||
<http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "xdr-common.h"
|
||||
#include "compat.h"
|
||||
|
||||
#if defined(__GNUC__)
|
||||
#if __GNUC__ >= 4
|
||||
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Please do not edit this file.
|
||||
* It was generated using rpcgen.
|
||||
*/
|
||||
|
||||
#include "rpc-common-xdr.h"
|
||||
|
||||
bool_t
|
||||
xdr_auth_glusterfs_parms_v2 (XDR *xdrs, auth_glusterfs_parms_v2 *objp)
|
||||
{
|
||||
register int32_t *buf;
|
||||
buf = NULL;
|
||||
|
||||
|
||||
if (xdrs->x_op == XDR_ENCODE) {
|
||||
buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
|
||||
if (buf == NULL) {
|
||||
if (!xdr_int (xdrs, &objp->pid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->uid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->gid))
|
||||
return FALSE;
|
||||
|
||||
} else {
|
||||
IXDR_PUT_LONG(buf, objp->pid);
|
||||
IXDR_PUT_U_LONG(buf, objp->uid);
|
||||
IXDR_PUT_U_LONG(buf, objp->gid);
|
||||
}
|
||||
if (!xdr_array (xdrs, (char **)&objp->groups.groups_val, (u_int *) &objp->groups.groups_len, ~0,
|
||||
sizeof (u_int), (xdrproc_t) xdr_u_int))
|
||||
return FALSE;
|
||||
if (!xdr_bytes (xdrs, (char **)&objp->lk_owner.lk_owner_val, (u_int *) &objp->lk_owner.lk_owner_len, ~0))
|
||||
return FALSE;
|
||||
return TRUE;
|
||||
} else if (xdrs->x_op == XDR_DECODE) {
|
||||
buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
|
||||
if (buf == NULL) {
|
||||
if (!xdr_int (xdrs, &objp->pid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->uid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->gid))
|
||||
return FALSE;
|
||||
|
||||
} else {
|
||||
objp->pid = IXDR_GET_LONG(buf);
|
||||
objp->uid = IXDR_GET_U_LONG(buf);
|
||||
objp->gid = IXDR_GET_U_LONG(buf);
|
||||
}
|
||||
if (!xdr_array (xdrs, (char **)&objp->groups.groups_val, (u_int *) &objp->groups.groups_len, ~0,
|
||||
sizeof (u_int), (xdrproc_t) xdr_u_int))
|
||||
return FALSE;
|
||||
if (!xdr_bytes (xdrs, (char **)&objp->lk_owner.lk_owner_val, (u_int *) &objp->lk_owner.lk_owner_len, ~0))
|
||||
return FALSE;
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
if (!xdr_int (xdrs, &objp->pid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->uid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->gid))
|
||||
return FALSE;
|
||||
if (!xdr_array (xdrs, (char **)&objp->groups.groups_val, (u_int *) &objp->groups.groups_len, ~0,
|
||||
sizeof (u_int), (xdrproc_t) xdr_u_int))
|
||||
return FALSE;
|
||||
if (!xdr_bytes (xdrs, (char **)&objp->lk_owner.lk_owner_val, (u_int *) &objp->lk_owner.lk_owner_len, ~0))
|
||||
return FALSE;
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
bool_t
|
||||
xdr_auth_glusterfs_parms (XDR *xdrs, auth_glusterfs_parms *objp)
|
||||
{
|
||||
register int32_t *buf;
|
||||
int i;
|
||||
buf = NULL;
|
||||
|
||||
|
||||
if (xdrs->x_op == XDR_ENCODE) {
|
||||
if (!xdr_u_quad_t (xdrs, &objp->lk_owner))
|
||||
return FALSE;
|
||||
buf = XDR_INLINE (xdrs, (4 + 16 )* BYTES_PER_XDR_UNIT);
|
||||
if (buf == NULL) {
|
||||
if (!xdr_u_int (xdrs, &objp->pid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->uid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->gid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->ngrps))
|
||||
return FALSE;
|
||||
if (!xdr_vector (xdrs, (char *)objp->groups, 16,
|
||||
sizeof (u_int), (xdrproc_t) xdr_u_int))
|
||||
return FALSE;
|
||||
} else {
|
||||
IXDR_PUT_U_LONG(buf, objp->pid);
|
||||
IXDR_PUT_U_LONG(buf, objp->uid);
|
||||
IXDR_PUT_U_LONG(buf, objp->gid);
|
||||
IXDR_PUT_U_LONG(buf, objp->ngrps);
|
||||
{
|
||||
register u_int *genp;
|
||||
|
||||
for (i = 0, genp = objp->groups;
|
||||
i < 16; ++i) {
|
||||
IXDR_PUT_U_LONG(buf, *genp++);
|
||||
}
|
||||
}
|
||||
}
|
||||
return TRUE;
|
||||
} else if (xdrs->x_op == XDR_DECODE) {
|
||||
if (!xdr_u_quad_t (xdrs, &objp->lk_owner))
|
||||
return FALSE;
|
||||
buf = XDR_INLINE (xdrs, (4 + 16 )* BYTES_PER_XDR_UNIT);
|
||||
if (buf == NULL) {
|
||||
if (!xdr_u_int (xdrs, &objp->pid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->uid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->gid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->ngrps))
|
||||
return FALSE;
|
||||
if (!xdr_vector (xdrs, (char *)objp->groups, 16,
|
||||
sizeof (u_int), (xdrproc_t) xdr_u_int))
|
||||
return FALSE;
|
||||
} else {
|
||||
objp->pid = IXDR_GET_U_LONG(buf);
|
||||
objp->uid = IXDR_GET_U_LONG(buf);
|
||||
objp->gid = IXDR_GET_U_LONG(buf);
|
||||
objp->ngrps = IXDR_GET_U_LONG(buf);
|
||||
{
|
||||
register u_int *genp;
|
||||
|
||||
for (i = 0, genp = objp->groups;
|
||||
i < 16; ++i) {
|
||||
*genp++ = IXDR_GET_U_LONG(buf);
|
||||
}
|
||||
}
|
||||
}
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
if (!xdr_u_quad_t (xdrs, &objp->lk_owner))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->pid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->uid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->gid))
|
||||
return FALSE;
|
||||
if (!xdr_u_int (xdrs, &objp->ngrps))
|
||||
return FALSE;
|
||||
if (!xdr_vector (xdrs, (char *)objp->groups, 16,
|
||||
sizeof (u_int), (xdrproc_t) xdr_u_int))
|
||||
return FALSE;
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
bool_t
|
||||
xdr_gf_dump_req (XDR *xdrs, gf_dump_req *objp)
|
||||
{
|
||||
register int32_t *buf;
|
||||
buf = NULL;
|
||||
|
||||
if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
|
||||
return FALSE;
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
bool_t
|
||||
xdr_gf_prog_detail (XDR *xdrs, gf_prog_detail *objp)
|
||||
{
|
||||
register int32_t *buf;
|
||||
buf = NULL;
|
||||
|
||||
if (!xdr_string (xdrs, &objp->progname, ~0))
|
||||
return FALSE;
|
||||
if (!xdr_u_quad_t (xdrs, &objp->prognum))
|
||||
return FALSE;
|
||||
if (!xdr_u_quad_t (xdrs, &objp->progver))
|
||||
return FALSE;
|
||||
if (!xdr_pointer (xdrs, (char **)&objp->next, sizeof (gf_prog_detail), (xdrproc_t) xdr_gf_prog_detail))
|
||||
return FALSE;
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
bool_t
|
||||
xdr_gf_dump_rsp (XDR *xdrs, gf_dump_rsp *objp)
|
||||
{
|
||||
register int32_t *buf;
|
||||
buf = NULL;
|
||||
|
||||
if (!xdr_u_quad_t (xdrs, &objp->gfs_id))
|
||||
return FALSE;
|
||||
if (!xdr_int (xdrs, &objp->op_ret))
|
||||
return FALSE;
|
||||
if (!xdr_int (xdrs, &objp->op_errno))
|
||||
return FALSE;
|
||||
if (!xdr_pointer (xdrs, (char **)&objp->prog, sizeof (gf_prog_detail), (xdrproc_t) xdr_gf_prog_detail))
|
||||
return FALSE;
|
||||
return TRUE;
|
||||
}
|
rpc/xdr/src/rpc-common-xdr.h (new file, 113 lines)
@@ -0,0 +1,113 @@
|
||||
/*
|
||||
Copyright (c) 2007-2011 Gluster, Inc. <http://www.gluster.com>
|
||||
This file is part of GlusterFS.
|
||||
|
||||
GlusterFS is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published
|
||||
by the Free Software Foundation; either version 3 of the License,
|
||||
or (at your option) any later version.
|
||||
|
||||
GlusterFS is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see
|
||||
<http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "xdr-common.h"
|
||||
#include "compat.h"
|
||||
|
||||
#if defined(__GNUC__)
|
||||
#if __GNUC__ >= 4
|
||||
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Please do not edit this file.
|
||||
* It was generated using rpcgen.
|
||||
*/
|
||||
|
||||
#ifndef _RPC_COMMON_XDR_H_RPCGEN
|
||||
#define _RPC_COMMON_XDR_H_RPCGEN
|
||||
|
||||
#include <rpc/rpc.h>
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
|
||||
struct auth_glusterfs_parms_v2 {
|
||||
int pid;
|
||||
u_int uid;
|
||||
u_int gid;
|
||||
struct {
|
||||
u_int groups_len;
|
||||
u_int *groups_val;
|
||||
} groups;
|
||||
struct {
|
||||
u_int lk_owner_len;
|
||||
char *lk_owner_val;
|
||||
} lk_owner;
|
||||
};
|
||||
typedef struct auth_glusterfs_parms_v2 auth_glusterfs_parms_v2;
|
||||
|
||||
struct auth_glusterfs_parms {
|
||||
u_quad_t lk_owner;
|
||||
u_int pid;
|
||||
u_int uid;
|
||||
u_int gid;
|
||||
u_int ngrps;
|
||||
u_int groups[16];
|
||||
};
|
||||
typedef struct auth_glusterfs_parms auth_glusterfs_parms;
|
||||
|
||||
struct gf_dump_req {
|
||||
u_quad_t gfs_id;
|
||||
};
|
||||
typedef struct gf_dump_req gf_dump_req;
|
||||
|
||||
struct gf_prog_detail {
|
||||
char *progname;
|
||||
u_quad_t prognum;
|
||||
u_quad_t progver;
|
||||
struct gf_prog_detail *next;
|
||||
};
|
||||
typedef struct gf_prog_detail gf_prog_detail;
|
||||
|
||||
struct gf_dump_rsp {
|
||||
u_quad_t gfs_id;
|
||||
int op_ret;
|
||||
int op_errno;
|
||||
struct gf_prog_detail *prog;
|
||||
};
|
||||
typedef struct gf_dump_rsp gf_dump_rsp;
|
||||
|
||||
/* the xdr functions */
|
||||
|
||||
#if defined(__STDC__) || defined(__cplusplus)
|
||||
extern bool_t xdr_auth_glusterfs_parms_v2 (XDR *, auth_glusterfs_parms_v2*);
|
||||
extern bool_t xdr_auth_glusterfs_parms (XDR *, auth_glusterfs_parms*);
|
||||
extern bool_t xdr_gf_dump_req (XDR *, gf_dump_req*);
|
||||
extern bool_t xdr_gf_prog_detail (XDR *, gf_prog_detail*);
|
||||
extern bool_t xdr_gf_dump_rsp (XDR *, gf_dump_rsp*);
|
||||
|
||||
#else /* K&R C */
|
||||
extern bool_t xdr_auth_glusterfs_parms_v2 ();
|
||||
extern bool_t xdr_auth_glusterfs_parms ();
|
||||
extern bool_t xdr_gf_dump_req ();
|
||||
extern bool_t xdr_gf_prog_detail ();
|
||||
extern bool_t xdr_gf_dump_rsp ();
|
||||
|
||||
#endif /* K&R C */
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* !_RPC_COMMON_XDR_H_RPCGEN */
|
rpc/xdr/src/rpc-common-xdr.x (new file, 39 lines)
@@ -0,0 +1,39 @@
/* This file has definition of few XDR structures which are
 * not captured in any section specific file */

struct auth_glusterfs_parms_v2 {
        int pid;
        unsigned int uid;
        unsigned int gid;
        unsigned int groups<>;
        opaque lk_owner<>;
};

struct auth_glusterfs_parms {
        unsigned hyper lk_owner;
        unsigned int pid;
        unsigned int uid;
        unsigned int gid;
        unsigned int ngrps;
        unsigned groups[16];
};

struct gf_dump_req {
        unsigned hyper gfs_id;
};


struct gf_prog_detail {
        string progname<>;
        unsigned hyper prognum;
        unsigned hyper progver;
        struct gf_prog_detail *next;
};


struct gf_dump_rsp {
        unsigned hyper gfs_id;
        int op_ret;
        int op_errno;
        struct gf_prog_detail *prog;
};
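The v2 credential above replaces the fixed fields of auth_glusterfs_parms with XDR variable-length arrays: the owner travels as an opaque blob of up to 1 KB and the group list is no longer pinned to 16 entries, while the whole credential still has to fit into the enlarged 2 KB auth area (GF_MAX_AUTH_BYTES), which is what the GF_MAX_AUX_GROUPS cap of 200 in glusterfs.h budgets for. A hedged sketch of how a client might fill it, modelled on the rpc_clnt_record() change earlier in this diff; fill_auth_v2 is an illustrative helper, and the real code additionally falls back to using the pid as a 4-byte owner when lk_owner.len is 0:

    #include "rpc-common-xdr.h"  /* auth_glusterfs_parms_v2, added here */
    #include "stack.h"           /* call_frame_t, now with gf_lkowner_t */

    static void
    fill_auth_v2 (struct auth_glusterfs_parms_v2 *au, call_frame_t *frame)
    {
            au->pid = frame->root->pid;
            au->uid = frame->root->uid;
            au->gid = frame->root->gid;

            /* variable-length arrays: only the used portion is encoded */
            au->groups.groups_len     = frame->root->ngrps;
            au->groups.groups_val     = frame->root->groups;
            au->lk_owner.lk_owner_len = frame->root->lk_owner.len;
            au->lk_owner.lk_owner_val = frame->root->lk_owner.data;
    }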
@ -62,7 +62,8 @@ afr_set_lk_owner (call_frame_t *frame, xlator_t *this)
|
||||
gf_log (this->name, GF_LOG_TRACE,
|
||||
"Setting lk-owner=%llu",
|
||||
(unsigned long long) (unsigned long)frame->root);
|
||||
frame->root->lk_owner = (uint64_t) (unsigned long)frame->root;
|
||||
|
||||
set_lk_owner_from_ptr (&frame->root->lk_owner, frame->root);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -115,7 +116,7 @@ internal_lock_count (call_frame_t *frame, xlator_t *this)
|
||||
|
||||
static void
|
||||
afr_print_inodelk (char *str, int size, int cmd,
|
||||
struct gf_flock *flock, uint64_t owner)
|
||||
struct gf_flock *flock, gf_lkowner_t *owner)
|
||||
{
|
||||
char *cmd_str = NULL;
|
||||
char *type_str = NULL;
|
||||
@ -163,11 +164,11 @@ afr_print_inodelk (char *str, int size, int cmd,
|
||||
}
|
||||
|
||||
snprintf (str, size, "lock=INODELK, cmd=%s, type=%s, "
|
||||
"start=%llu, len=%llu, pid=%llu, lk-owner=%llu",
|
||||
"start=%llu, len=%llu, pid=%llu, lk-owner=%s",
|
||||
cmd_str, type_str, (unsigned long long) flock->l_start,
|
||||
(unsigned long long) flock->l_len,
|
||||
(unsigned long long) flock->l_pid,
|
||||
(unsigned long long) owner);
|
||||
lkowner_utoa (owner));
|
||||
|
||||
}
|
||||
|
||||
@ -183,11 +184,11 @@ afr_print_lockee (char *str, int size, loc_t *loc, fd_t *fd,
|
||||
|
||||
void
|
||||
afr_print_entrylk (char *str, int size, const char *basename,
|
||||
uint64_t owner)
|
||||
gf_lkowner_t *owner)
|
||||
{
|
||||
snprintf (str, size, "Basename=%s, lk-owner=%llu",
|
||||
snprintf (str, size, "Basename=%s, lk-owner=%s",
|
||||
basename ? basename : "<nul>",
|
||||
(unsigned long long)owner);
|
||||
lkowner_utoa (owner));
|
||||
}
|
||||
|
||||
static void
|
||||
@ -302,7 +303,7 @@ afr_trace_inodelk_in (call_frame_t *frame, afr_lock_call_type_t lock_call_type,
|
||||
return;
|
||||
}
|
||||
|
||||
afr_print_inodelk (lock, 256, cmd, flock, frame->root->lk_owner);
|
||||
afr_print_inodelk (lock, 256, cmd, flock, &frame->root->lk_owner);
|
||||
afr_print_lockee (lockee, 256, &local->loc, local->fd, child_index);
|
||||
|
||||
afr_set_lock_call_type (lock_call_type, lock_call_type_str, int_lock);
|
||||
@ -339,7 +340,7 @@ afr_trace_entrylk_in (call_frame_t *frame, afr_lock_call_type_t lock_call_type,
|
||||
return;
|
||||
}
|
||||
|
||||
afr_print_entrylk (lock, 256, basename, frame->root->lk_owner);
|
||||
afr_print_entrylk (lock, 256, basename, &frame->root->lk_owner);
|
||||
afr_print_lockee (lockee, 256, &local->loc, local->fd, child_index);
|
||||
|
||||
afr_set_lock_call_type (lock_call_type, lock_call_type_str, int_lock);
|
||||
@ -602,8 +603,8 @@ afr_unlock_inodelk (call_frame_t *frame, xlator_t *this)
|
||||
flock.l_type = F_UNLCK;
|
||||
|
||||
gf_log (this->name, GF_LOG_DEBUG, "attempting data unlock range %"PRIu64
|
||||
" %"PRIu64" by %"PRIu64, flock.l_start, flock.l_len,
|
||||
frame->root->lk_owner);
|
||||
" %"PRIu64" by %s", flock.l_start, flock.l_len,
|
||||
lkowner_utoa (&frame->root->lk_owner));
|
||||
|
||||
call_count = afr_locked_nodes_count (int_lock->inode_locked_nodes,
|
||||
priv->child_count);
|
||||
@ -1422,8 +1423,8 @@ afr_nonblocking_inodelk (call_frame_t *frame, xlator_t *this)
|
||||
flock.l_type = int_lock->lk_flock.l_type;
|
||||
|
||||
gf_log (this->name, GF_LOG_DEBUG, "attempting data lock range %"PRIu64
|
||||
" %"PRIu64" by %"PRIu64, flock.l_start, flock.l_len,
|
||||
frame->root->lk_owner);
|
||||
" %"PRIu64" by %s", flock.l_start, flock.l_len,
|
||||
lkowner_utoa (&frame->root->lk_owner));
|
||||
|
||||
full_flock.l_type = int_lock->lk_flock.l_type;
|
||||
|
||||
|
@ -395,8 +395,8 @@ afr_sh_data_erase_pending (call_frame_t *frame, xlator_t *this)
afr_sh_pending_to_delta (priv, sh->xattr, sh->delta_matrix, sh->success,
priv->child_count, AFR_DATA_TRANSACTION);
gf_log (this->name, GF_LOG_DEBUG, "Delta matrix for: %"PRIu64,
frame->root->lk_owner);
gf_log (this->name, GF_LOG_DEBUG, "Delta matrix for: %s",
lkowner_utoa (&frame->root->lk_owner));
afr_sh_print_pending_matrix (sh->delta_matrix, this);
erase_xattr = GF_CALLOC (sizeof (*erase_xattr), priv->child_count,

@ -658,8 +658,9 @@ afr_sh_data_fix (call_frame_t *frame, xlator_t *this)
sh = &local->self_heal;
priv = this->private;
gf_log (this->name, GF_LOG_DEBUG, "Pending matrix for: %"PRIu64,
frame->root->lk_owner);
gf_log (this->name, GF_LOG_DEBUG, "Pending matrix for: %s",
lkowner_utoa (&frame->root->lk_owner));
nsources = afr_build_sources (this, sh->xattr, sh->buf, sh->pending_matrix,
sh->sources, sh->success_children,
AFR_DATA_TRANSACTION, NULL, _gf_false);

@ -1131,14 +1132,16 @@ afr_sh_data_post_blocking_inodelk_cbk (call_frame_t *frame, xlator_t *this)
if (int_lock->lock_op_ret < 0) {
gf_log (this->name, GF_LOG_ERROR, "Blocking data inodelks "
"failed for %s. by %"PRIu64,
local->loc.path, frame->root->lk_owner);
"failed for %s. by %s",
local->loc.path, lkowner_utoa (&frame->root->lk_owner));
sh->data_lock_failure_handler (frame, this);
} else {
gf_log (this->name, GF_LOG_DEBUG, "Blocking data inodelks "
"done for %s by %"PRIu64". Proceding to self-heal",
local->loc.path, frame->root->lk_owner);
"done for %s by %s. Proceding to self-heal",
local->loc.path, lkowner_utoa (&frame->root->lk_owner));
sh->data_lock_success_handler (frame, this);
}

@ -1158,15 +1161,16 @@ afr_sh_data_post_nonblocking_inodelk_cbk (call_frame_t *frame, xlator_t *this)
if (int_lock->lock_op_ret < 0) {
gf_log (this->name, GF_LOG_DEBUG, "Non Blocking data inodelks "
"failed for %s. by %"PRIu64,
local->loc.path, frame->root->lk_owner);
"failed for %s. by %s",
local->loc.path, lkowner_utoa (&frame->root->lk_owner));
int_lock->lock_cbk = afr_sh_data_post_blocking_inodelk_cbk;
afr_blocking_lock (frame, this);
} else {
gf_log (this->name, GF_LOG_DEBUG, "Non Blocking data inodelks "
"done for %s by %"PRIu64". Proceeding to self-heal",
local->loc.path, frame->root->lk_owner);
"done for %s by %s. Proceeding to self-heal",
local->loc.path, lkowner_utoa (&frame->root->lk_owner));
sh->data_lock_success_handler (frame, this);
}
@ -694,7 +694,7 @@ pump_start (call_frame_t *pump_frame, xlator_t *this)
priv = this->private;
pump_priv = priv->pump_private;
pump_frame->root->lk_owner = (uint64_t) (unsigned long)pump_frame->root;
afr_set_lk_owner (pump_frame, this);
pump_pid = (uint64_t) (unsigned long)pump_frame->root;
ret = synctask_new (pump_priv->env, pump_task,

@ -708,8 +708,8 @@ pump_start (call_frame_t *pump_frame, xlator_t *this)
}
gf_log (this->name, GF_LOG_DEBUG,
"setting pump as started lk_owner: %"PRIu64" %"PRIu64,
pump_frame->root->lk_owner, pump_pid);
"setting pump as started lk_owner: %s %"PRIu64,
lkowner_utoa (&pump_frame->root->lk_owner), pump_pid);
priv->use_afr_in_pump = 1;
out:

@ -143,9 +143,9 @@ __pl_inode_is_empty (pl_inode_t *pl_inode)
void
pl_print_locker (char *str, int size, xlator_t *this, call_frame_t *frame)
{
snprintf (str, size, "Pid=%llu, lk-owner=%llu, Transport=%p, Frame=%llu",
snprintf (str, size, "Pid=%llu, lk-owner=%s, Transport=%p, Frame=%llu",
(unsigned long long) frame->root->pid,
(unsigned long long) frame->root->lk_owner,
lkowner_utoa (&frame->root->lk_owner),
(void *)frame->root->trans,
(unsigned long long) frame->root->unique);
}

@ -187,7 +187,7 @@ pl_print_lockee (char *str, int size, fd_t *fd, loc_t *loc)
void
pl_print_lock (char *str, int size, int cmd,
struct gf_flock *flock, uint64_t owner)
struct gf_flock *flock, gf_lkowner_t *owner)
{
char *cmd_str = NULL;
char *type_str = NULL;

@ -235,11 +235,11 @@ pl_print_lock (char *str, int size, int cmd,
}
snprintf (str, size, "lock=FCNTL, cmd=%s, type=%s, "
"start=%llu, len=%llu, pid=%llu, lk-owner=%llu",
"start=%llu, len=%llu, pid=%llu, lk-owner=%s",
cmd_str, type_str, (unsigned long long) flock->l_start,
(unsigned long long) flock->l_len,
(unsigned long long) flock->l_pid,
(unsigned long long) owner);
lkowner_utoa (owner));
}

@ -262,7 +262,7 @@ pl_trace_in (xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc,
if (domain)
pl_print_inodelk (pl_lock, 256, cmd, flock, domain);
else
pl_print_lock (pl_lock, 256, cmd, flock, frame->root->lk_owner);
pl_print_lock (pl_lock, 256, cmd, flock, &frame->root->lk_owner);
gf_log (this->name, GF_LOG_INFO,
"[REQUEST] Locker = {%s} Lockee = {%s} Lock = {%s}",

@ -312,7 +312,7 @@ pl_trace_out (xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc,
if (domain)
pl_print_inodelk (pl_lock, 256, cmd, flock, domain);
else
pl_print_lock (pl_lock, 256, cmd, flock, frame->root->lk_owner);
pl_print_lock (pl_lock, 256, cmd, flock, &frame->root->lk_owner);
pl_print_verdict (verdict, 32, op_ret, op_errno);

@ -342,7 +342,7 @@ pl_trace_block (xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc,
if (domain)
pl_print_inodelk (pl_lock, 256, cmd, flock, domain);
else
pl_print_lock (pl_lock, 256, cmd, flock, frame->root->lk_owner);
pl_print_lock (pl_lock, 256, cmd, flock, &frame->root->lk_owner);
gf_log (this->name, GF_LOG_INFO,
"[BLOCKED] Locker = {%s} Lockee = {%s} Lock = {%s}",
@ -468,7 +468,7 @@ out:
/* Create a new posix_lock_t */
posix_lock_t *
new_posix_lock (struct gf_flock *flock, void *transport, pid_t client_pid,
uint64_t owner, fd_t *fd)
gf_lkowner_t *owner, fd_t *fd)
{
posix_lock_t *lock = NULL;

@ -494,7 +494,7 @@ new_posix_lock (struct gf_flock *flock, void *transport, pid_t client_pid,
lock->fd_num = fd_to_fdnum (fd);
lock->fd = fd;
lock->client_pid = client_pid;
lock->owner = owner;
lock->owner = *owner;
INIT_LIST_HEAD (&lock->list);

@ -569,8 +569,8 @@ int
same_owner (posix_lock_t *l1, posix_lock_t *l2)
{
return ((l1->owner == l2->owner) &&
(l1->transport == l2->transport));
return (is_same_lkowner (&l1->owner, &l2->owner) &&
(l1->transport == l2->transport));
}

@ -889,10 +889,9 @@ __grant_blocked_locks (xlator_t *this, pl_inode_t *pl_inode, struct list_head *g
posix_lock_to_flock (l, &conf->user_flock);
gf_log (this->name, GF_LOG_TRACE,
"%s (pid=%d) lk-owner:%"PRIu64" %"PRId64" - %"PRId64" => Granted",
"%s (pid=%d) lk-owner:%s %"PRId64" - %"PRId64" => Granted",
l->fl_type == F_UNLCK ? "Unlock" : "Lock",
l->client_pid,
l->owner,
l->client_pid, lkowner_utoa (&l->owner),
l->user_flock.l_start,
l->user_flock.l_len);

@ -958,7 +957,7 @@ pl_send_prelock_unlock (xlator_t *this, pl_inode_t *pl_inode,
unlock_lock = new_posix_lock (&flock, old_lock->transport,
old_lock->client_pid, old_lock->owner,
old_lock->client_pid, &old_lock->owner,
old_lock->fd);
GF_VALIDATE_OR_GOTO (this->name, unlock_lock, out);
ret = 0;

@ -1011,19 +1010,19 @@ pl_setlk (xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
if (__is_lock_grantable (pl_inode, lock)) {
gf_log (this->name, GF_LOG_TRACE,
"%s (pid=%d) lk-owner:%"PRIu64" %"PRId64" - %"PRId64" => OK",
"%s (pid=%d) lk-owner:%s %"PRId64" - %"PRId64" => OK",
lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
lock->client_pid,
lock->owner,
lkowner_utoa (&lock->owner),
lock->user_flock.l_start,
lock->user_flock.l_len);
__insert_and_merge (pl_inode, lock);
} else if (can_block) {
gf_log (this->name, GF_LOG_TRACE,
"%s (pid=%d) lk-owner:%"PRIu64" %"PRId64" - %"PRId64" => Blocked",
"%s (pid=%d) lk-owner:%s %"PRId64" - %"PRId64" => Blocked",
lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
lock->client_pid,
lock->owner,
lkowner_utoa (&lock->owner),
lock->user_flock.l_start,
lock->user_flock.l_len);
lock->blocked = 1;

@ -1031,10 +1030,10 @@ pl_setlk (xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
ret = -1;
} else {
gf_log (this->name, GF_LOG_TRACE,
"%s (pid=%d) lk-owner:%"PRIu64" %"PRId64" - %"PRId64" => NOK",
"%s (pid=%d) lk-owner:%s %"PRId64" - %"PRId64" => NOK",
lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
lock->client_pid,
lock->owner,
lkowner_utoa (&lock->owner),
lock->user_flock.l_start,
lock->user_flock.l_len);
errno = EAGAIN;
@ -20,10 +20,12 @@
#ifndef __COMMON_H__
#define __COMMON_H__
#include "lkowner.h"
#define SET_FLOCK_PID(flock, lock) ((flock)->l_pid = lock->client_pid)
posix_lock_t *
new_posix_lock (struct gf_flock *flock, void *transport, pid_t client_pid,
uint64_t owner, fd_t *fd);
gf_lkowner_t *owner, fd_t *fd);
pl_inode_t *
pl_inode_get (xlator_t *this, inode_t *inode);

@ -35,7 +35,8 @@
static pl_entry_lock_t *
new_entrylk_lock (pl_inode_t *pinode, const char *basename, entrylk_type type,
void *trans, pid_t client_pid, uint64_t owner, const char *volume)
void *trans, pid_t client_pid, gf_lkowner_t *owner,
const char *volume)
{
pl_entry_lock_t *newlock = NULL;

@ -46,12 +47,12 @@ new_entrylk_lock (pl_inode_t *pinode, const char *basename, entrylk_type type,
goto out;
}
newlock->basename = basename ? gf_strdup (basename) : NULL;
newlock->type = type;
newlock->trans = trans;
newlock->volume = volume;
newlock->client_pid = client_pid;
newlock->owner = owner;
newlock->basename = basename ? gf_strdup (basename) : NULL;
newlock->type = type;
newlock->trans = trans;
newlock->volume = volume;
newlock->client_pid = client_pid;
newlock->owner = *owner;
INIT_LIST_HEAD (&newlock->domain_list);
INIT_LIST_HEAD (&newlock->blocked_locks);

@ -81,11 +82,11 @@ names_conflict (const char *n1, const char *n2)
}
static int
static inline int
__same_entrylk_owner (pl_entry_lock_t *l1, pl_entry_lock_t *l2)
{
return ((l1->owner == l2->owner) &&
return (is_same_lkowner (&l1->owner, &l2->owner) &&
(l1->trans == l2->trans));
}

@ -320,15 +321,13 @@ __lock_name (pl_inode_t *pinode, const char *basename, entrylk_type type,
pl_entry_lock_t *conf = NULL;
void *trans = NULL;
pid_t client_pid = 0;
uint64_t owner = 0;
int ret = -EINVAL;
int ret = -EINVAL;
trans = frame->root->trans;
client_pid = frame->root->pid;
owner = frame->root->lk_owner;
lock = new_entrylk_lock (pinode, basename, type, trans, client_pid, owner, dom->domain);
lock = new_entrylk_lock (pinode, basename, type, trans, client_pid,
&frame->root->lk_owner, dom->domain);
if (!lock) {
ret = -ENOMEM;
goto out;

@ -601,7 +600,6 @@ pl_common_entrylk (call_frame_t *frame, xlator_t *this,
const char *volume, inode_t *inode, const char *basename,
entrylk_cmd cmd, entrylk_type type, loc_t *loc, fd_t *fd)
{
uint64_t owner = 0;
int32_t op_ret = -1;
int32_t op_errno = 0;

@ -628,10 +626,9 @@ pl_common_entrylk (call_frame_t *frame, xlator_t *this,
entrylk_trace_in (this, frame, volume, fd, loc, basename, cmd, type);
owner = frame->root->lk_owner;
transport = frame->root->trans;
if (owner == 0) {
if (frame->root->lk_owner.len == 0) {
/*
this is a special case that means release
all locks from this transport
@ -120,9 +120,10 @@ inodelk_overlap (pl_inode_lock_t *l1, pl_inode_lock_t *l2)
}
/* Returns true if the 2 inodelks have the same owner */
static int same_inodelk_owner (pl_inode_lock_t *l1, pl_inode_lock_t *l2)
static inline int
same_inodelk_owner (pl_inode_lock_t *l1, pl_inode_lock_t *l2)
{
return ((l1->owner == l2->owner) &&
return (is_same_lkowner (&l1->owner, &l2->owner) &&
(l1->transport == l2->transport));
}

@ -212,10 +213,10 @@ __lock_inodelk (xlator_t *this, pl_inode_t *pl_inode, pl_inode_lock_t *lock,
list_add_tail (&lock->blocked_locks, &dom->blocked_inodelks);
gf_log (this->name, GF_LOG_TRACE,
"%s (pid=%d) lk-owner:%"PRIu64" %"PRId64" - %"PRId64" => Blocked",
"%s (pid=%d) lk-owner:%s %"PRId64" - %"PRId64" => Blocked",
lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
lock->client_pid,
lock->owner,
lkowner_utoa (&lock->owner),
lock->user_flock.l_start,
lock->user_flock.l_len);

@ -234,10 +235,10 @@ __lock_inodelk (xlator_t *this, pl_inode_t *pl_inode, pl_inode_lock_t *lock,
gf_log (this->name, GF_LOG_TRACE,
"Lock is grantable, but blocking to prevent starvation");
gf_log (this->name, GF_LOG_TRACE,
"%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => Blocked",
"%s (pid=%d) (lk-owner=%s) %"PRId64" - %"PRId64" => Blocked",
lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
lock->client_pid,
lock->owner,
lkowner_utoa (&lock->owner),
lock->user_flock.l_start,
lock->user_flock.l_len);

@ -352,10 +353,10 @@ grant_blocked_inode_locks (xlator_t *this, pl_inode_t *pl_inode, pl_dom_list_t *
list_for_each_entry_safe (lock, tmp, &granted, blocked_locks) {
gf_log (this->name, GF_LOG_TRACE,
"%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => Granted",
"%s (pid=%d) (lk-owner=%s) %"PRId64" - %"PRId64" => Granted",
lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
lock->client_pid,
lock->owner,
lkowner_utoa (&lock->owner),
lock->user_flock.l_start,
lock->user_flock.l_len);

@ -405,8 +406,9 @@ release_inode_locks_of_transport (xlator_t *this, pl_dom_list_t *dom,
gf_log (this->name, GF_LOG_DEBUG,
"releasing blocking lock on %s held by "
"{transport=%p, pid=%"PRId64" lk-owner=%"PRIu64"}",
file, trans, (uint64_t) l->client_pid, l->owner);
"{transport=%p, pid=%"PRId64" lk-owner=%s}",
file, trans, (uint64_t) l->client_pid,
lkowner_utoa (&l->owner));
list_add (&l->blocked_locks, &released);
if (path) {

@ -430,8 +432,9 @@ release_inode_locks_of_transport (xlator_t *this, pl_dom_list_t *dom,
gf_log (this->name, GF_LOG_DEBUG,
"releasing granted lock on %s held by "
"{transport=%p, pid=%"PRId64" lk-owner=%"PRIu64"}",
file, trans, (uint64_t) l->client_pid, l->owner);
"{transport=%p, pid=%"PRId64" lk-owner=%s}",
file, trans, (uint64_t) l->client_pid,
lkowner_utoa (&l->owner));
if (path) {
GF_FREE (path);
path = NULL;

@ -468,19 +471,19 @@ pl_inode_setlk (xlator_t *this, pl_inode_t *pl_inode, pl_inode_lock_t *lock,
ret = __lock_inodelk (this, pl_inode, lock, can_block, dom);
if (ret == 0)
gf_log (this->name, GF_LOG_TRACE,
"%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => OK",
"%s (pid=%d) (lk-owner=%s) %"PRId64" - %"PRId64" => OK",
lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
lock->client_pid,
lock->owner,
lkowner_utoa (&lock->owner),
lock->fl_start,
lock->fl_end);
if (ret == -EAGAIN)
gf_log (this->name, GF_LOG_TRACE,
"%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => NOK",
"%s (pid=%d) (lk-owner=%s) %"PRId64" - %"PRId64" => NOK",
lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
lock->client_pid,
lock->owner,
lkowner_utoa (&lock->owner),
lock->user_flock.l_start,
lock->user_flock.l_len);
@ -510,7 +513,7 @@ out:
/* Create a new inode_lock_t */
pl_inode_lock_t *
new_inode_lock (struct gf_flock *flock, void *transport, pid_t client_pid,
uint64_t owner, const char *volume)
gf_lkowner_t *owner, const char *volume)
{
pl_inode_lock_t *lock = NULL;

@ -531,8 +534,8 @@ new_inode_lock (struct gf_flock *flock, void *transport, pid_t client_pid,
lock->transport = transport;
lock->client_pid = client_pid;
lock->owner = owner;
lock->volume = volume;
lock->owner = *owner;
INIT_LIST_HEAD (&lock->list);
INIT_LIST_HEAD (&lock->blocked_locks);

@ -546,16 +549,15 @@ pl_common_inodelk (call_frame_t *frame, xlator_t *this,
const char *volume, inode_t *inode, int32_t cmd,
struct gf_flock *flock, loc_t *loc, fd_t *fd)
{
int32_t op_ret = -1;
int32_t op_errno = 0;
int ret = -1;
int can_block = 0;
void * transport = NULL;
pid_t client_pid = -1;
uint64_t owner = -1;
pl_inode_t * pinode = NULL;
pl_inode_lock_t * reqlock = NULL;
pl_dom_list_t * dom = NULL;
int32_t op_ret = -1;
int32_t op_errno = 0;
int ret = -1;
int can_block = 0;
pid_t client_pid = -1;
void * transport = NULL;
pl_inode_t * pinode = NULL;
pl_inode_lock_t * reqlock = NULL;
pl_dom_list_t * dom = NULL;
VALIDATE_OR_GOTO (frame, out);
VALIDATE_OR_GOTO (inode, unwind);

@ -570,7 +572,6 @@ pl_common_inodelk (call_frame_t *frame, xlator_t *this,
transport = frame->root->trans;
client_pid = frame->root->pid;
owner = frame->root->lk_owner;
pinode = pl_inode_get (this, inode);
if (!pinode) {

@ -580,7 +581,7 @@ pl_common_inodelk (call_frame_t *frame, xlator_t *this,
dom = get_domain (pinode, volume);
if (owner == 0) {
if (frame->root->lk_owner.len == 0) {
/*
special case: this means release all locks
from this transport

@ -594,7 +595,8 @@ pl_common_inodelk (call_frame_t *frame, xlator_t *this,
goto unwind;
}
reqlock = new_inode_lock (flock, transport, client_pid, owner, volume);
reqlock = new_inode_lock (flock, transport, client_pid,
&frame->root->lk_owner, volume);
if (!reqlock) {
op_ret = -1;

@ -687,12 +689,12 @@ __get_inodelk_count (xlator_t *this, pl_inode_t *pl_inode)
gf_log (this->name, GF_LOG_DEBUG,
" XATTR DEBUG"
" domain: %s %s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" "
" domain: %s %s (pid=%d) (lk-owner=%s) %"PRId64" - %"PRId64" "
"state = Active",
dom->domain,
lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
lock->client_pid,
lock->owner,
lkowner_utoa (&lock->owner),
lock->user_flock.l_start,
lock->user_flock.l_len);

@ -703,12 +705,12 @@ __get_inodelk_count (xlator_t *this, pl_inode_t *pl_inode)
gf_log (this->name, GF_LOG_DEBUG,
" XATTR DEBUG"
" domain: %s %s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" "
" domain: %s %s (pid=%d) (lk-owner=%s) %"PRId64" - %"PRId64" "
"state = Blocked",
dom->domain,
lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
lock->client_pid,
lock->owner,
lkowner_utoa (&lock->owner),
lock->user_flock.l_start,
lock->user_flock.l_len);
@ -30,6 +30,8 @@
#include "call-stub.h"
#include "locks-mem-types.h"
#include "lkowner.h"
#define POSIX_LOCKS "posix-locks"
struct __pl_fd;

@ -55,8 +57,8 @@ struct __posix_lock {
across nodes */
void *transport; /* to identify client node */
gf_lkowner_t owner;
pid_t client_pid; /* pid of client process */
uint64_t owner; /* lock owner from fuse */
};
typedef struct __posix_lock posix_lock_t;

@ -83,8 +85,8 @@ struct __pl_inode_lock {
across nodes */
void *transport; /* to identify client node */
gf_lkowner_t owner;
pid_t client_pid; /* pid of client process */
uint64_t owner;
};
typedef struct __pl_inode_lock pl_inode_lock_t;

@ -120,9 +122,9 @@ struct __entry_lock {
struct timeval blkd_time; /*time at which lock was queued into blkd list*/
struct timeval granted_time; /*time at which lock was queued into active list*/
void *trans;
void *trans;
gf_lkowner_t owner;
pid_t client_pid; /* pid of client process */
uint64_t owner;
};
typedef struct __entry_lock pl_entry_lock_t;
@ -126,7 +126,7 @@ pl_truncate_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
static int
truncate_allowed (pl_inode_t *pl_inode,
void *transport, pid_t client_pid,
uint64_t owner, off_t offset)
gf_lkowner_t *owner, off_t offset)
{
posix_lock_t *l = NULL;
posix_lock_t region = {.list = {0, }, };

@ -136,7 +136,7 @@ truncate_allowed (pl_inode_t *pl_inode,
region.fl_end = LLONG_MAX;
region.transport = transport;
region.client_pid = client_pid;
region.owner = owner;
region.owner = *owner;
pthread_mutex_lock (&pl_inode->mutex);
{

@ -192,7 +192,7 @@ truncate_stat_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
if (priv->mandatory
&& pl_inode->mandatory
&& !truncate_allowed (pl_inode, frame->root->trans,
frame->root->pid, frame->root->lk_owner,
frame->root->pid, &frame->root->lk_owner,
local->offset)) {
op_ret = -1;
op_errno = EAGAIN;

@ -324,7 +324,7 @@ delete_locks_of_fd (xlator_t *this, pl_inode_t *pl_inode, fd_t *fd)
static void
__delete_locks_of_owner (pl_inode_t *pl_inode,
void *transport, uint64_t owner)
void *transport, gf_lkowner_t *owner)
{
posix_lock_t *tmp = NULL;
posix_lock_t *l = NULL;

@ -332,14 +332,14 @@ __delete_locks_of_owner (pl_inode_t *pl_inode,
/* TODO: what if it is a blocked lock with pending l->frame */
list_for_each_entry_safe (l, tmp, &pl_inode->ext_list, list) {
if ((l->transport == transport)
&& (l->owner == owner)) {
if ((l->transport == transport) &&
is_same_lkowner (&l->owner, owner)) {
gf_log ("posix-locks", GF_LOG_TRACE,
" Flushing lock"
"%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" state: %s",
"%s (pid=%d) (lk-owner=%s) %"PRId64" - %"PRId64" state: %s",
l->fl_type == F_UNLCK ? "Unlock" : "Lock",
l->client_pid,
l->owner,
lkowner_utoa (&l->owner),
l->user_flock.l_start,
l->user_flock.l_len,
l->blocked == 1 ? "Blocked" : "Active");

@ -408,10 +408,7 @@ int
pl_flush (call_frame_t *frame, xlator_t *this,
fd_t *fd)
{
pl_inode_t *pl_inode = NULL;
uint64_t owner = -1;
owner = frame->root->lk_owner;
pl_inode_t *pl_inode = NULL;
pl_inode = pl_inode_get (this, fd->inode);

@ -423,7 +420,7 @@ pl_flush (call_frame_t *frame, xlator_t *this,
pl_trace_flush (this, frame, fd);
if (owner == 0) {
if (frame->root->lk_owner.len == 0) {
/* Handle special case when protocol/server sets lk-owner to zero.
* This usually happens due to a client disconnection. Hence, free
* all locks opened with this fd.

@ -437,7 +434,7 @@ pl_flush (call_frame_t *frame, xlator_t *this,
pthread_mutex_lock (&pl_inode->mutex);
{
__delete_locks_of_owner (pl_inode, frame->root->trans,
owner);
&frame->root->lk_owner);
}
pthread_mutex_unlock (&pl_inode->mutex);

@ -805,7 +802,7 @@ lock_dup (posix_lock_t *lock)
posix_lock_t *new_lock = NULL;
new_lock = new_posix_lock (&lock->user_flock, lock->transport,
lock->client_pid, lock->owner,
lock->client_pid, &lock->owner,
(fd_t *)lock->fd_num);
return new_lock;
}
@ -964,20 +961,18 @@ int
pl_lk (call_frame_t *frame, xlator_t *this,
fd_t *fd, int32_t cmd, struct gf_flock *flock)
{
void *transport = NULL;
pid_t client_pid = 0;
uint64_t owner = 0;
pl_inode_t *pl_inode = NULL;
int op_ret = 0;
int op_errno = 0;
int can_block = 0;
posix_lock_t *reqlock = NULL;
posix_lock_t *conf = NULL;
int ret = 0;
void *transport = NULL;
pid_t client_pid = 0;
pl_inode_t *pl_inode = NULL;
int op_ret = 0;
int op_errno = 0;
int can_block = 0;
posix_lock_t *reqlock = NULL;
posix_lock_t *conf = NULL;
int ret = 0;
transport = frame->root->trans;
client_pid = frame->root->pid;
owner = frame->root->lk_owner;
if ((flock->l_start < 0) || (flock->l_len < 0)) {
op_ret = -1;

@ -993,7 +988,7 @@ pl_lk (call_frame_t *frame, xlator_t *this,
}
reqlock = new_posix_lock (flock, transport, client_pid,
owner, fd);
&frame->root->lk_owner, fd);
if (!reqlock) {
op_ret = -1;

@ -1327,10 +1322,10 @@ __get_posixlk_count (xlator_t *this, pl_inode_t *pl_inode)
gf_log (this->name, GF_LOG_DEBUG,
" XATTR DEBUG"
"%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" state: %s",
"%s (pid=%d) (lk-owner=%s) %"PRId64" - %"PRId64" state: %s",
lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
lock->client_pid,
lock->owner,
lkowner_utoa (&lock->owner),
lock->user_flock.l_start,
lock->user_flock.l_len,
lock->blocked == 1 ? "Blocked" : "Active");

@ -1504,7 +1499,7 @@ out:
void
pl_dump_lock (char *str, int size, struct gf_flock *flock,
uint64_t owner, void *trans, time_t *granted_time,
gf_lkowner_t *owner, void *trans, time_t *granted_time,
time_t *blkd_time, gf_boolean_t active)
{
char *type_str = NULL;

@ -1526,30 +1521,32 @@ pl_dump_lock (char *str, int size, struct gf_flock *flock,
if (active) {
if (blkd_time && *blkd_time == 0) {
snprintf (str, size, "type=%s, start=%llu, len=%llu, pid=%llu, lk-owner=%llu, transport=%p, "
"granted at %s",
snprintf (str, size, "type=%s, start=%llu, len=%llu, "
"pid=%llu, lk-owner=%s, transport=%p, "
"granted at %s",
type_str, (unsigned long long) flock->l_start,
(unsigned long long) flock->l_len,
(unsigned long long) flock->l_pid,
(unsigned long long) owner,
lkowner_utoa (owner),
trans, ctime (granted_time));
} else {
snprintf (str, size, "type=%s, start=%llu, len=%llu, pid=%llu, lk-owner=%llu, transport=%p, "
snprintf (str, size, "type=%s, start=%llu, len=%llu, "
"pid=%llu, lk-owner=%s, transport=%p, "
"blocked at %s, granted at %s",
type_str, (unsigned long long) flock->l_start,
(unsigned long long) flock->l_len,
(unsigned long long) flock->l_pid,
(unsigned long long) owner,
lkowner_utoa (owner),
trans, ctime (blkd_time), ctime (granted_time));
}
}
else {
snprintf (str, size, "type=%s, start=%llu, len=%llu, pid=%llu, lk-owner=%llu, transport=%p, "
"blocked at %s",
snprintf (str, size, "type=%s, start=%llu, len=%llu, pid=%llu, "
"lk-owner=%s, transport=%p, blocked at %s",
type_str, (unsigned long long) flock->l_start,
(unsigned long long) flock->l_len,
(unsigned long long) flock->l_pid,
(unsigned long long) owner,
lkowner_utoa (owner),
trans, ctime (blkd_time));
}
@ -1580,20 +1577,20 @@ __dump_entrylks (pl_inode_t *pl_inode)
"xlator.feature.locks.lock-dump.domain.entrylk",
"entrylk[%d](ACTIVE)", count );
if (lock->blkd_time.tv_sec == 0 && lock->blkd_time.tv_usec == 0) {
snprintf (tmp, 256," %s on %s pid = %llu, owner=%llu, transport=%p,"
snprintf (tmp, 256," %s on %s pid = %llu, owner=%s, transport=%p,"
" granted at %s",
lock->type == ENTRYLK_RDLCK ? "ENTRYLK_RDLCK" :
"ENTRYLK_WRLCK", lock->basename,
(unsigned long long) lock->client_pid,
(unsigned long long) lock->owner, lock->trans,
lkowner_utoa (&lock->owner), lock->trans,
ctime (&lock->granted_time.tv_sec));
} else {
snprintf (tmp, 256," %s on %s pid = %llu, owner=%llu, transport=%p,"
snprintf (tmp, 256," %s on %s pid = %llu, owner=%s, transport=%p,"
" blocked at %s, granted at %s",
lock->type == ENTRYLK_RDLCK ? "ENTRYLK_RDLCK" :
"ENTRYLK_WRLCK", lock->basename,
(unsigned long long) lock->client_pid,
(unsigned long long) lock->owner, lock->trans,
lkowner_utoa (&lock->owner), lock->trans,
ctime (&lock->blkd_time.tv_sec),
ctime (&lock->granted_time.tv_sec));
}

@ -1608,12 +1605,12 @@ __dump_entrylks (pl_inode_t *pl_inode)
gf_proc_dump_build_key(key,
"xlator.feature.locks.lock-dump.domain.entrylk",
"entrylk[%d](BLOCKED)", count );
snprintf (tmp, 256," %s on %s pid = %llu, owner=%llu, transport=%p,"
snprintf (tmp, 256," %s on %s pid = %llu, owner=%s, transport=%p,"
" blocked at %s",
lock->type == ENTRYLK_RDLCK ? "ENTRYLK_RDLCK" :
"ENTRYLK_WRLCK", lock->basename,
(unsigned long long) lock->client_pid,
(unsigned long long) lock->owner, lock->trans,
lkowner_utoa (&lock->owner), lock->trans,
ctime (&lock->blkd_time.tv_sec));
gf_proc_dump_write(key, tmp);

@ -1663,7 +1660,8 @@ __dump_inodelks (pl_inode_t *pl_inode)
SET_FLOCK_PID (&lock->user_flock, lock);
pl_dump_lock (tmp, 256, &lock->user_flock,
lock->owner, lock->transport,
&lock->owner,
lock->transport,
&lock->granted_time.tv_sec,
&lock->blkd_time.tv_sec,
_gf_true);

@ -1679,7 +1677,8 @@ __dump_inodelks (pl_inode_t *pl_inode)
"inodelk[%d](BLOCKED)",count );
SET_FLOCK_PID (&lock->user_flock, lock);
pl_dump_lock (tmp, 256, &lock->user_flock,
lock->owner, lock->transport,
&lock->owner,
lock->transport,
0, &lock->blkd_time.tv_sec,
_gf_false);
gf_proc_dump_write(key, tmp);

@ -1720,7 +1719,7 @@ __dump_posixlks (pl_inode_t *pl_inode)
count,
lock->blocked ? "BLOCKED" : "ACTIVE");
pl_dump_lock (tmp, 256, &lock->user_flock,
lock->owner, lock->transport,
&lock->owner, lock->transport,
&lock->granted_time.tv_sec, &lock->blkd_time.tv_sec,
(lock->blocked)? _gf_false: _gf_true);
gf_proc_dump_write(key, tmp);
@ -81,10 +81,10 @@ out:
return ret_lock;
}
static int
static inline int
__same_owner_reservelk (posix_lock_t *l1, posix_lock_t *l2)
{
return ((l1->owner == l2->owner));
return (is_same_lkowner (&l1->owner, &l2->owner));
}

@ -187,10 +187,10 @@ __lock_reservelk (xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
list_add_tail (&lock->list, &pl_inode->blocked_reservelks);
gf_log (this->name, GF_LOG_TRACE,
"%s (pid=%d) lk-owner:%"PRIu64" %"PRId64" - %"PRId64" => Blocked",
"%s (pid=%d) lk-owner:%s %"PRId64" - %"PRId64" => Blocked",
lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
lock->client_pid,
lock->owner,
lkowner_utoa (&lock->owner),
lock->user_flock.l_start,
lock->user_flock.l_len);

@ -292,10 +292,10 @@ grant_blocked_reserve_locks (xlator_t *this, pl_inode_t *pl_inode)
list_for_each_entry_safe (lock, tmp, &granted, list) {
gf_log (this->name, GF_LOG_TRACE,
"%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => Granted",
"%s (pid=%d) (lk-owner=%s) %"PRId64" - %"PRId64" => Granted",
lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
lock->client_pid,
lock->owner,
lkowner_utoa (&lock->owner),
lock->user_flock.l_start,
lock->user_flock.l_len);

@ -429,18 +429,18 @@ pl_reserve_setlk (xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
ret = __lock_reservelk (this, pl_inode, lock, can_block);
if (ret < 0)
gf_log (this->name, GF_LOG_TRACE,
"%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => NOK",
"%s (pid=%d) (lk-owner=%s) %"PRId64" - %"PRId64" => NOK",
lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
lock->client_pid,
lock->owner,
lkowner_utoa (&lock->owner),
lock->user_flock.l_start,
lock->user_flock.l_len);
else
gf_log (this->name, GF_LOG_TRACE,
"%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => OK",
"%s (pid=%d) (lk-owner=%s) %"PRId64" - %"PRId64" => OK",
lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
lock->client_pid,
lock->owner,
lkowner_utoa (&lock->owner),
lock->fl_start,
lock->fl_end);

@ -152,7 +152,7 @@ mq_assign_lk_owner (xlator_t *this, call_frame_t *frame)
}
UNLOCK (&conf->lock);
frame->root->lk_owner = lk_owner;
set_lk_owner_from_uint64 (&frame->root->lk_owner, lk_owner);
return;
}

@ -35,6 +35,7 @@
#include "glusterfs3.h"
#include "protocol-common.h"
#include "rpcsvc.h"
#include "rpc-common-xdr.h"
extern struct rpc_clnt_program gd_peer_prog;
extern struct rpc_clnt_program gd_mgmt_prog;
@ -3110,11 +3110,11 @@ fuse_setlk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
} else if (op_errno == EAGAIN) {
gf_log ("glusterfs-fuse", GF_LOG_DEBUG,
"Returning EAGAIN Flock: "
"start=%llu, len=%llu, pid=%llu, lk-owner=%llu",
"start=%llu, len=%llu, pid=%llu, lk-owner=%s",
(unsigned long long) lock->l_start,
(unsigned long long) lock->l_len,
(unsigned long long) lock->l_pid,
(unsigned long long) frame->root->lk_owner);
lkowner_utoa (&frame->root->lk_owner));
} else {
gf_log ("glusterfs-fuse", GF_LOG_WARNING,

@ -145,7 +145,7 @@ frame_fill_groups (call_frame_t *frame)
{
#if defined(GF_LINUX_HOST_OS)
char filename[32];
char line[128];
char line[4096];
char *ptr = NULL;
FILE *fp = NULL;
int idx = 0;

@ -178,7 +178,7 @@ frame_fill_groups (call_frame_t *frame)
if (!endptr || *endptr)
break;
frame->root->groups[idx++] = id;
if (idx == GF_REQUEST_MAXGROUPS)
if (idx == GF_MAX_AUX_GROUPS)
break;
}

@ -237,7 +237,6 @@ out:
#endif /* GF_LINUX_HOST_OS */
}
call_frame_t *
get_call_frame_for_req (fuse_state_t *state)
{

@ -260,8 +259,9 @@ get_call_frame_for_req (fuse_state_t *state)
frame->root->uid = finh->uid;
frame->root->gid = finh->gid;
frame->root->pid = finh->pid;
frame->root->lk_owner = state->lk_owner;
frame->root->unique = finh->unique;
set_lk_owner_from_uint64 (&frame->root->lk_owner,
state->lk_owner);
}
frame_fill_groups (frame);

@ -90,7 +90,7 @@ unsigned int cval = 1;
int
nfs_frame_getctr ()
{
int val = 0;
uint64_t val = 0;
pthread_mutex_lock (&ctr);
{

@ -132,7 +132,8 @@ nfs_create_frame (xlator_t *xl, nfs_user_t *nfu)
gf_log (GF_NFS, GF_LOG_TRACE, "gid: %d", nfu->gids[x]);
frame->root->groups[y] = nfu->gids[x];
}
frame->root->lk_owner = nfs_frame_getctr ();
set_lk_owner_from_uint64 (&frame->root->lk_owner, nfs_frame_getctr ());
err:
return frame;

@ -45,6 +45,12 @@
#define GF_NFS_DVM_ON 1
#define GF_NFS_DVM_OFF 2
/* This corresponds to the max 16 number of group IDs that are sent through an
* RPC request. Since NFS is the only one going to set this, we can be safe
* in keeping this size hardcoded.
*/
#define GF_REQUEST_MAXGROUPS 16
/* Callback into a version-specific NFS protocol.
* The return type is used by the nfs.c code to register the protocol.
* with the RPC service.

@ -31,6 +31,7 @@
#include "glusterfs3.h"
#include "portmap-xdr.h"
#include "rpc-common-xdr.h"
extern rpc_clnt_prog_t clnt3_1_fop_prog;
extern rpc_clnt_prog_t clnt_pmap_prog;
@ -20,6 +20,7 @@
#include "common-utils.h"
#include "xlator.h"
#include "client.h"
#include "lkowner.h"
static void
__insert_and_merge (clnt_fd_ctx_t *fdctx, client_posix_lock_t *lock);

@ -36,11 +37,11 @@ __dump_client_lock (client_posix_lock_t *lock)
gf_log (this->name, GF_LOG_INFO,
"{fd=%p}"
"{%s lk-owner:%"PRIu64" %"PRId64" - %"PRId64"}"
"{%s lk-owner:%s %"PRId64" - %"PRId64"}"
"{start=%"PRId64" end=%"PRId64"}",
lock->fd,
lock->fl_type == F_WRLCK ? "Write-Lock" : "Read-Lock",
lock->owner,
lkowner_utoa (&lock->owner),
lock->user_flock.l_start,
lock->user_flock.l_len,
lock->fl_start,

@ -133,12 +134,6 @@ add_locks (client_posix_lock_t *l1, client_posix_lock_t *l2)
return sum;
}
/* Return true if the locks have the same owner */
static int
same_owner (client_posix_lock_t *l1, client_posix_lock_t *l2)
{
return ((l1->owner == l2->owner));
}
/* Return true if the locks overlap, false otherwise */
static int

@ -285,11 +280,11 @@ __insert_and_merge (clnt_fd_ctx_t *fdctx, client_posix_lock_t *lock)
if (!locks_overlap (conf, lock))
continue;
if (same_owner (conf, lock)) {
if (is_same_lkowner (&conf->owner, &lock->owner)) {
if (conf->fl_type == lock->fl_type) {
sum = add_locks (lock, conf);
sum->fd = lock->fd;
sum->fd = lock->fd;
__delete_client_lock (conf);
__destroy_client_lock (conf);

@ -301,8 +296,8 @@ __insert_and_merge (clnt_fd_ctx_t *fdctx, client_posix_lock_t *lock)
} else {
sum = add_locks (lock, conf);
sum->fd = conf->fd;
sum->owner = conf->owner;
sum->fd = conf->fd;
sum->owner = conf->owner;
v = subtract_locks (sum, lock);

@ -365,7 +360,7 @@ destroy_client_lock (client_posix_lock_t *lock)
}
int32_t
delete_granted_locks_owner (fd_t *fd, uint64_t owner)
delete_granted_locks_owner (fd_t *fd, gf_lkowner_t *owner)
{
clnt_fd_ctx_t *fdctx = NULL;
client_posix_lock_t *lock = NULL;

@ -389,7 +384,7 @@ delete_granted_locks_owner (fd_t *fd, uint64_t owner)
pthread_mutex_lock (&fdctx->mutex);
{
list_for_each_entry_safe (lock, tmp, &fdctx->lock_list, list) {
if (lock->owner == owner) {
if (!is_same_lkowner (&lock->owner, owner)) {
list_del_init (&lock->list);
list_add_tail (&lock->list, &delete_list);
count++;

@ -486,7 +481,7 @@ client_cmd_to_gf_cmd (int32_t cmd, int32_t *gf_cmd)
}
static client_posix_lock_t *
new_client_lock (struct gf_flock *flock, uint64_t owner,
new_client_lock (struct gf_flock *flock, gf_lkowner_t *owner,
int32_t cmd, fd_t *fd)
{
client_posix_lock_t *new_lock = NULL;

@ -509,7 +504,8 @@ new_client_lock (struct gf_flock *flock, uint64_t owner,
else
new_lock->fl_end = flock->l_start + flock->l_len - 1;
new_lock->owner = owner;
new_lock->owner = *owner;
new_lock->cmd = cmd; /* Not really useful */
out:

@ -527,8 +523,8 @@ client_save_number_fds (clnt_conf_t *conf, int count)
}
int
client_add_lock_for_recovery (fd_t *fd, struct gf_flock *flock, uint64_t owner,
int32_t cmd)
client_add_lock_for_recovery (fd_t *fd, struct gf_flock *flock,
gf_lkowner_t *owner, int32_t cmd)
{
clnt_fd_ctx_t *fdctx = NULL;
xlator_t *this = NULL;

@ -572,13 +568,13 @@ construct_reserve_unlock (struct gf_flock *lock, call_frame_t *frame,
{
GF_ASSERT (lock);
GF_ASSERT (frame);
GF_ASSERT (frame->root->lk_owner);
lock->l_type = F_UNLCK;
lock->l_start = 0;
lock->l_whence = SEEK_SET;
lock->l_len = 0; /* Whole file */
lock->l_pid = (uint64_t)(unsigned long)frame->root;
lock->l_owner = client_lock->owner;
frame->root->lk_owner = client_lock->owner;

@ -827,7 +823,6 @@ static int
client_send_recovery_lock (call_frame_t *frame, xlator_t *this,
client_posix_lock_t *lock)
{
frame->root->lk_owner = lock->owner;
/* Send all locks as F_SETLK to prevent the frame
@ -111,8 +111,7 @@ typedef struct _client_posix_lock {
off_t fl_end;
short fl_type;
int32_t cmd; /* the cmd for the lock call */
uint64_t owner; /* lock owner from fuse */
gf_lkowner_t owner; /* lock owner from fuse */
struct list_head list; /* reference used to add to the fdctx list of locks */
} client_posix_lock_t;

@ -126,7 +125,7 @@ typedef struct client_local {
struct iobref *iobref;
client_posix_lock_t *client_lock;
uint64_t owner;
gf_lkowner_t owner;
int32_t cmd;
struct list_head lock_list;
pthread_mutex_t mutex;

@ -190,9 +189,9 @@ int unserialize_rsp_direntp (struct gfs3_readdirp_rsp *rsp, gf_dirent_t *entries
int clnt_readdir_rsp_cleanup (gfs3_readdir_rsp *rsp);
int clnt_readdirp_rsp_cleanup (gfs3_readdirp_rsp *rsp);
int client_attempt_lock_recovery (xlator_t *this, clnt_fd_ctx_t *fdctx);
int32_t delete_granted_locks_owner (fd_t *fd, uint64_t owner);
int client_add_lock_for_recovery (fd_t *fd, struct gf_flock *flock, uint64_t owner,
int32_t cmd);
int32_t delete_granted_locks_owner (fd_t *fd, gf_lkowner_t *owner);
int client_add_lock_for_recovery (fd_t *fd, struct gf_flock *flock,
gf_lkowner_t *owner, int32_t cmd);
uint64_t decrement_reopen_fd_count (xlator_t *this, clnt_conf_t *conf);
int32_t delete_granted_locks_fd (clnt_fd_ctx_t *fdctx);
int32_t client_cmd_to_gf_cmd (int32_t cmd, int32_t *gf_cmd);
@ -725,10 +725,10 @@ client3_1_flush_cbk (struct rpc_req *req, struct iovec *iov, int count,
if (rsp.op_ret >= 0) {
/* Delete all saved locks of the owner issuing flush */
ret = delete_granted_locks_owner (local->fd, local->owner);
ret = delete_granted_locks_owner (local->fd, &local->owner);
gf_log (this->name, GF_LOG_TRACE,
"deleting locks of owner (%llu) returned %d",
(long long unsigned) local->owner, ret);
"deleting locks of owner (%s) returned %d",
lkowner_utoa (&local->owner), ret);
}
out:

@ -3655,7 +3655,7 @@ client3_1_flush (call_frame_t *frame, xlator_t *this,
gfs3_flush_req req = {{0,},};
int64_t remote_fd = -1;
clnt_conf_t *conf = NULL;
clnt_local_t *local = NULL;
clnt_local_t *local = NULL;
int op_errno = ESTALE;
int ret = 0;
@ -39,7 +39,7 @@ server_decode_groups (call_frame_t *frame, rpcsvc_request_t *req)
if (frame->root->ngrps == 0)
return 0;
if (frame->root->ngrps > GF_REQUEST_MAXGROUPS)
if (frame->root->ngrps > GF_MAX_AUX_GROUPS)
return -1;
for (; i < frame->root->ngrps; ++i)

@ -134,7 +134,7 @@ free_state (server_state_t *state)
int
gf_add_locker (struct _lock_table *table, const char *volume,
loc_t *loc, fd_t *fd, pid_t pid, uint64_t owner,
loc_t *loc, fd_t *fd, pid_t pid, gf_lkowner_t *owner,
glusterfs_fop_t type)
{
int32_t ret = -1;

@ -158,7 +158,7 @@ gf_add_locker (struct _lock_table *table, const char *volume,
}
new->pid = pid;
new->owner = owner;
new->owner = *owner;
LOCK (&table->lock);
{

@ -175,7 +175,8 @@ out:
int
gf_del_locker (struct _lock_table *table, const char *volume,
loc_t *loc, fd_t *fd, uint64_t owner, glusterfs_fop_t type)
loc_t *loc, fd_t *fd, gf_lkowner_t *owner,
glusterfs_fop_t type)
{
struct _locker *locker = NULL;
struct _locker *tmp = NULL;

@ -197,17 +198,15 @@ gf_del_locker (struct _lock_table *table, const char *volume,
}
list_for_each_entry_safe (locker, tmp, head, lockers) {
if (locker->fd && fd &&
(locker->fd == fd) && (locker->owner == owner)
&& !strcmp (locker->volume, volume)) {
if (!is_same_lkowner (&locker->owner, owner) ||
strcmp (locker->volume, volume))
continue;
if (locker->fd && fd && (locker->fd == fd))
list_move_tail (&locker->lockers, &del);
} else if (locker->loc.inode &&
loc &&
(locker->loc.inode == loc->inode) &&
(locker->owner == owner)
&& !strcmp (locker->volume, volume)) {
else if (locker->loc.inode && loc &&
(locker->loc.inode == loc->inode))
list_move_tail (&locker->lockers, &del);
}
}
}
UNLOCK (&table->lock);

@ -314,9 +313,9 @@ do_lock_table_cleanup (xlator_t *this, server_connection_t *conn,
lock owner = 0 is a special case that tells posix-locks
to release all locks from this transport
*/
tmp_frame->root->pid = 0;
tmp_frame->root->lk_owner = 0;
tmp_frame->root->trans = conn;
tmp_frame->root->pid = 0;
tmp_frame->root->trans = conn;
memset (&tmp_frame->root->lk_owner, 0, sizeof (gf_lkowner_t));
if (locker->fd) {
GF_ASSERT (locker->fd->inode);

@ -361,9 +360,9 @@ do_lock_table_cleanup (xlator_t *this, server_connection_t *conn,
list_for_each_entry_safe (locker, tmp, &entrylk_lockers, lockers) {
tmp_frame = copy_frame (frame);
tmp_frame->root->lk_owner = 0;
tmp_frame->root->pid = 0;
tmp_frame->root->trans = conn;
tmp_frame->root->pid = 0;
tmp_frame->root->trans = conn;
memset (&tmp_frame->root->lk_owner, 0, sizeof (gf_lkowner_t));
if (locker->fd) {
GF_ASSERT (locker->fd->inode);

@ -480,7 +479,9 @@ do_fd_cleanup (xlator_t *this, server_connection_t *conn, call_frame_t *frame,
tmp_frame->root->pid = 0;
tmp_frame->root->trans = conn;
tmp_frame->root->lk_owner = 0;
memset (&tmp_frame->root->lk_owner, 0,
sizeof (gf_lkowner_t));
STACK_WIND (tmp_frame,
server_connection_cleanup_flush_cbk,
bound_xl, bound_xl->fops->flush, fd);

@ -630,8 +631,9 @@ server_connection_destroy (xlator_t *this, server_connection_t *conn)
lock_owner = 0 is a special case that tells posix-locks
to release all locks from this transport
*/
tmp_frame->root->lk_owner = 0;
tmp_frame->root->trans = conn;
memset (&tmp_frame->root->lk_owner, 0,
sizeof (gf_lkowner_t));
if (locker->fd) {
GF_ASSERT (locker->fd->inode);

@ -676,8 +678,9 @@ server_connection_destroy (xlator_t *this, server_connection_t *conn)
list_for_each_entry_safe (locker, tmp, &entrylk_lockers, lockers) {
tmp_frame = copy_frame (frame);
tmp_frame->root->lk_owner = 0;
tmp_frame->root->trans = conn;
memset (&tmp_frame->root->lk_owner, 0,
sizeof (gf_lkowner_t));
if (locker->fd) {
GF_ASSERT (locker->fd->inode);

@ -52,14 +52,14 @@ gf_add_locker (struct _lock_table *table, const char *volume,
loc_t *loc,
fd_t *fd,
pid_t pid,
uint64_t owner,
gf_lkowner_t *owner,
glusterfs_fop_t type);
int32_t
gf_del_locker (struct _lock_table *table, const char *volume,
loc_t *loc,
fd_t *fd,
uint64_t owner,
gf_lkowner_t *owner,
glusterfs_fop_t type);
void

@ -39,7 +39,7 @@ struct _locker {
char *volume;
loc_t loc;
fd_t *fd;
uint64_t owner;
gf_lkowner_t owner;
pid_t pid;
};
|
@ -247,12 +247,13 @@ server_inodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
|
||||
if (op_ret >= 0) {
|
||||
if (state->flock.l_type == F_UNLCK)
|
||||
gf_del_locker (conn->ltable, state->volume,
|
||||
&state->loc, NULL, frame->root->lk_owner,
|
||||
&state->loc, NULL, &frame->root->lk_owner,
|
||||
GF_FOP_INODELK);
|
||||
else
|
||||
gf_add_locker (conn->ltable, state->volume,
|
||||
&state->loc, NULL, frame->root->pid,
|
||||
frame->root->lk_owner, GF_FOP_INODELK);
|
||||
&frame->root->lk_owner,
|
||||
GF_FOP_INODELK);
|
||||
} else if ((op_errno != ENOSYS) && (op_errno != EAGAIN)) {
|
||||
gf_log (this->name, GF_LOG_INFO,
|
||||
"%"PRId64": INODELK %s (%s) ==> %"PRId32" (%s)",
|
||||
@ -289,12 +290,12 @@ server_finodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
|
||||
if (state->flock.l_type == F_UNLCK)
|
||||
gf_del_locker (conn->ltable, state->volume,
|
||||
NULL, state->fd,
|
||||
frame->root->lk_owner, GF_FOP_INODELK);
|
||||
&frame->root->lk_owner, GF_FOP_INODELK);
|
||||
else
|
||||
gf_add_locker (conn->ltable, state->volume,
|
||||
NULL, state->fd,
|
||||
frame->root->pid,
|
||||
frame->root->lk_owner, GF_FOP_INODELK);
|
||||
&frame->root->lk_owner, GF_FOP_INODELK);
|
||||
} else if ((op_errno != ENOSYS) && (op_errno != EAGAIN)) {
|
||||
gf_log (this->name, GF_LOG_INFO,
|
||||
"%"PRId64": FINODELK %"PRId64" (%s) ==> %"PRId32" (%s)",
|
||||
@ -329,11 +330,13 @@ server_entrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
|
||||
if (op_ret >= 0) {
|
||||
if (state->cmd == ENTRYLK_UNLOCK)
|
||||
gf_del_locker (conn->ltable, state->volume,
|
||||
&state->loc, NULL, frame->root->lk_owner, GF_FOP_ENTRYLK);
|
||||
&state->loc, NULL, &frame->root->lk_owner,
|
||||
GF_FOP_ENTRYLK);
|
||||
else
|
||||
gf_add_locker (conn->ltable, state->volume,
|
||||
&state->loc, NULL, frame->root->pid,
|
||||
frame->root->lk_owner, GF_FOP_ENTRYLK);
|
||||
&frame->root->lk_owner,
|
||||
GF_FOP_ENTRYLK);
|
||||
} else if ((op_errno != ENOSYS) && (op_errno != EAGAIN)) {
|
||||
gf_log (this->name, GF_LOG_INFO,
|
||||
"%"PRId64": ENTRYLK %s (%s) ==> %"PRId32" (%s)",
|
||||
@ -367,11 +370,12 @@ server_fentrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
|
||||
if (op_ret >= 0) {
|
||||
if (state->cmd == ENTRYLK_UNLOCK)
|
||||
gf_del_locker (conn->ltable, state->volume,
|
||||
NULL, state->fd, frame->root->lk_owner, GF_FOP_ENTRYLK);
|
||||
NULL, state->fd, &frame->root->lk_owner,
|
||||
GF_FOP_ENTRYLK);
|
||||
else
|
||||
gf_add_locker (conn->ltable, state->volume,
|
||||
NULL, state->fd, frame->root->pid,
|
||||
frame->root->lk_owner, GF_FOP_ENTRYLK);
|
||||
&frame->root->lk_owner, GF_FOP_ENTRYLK);
|
||||
} else if ((op_errno != ENOSYS) && (op_errno != EAGAIN)) {
|
||||
gf_log (this->name, GF_LOG_INFO,
|
||||
"%"PRId64": FENTRYLK %"PRId64" (%s) "
|
||||
|