glusterd: nfs,shd,quotad,snapd daemons refactoring

This patch ports nfs, shd, quotad & snapd with the approach suggested in
http://www.gluster.org/pipermail/gluster-devel/2014-December/043180.html

Change-Id: I4ea5b38793f87fc85cc9d2cf873727351dedffd2
BUG: 1191486
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Signed-off-by: Krishnan Parthasarathi <kparthas@redhat.com>
Reviewed-on: http://review.gluster.org/9428
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Anand Nekkunti <anekkunt@redhat.com>
Atin Mukherjee 2015-02-11 17:13:45 +05:30 committed by Krishnan Parthasarathi
parent 571a71f0ac
commit 9d842f9656
46 changed files with 2810 additions and 1751 deletions


@ -0,0 +1,38 @@
How to introduce new daemons using daemon management framework
==============================================================
Glusterd manages GlusterFS daemons that provide services such as NFS, proactive
self-heal, quota, user serviceable snapshots etc. The following are some of the
aspects that come under daemon management.
Data members & functions of the different management objects:
- **Connection Management**
- Unix domain socket based channel for internal communication
- RPC connection used for the communication
- frame timeout value for the UDS connection
- Methods - notify
- The init, connect, terminate and disconnect APIs can be invoked using the
connection management object
- **Process Management**
- Name of the process
- pidfile to detect if the daemon is running
- logging directory, log file, volfile, volfileserver & volfileid
- The init and stop APIs can be invoked using the process management object
- **Service Management**
- connection object
- process object
- online status
- Methods - manager, start and stop, which can either use the common
implementations or be specialized to the service's requirements
- The init API can be invoked using the service management object
The above structures define the skeleton of the daemon management framework.
New daemons introduced in GlusterFS need to inherit these properties. Any
requirement specific to a daemon needs to be implemented in its own service type
(for example, snapd defines its own type glusterd_snapdsvc_t comprising
glusterd_svc_t and snapd-specific data). A new daemon needs its service-specific
code written in glusterd-<feature>-svc.{h,c} and should reuse the existing
framework, as the sketch below illustrates.
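
As an illustration, here is a minimal sketch of what such a service file could
look like. The daemon name "foo" and every glusterd_foosvc_* symbol are
hypothetical; glusterd_svc_init, glusterd_svc_start, glusterd_svc_stop,
glusterd_conn_connect and glusterd_are_all_volumes_stopped are the framework and
helper APIs introduced by this patch.

```c
/* glusterd-foo-svc.c -- illustrative sketch only; "foo" and the
 * glusterd_foosvc_* names are hypothetical, the glusterd_svc_* and
 * glusterd_conn_* calls are the framework APIs added by this patch. */
#include <signal.h>

#include "glusterd.h"
#include "glusterd-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"

char *foo_svc_name = "food";

int glusterd_foosvc_manager (glusterd_svc_t *svc, void *data, int flags);
int glusterd_foosvc_start (glusterd_svc_t *svc, int flags);

/* Inherit the generic initialisation: it fills in the connection,
 * process and common notify callback for this daemon. */
int
glusterd_foosvc_init (glusterd_svc_t *svc)
{
        return glusterd_svc_init (svc, foo_svc_name,
                                  glusterd_foosvc_manager,
                                  glusterd_foosvc_start,
                                  glusterd_svc_stop);
}

/* Decide whether the daemon should run and (re)start it, mirroring
 * the nfs and quotad managers in this patch. */
int
glusterd_foosvc_manager (glusterd_svc_t *svc, void *data, int flags)
{
        int ret = 0;

        if (glusterd_are_all_volumes_stopped ()) {
                ret = svc->stop (svc, SIGTERM);
        } else {
                /* A real daemon would (re)generate its volfile here,
                 * cf. glusterd_nfssvc_create_volfile (). */
                ret = svc->stop (svc, SIGTERM);
                if (ret)
                        goto out;
                ret = svc->start (svc, flags);
                if (ret)
                        goto out;
                ret = glusterd_conn_connect (&(svc->conn));
        }
out:
        return ret;
}

int
glusterd_foosvc_start (glusterd_svc_t *svc, int flags)
{
        /* This hypothetical daemon needs no extra command-line options. */
        return glusterd_svc_start (svc, flags, NULL);
}
```

Glusterd would then hold a glusterd_svc_t for the daemon (as glusterd_conf_t
does for nfs_svc, shd_svc and quotad_svc in this patch), call
glusterd_foosvc_init () during startup, and invoke svc->manager () whenever a
volume event requires the daemon to be reconfigured.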


@ -1744,6 +1744,47 @@ out:
return;
}
void
rpc_clnt_disconnect (struct rpc_clnt *rpc)
{
rpc_clnt_connection_t *conn = NULL;
rpc_transport_t *trans = NULL;
if (!rpc)
goto out;
conn = &rpc->conn;
pthread_mutex_lock (&conn->lock);
{
if (conn->timer) {
gf_timer_call_cancel (rpc->ctx, conn->timer);
conn->timer = NULL;
}
if (conn->reconnect) {
gf_timer_call_cancel (rpc->ctx, conn->reconnect);
conn->reconnect = NULL;
}
conn->connected = 0;
if (conn->ping_timer) {
gf_timer_call_cancel (rpc->ctx, conn->ping_timer);
conn->ping_timer = NULL;
conn->ping_started = 0;
}
trans = conn->trans;
}
pthread_mutex_unlock (&conn->lock);
if (trans) {
rpc_transport_disconnect (trans);
}
out:
return;
}
void
rpc_clnt_reconfig (struct rpc_clnt *rpc, struct rpc_clnt_config *config)


@ -244,6 +244,9 @@ int rpcclnt_cbk_program_register (struct rpc_clnt *svc,
void
rpc_clnt_disable (struct rpc_clnt *rpc);
void
rpc_clnt_disconnect (struct rpc_clnt *rpc);
char
rpc_clnt_is_disabled (struct rpc_clnt *rpc);


@ -12,6 +12,17 @@ function check_readonly()
return $?
}
function lookup()
{
ls $1
if [ "$?" == "0" ]
then
echo "Y"
else
echo "N"
fi
}
cleanup;
TESTS_EXPECTED_IN_LOOP=10
@ -179,7 +190,9 @@ TEST fd_close $fd3;
# test 73
TEST $CLI volume set $V0 "features.snapshot-directory" .history
TEST ls $M0/.history;
# The snapd client might take a fraction of a second to compare the volfile
# received from glusterd, hence EXPECT_WITHIN is a better choice here
EXPECT_WITHIN 2 "Y" lookup "$M0/.history";
NUM_SNAPS=$(ls $M0/.history | wc -l);


@ -14,7 +14,8 @@ TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
TEST $CLI volume start $V0;
sleep 2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" nfs_up_status
## Mount FUSE
TEST $GFS -s $H0 --volfile-id $V0 $M0;
@ -28,8 +29,6 @@ TEST mount_nfs $H0:/$V0 $N0 nolock;
TEST $CLI volume status all
TEST $CLI volume status $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' nfs_up_status
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' glustershd_up_status
function test_nfs_cmds () {
local ret=0
declare -a nfs_cmds=("clients" "mem" "inode" "callpool")


@ -11,7 +11,11 @@ glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \
glusterd-syncop.c glusterd-hooks.c glusterd-volume-set.c \
glusterd-locks.c glusterd-snapshot.c glusterd-mgmt-handler.c \
glusterd-mgmt.c glusterd-peer-utils.c glusterd-statedump.c \
glusterd-snapshot-utils.c
glusterd-snapshot-utils.c glusterd-conn-mgmt.c \
glusterd-proc-mgmt.c glusterd-svc-mgmt.c glusterd-shd-svc.c \
glusterd-nfs-svc.c glusterd-quotad-svc.c glusterd-svc-helper.c \
glusterd-conn-helper.c glusterd-snapd-svc.c glusterd-snapd-svc-helper.c
glusterd_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
$(top_builddir)/rpc/xdr/src/libgfxdr.la \
@ -26,7 +30,11 @@ noinst_HEADERS = glusterd.h glusterd-utils.h glusterd-op-sm.h \
glusterd-pmap.h glusterd-volgen.h glusterd-mountbroker.h \
glusterd-syncop.h glusterd-hooks.h glusterd-locks.h \
glusterd-mgmt.h glusterd-messages.h glusterd-peer-utils.h \
glusterd-statedump.h glusterd-snapshot-utils.h glusterd-geo-rep.h
glusterd-statedump.h glusterd-snapshot-utils.h glusterd-geo-rep.h \
glusterd-conn-mgmt.h glusterd-conn-helper.h glusterd-proc-mgmt.h \
glusterd-svc-mgmt.h glusterd-shd-svc.h glusterd-nfs-svc.h \
glusterd-quotad-svc.h glusterd-svc-helper.h glusterd-snapd-svc.h \
glusterd-snapd-svc-helper.h
AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
-I$(rpclibdir) -I$(CONTRIBDIR)/rbtree \


@ -21,6 +21,7 @@
#include "glusterd-store.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
#include "glusterd-svc-helper.h"
#include "run.h"
#include <sys/signal.h>
@ -1819,7 +1820,7 @@ glusterd_op_add_brick (dict_t *dict, char **op_errstr)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status)
ret = glusterd_nodesvcs_handle_graph_change (volinfo);
ret = glusterd_svcs_manager (volinfo);
out:
return ret;
@ -2074,7 +2075,7 @@ glusterd_op_remove_brick (dict_t *dict, char **op_errstr)
if (GF_OP_CMD_START == cmd &&
volinfo->status == GLUSTERD_STATUS_STARTED) {
ret = glusterd_nodesvcs_handle_reconfigure (volinfo);
ret = glusterd_svcs_reconfigure (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_WARNING,
"Unable to reconfigure NFS-Server");
@ -2106,7 +2107,7 @@ glusterd_op_remove_brick (dict_t *dict, char **op_errstr)
}
} else {
if (GLUSTERD_STATUS_STARTED == volinfo->status)
ret = glusterd_nodesvcs_handle_graph_change (volinfo);
ret = glusterd_svcs_manager (volinfo);
}
out:


@ -0,0 +1,18 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#include "glusterd-conn-mgmt.h"
#include "glusterd-svc-mgmt.h"
glusterd_svc_t *
glusterd_conn_get_svc_object (glusterd_conn_t *conn)
{
return list_entry (conn, glusterd_svc_t, conn);
}


@ -0,0 +1,26 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#ifndef _GLUSTERD_CONN_HELPER_H_
#define _GLUSTERD_CONN_HELPER_H_
#include "rpc-clnt.h"
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif
#include "glusterd-conn-mgmt.h"
glusterd_svc_t *
glusterd_conn_get_svc_object (glusterd_conn_t *conn);
#endif


@ -0,0 +1,135 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#include "xlator.h"
#include "rpc-clnt.h"
#include "glusterd.h"
#include "glusterd-conn-mgmt.h"
#include "glusterd-conn-helper.h"
#include "glusterd-utils.h"
int
glusterd_conn_init (glusterd_conn_t *conn, char *sockpath,
int frame_timeout, glusterd_conn_notify_t notify)
{
int ret = -1;
dict_t *options = NULL;
struct rpc_clnt *rpc = NULL;
xlator_t *this = THIS;
glusterd_svc_t *svc = NULL;
if (!this)
goto out;
svc = glusterd_conn_get_svc_object (conn);
if (!svc) {
gf_log (this->name, GF_LOG_ERROR, "Failed to get the service");
goto out;
}
ret = rpc_transport_unix_options_build (&options, sockpath,
frame_timeout);
if (ret)
goto out;
ret = dict_set_str (options, "transport.socket.ignore-enoent", "on");
if (ret)
goto out;
/* @options is free'd by rpc_transport when destroyed */
rpc = rpc_clnt_new (options, this->ctx, (char *)svc->name, 16);
if (!rpc) {
ret = -1;
goto out;
}
ret = rpc_clnt_register_notify (rpc, glusterd_conn_common_notify,
conn);
if (ret)
goto out;
ret = snprintf (conn->sockpath, sizeof (conn->sockpath), "%s",
sockpath);
if (ret < 0)
goto out;
else
ret = 0;
conn->frame_timeout = frame_timeout;
conn->rpc = rpc;
conn->notify = notify;
out:
if (ret) {
if (rpc) {
rpc_clnt_unref (rpc);
rpc = NULL;
}
}
return ret;
}
int
glusterd_conn_term (glusterd_conn_t *conn)
{
rpc_clnt_disable (conn->rpc);
rpc_clnt_unref (conn->rpc);
return 0;
}
int
glusterd_conn_connect (glusterd_conn_t *conn)
{
return rpc_clnt_start (conn->rpc);
}
int
glusterd_conn_disconnect (glusterd_conn_t *conn)
{
rpc_clnt_disconnect (conn->rpc);
return 0;
}
int
__glusterd_conn_common_notify (struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data)
{
glusterd_conn_t *conn = mydata;
/* Silently ignoring this error, exactly like the current
* implementation */
if (!conn)
return 0;
return conn->notify (conn, event);
}
int
glusterd_conn_common_notify (struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data)
{
return glusterd_big_locked_notify
(rpc, mydata, event, data,
__glusterd_conn_common_notify);
}
int32_t
glusterd_conn_build_socket_filepath (char *rundir, uuid_t uuid,
char *socketpath, int len)
{
char sockfilepath[PATH_MAX] = {0,};
snprintf (sockfilepath, sizeof (sockfilepath), "%s/run-%s",
rundir, uuid_utoa (uuid));
glusterd_set_socket_filepath (sockfilepath, socketpath, len);
return 0;
}


@ -0,0 +1,56 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#ifndef _GLUSTERD_CONN_MGMT_H_
#define _GLUSTERD_CONN_MGMT_H_
#include "rpc-clnt.h"
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif
typedef struct glusterd_conn_ glusterd_conn_t;
typedef int (*glusterd_conn_notify_t)
(glusterd_conn_t *conn, rpc_clnt_event_t event);
struct glusterd_conn_ {
struct rpc_clnt *rpc;
char sockpath[PATH_MAX];
int frame_timeout;
/* Existing daemons tend to specialize their respective
* notify implementations, so ... */
glusterd_conn_notify_t notify;
};
int
glusterd_conn_init (glusterd_conn_t *conn, char *sockpath,
int frame_timeout, glusterd_conn_notify_t notify);
int
glusterd_conn_term (glusterd_conn_t *conn);
int
glusterd_conn_connect (glusterd_conn_t *conn);
int
glusterd_conn_disconnect (glusterd_conn_t *conn);
int
glusterd_conn_common_notify (struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data);
int32_t
glusterd_conn_build_socket_filepath (char *rundir, uuid_t uuid,
char *socketpath, int len);
#endif


@ -21,6 +21,7 @@
#include "glusterd-store.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
#include "glusterd-svc-helper.h"
#include "run.h"
#include "syscall.h"
@ -3903,7 +3904,7 @@ glusterd_marker_changelog_create_volfile (glusterd_volinfo_t *volinfo)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status)
ret = glusterd_nodesvcs_handle_graph_change (volinfo);
ret = glusterd_svcs_manager (volinfo);
ret = 0;
out:
return ret;


@ -64,10 +64,12 @@ int glusterd_big_locked_notify (struct rpc_clnt *rpc, void *mydata,
void *data, rpc_clnt_notify_t notify_fn)
{
glusterd_conf_t *priv = THIS->private;
int ret = -1;
int ret = -1;
synclock_lock (&priv->big_lock);
ret = notify_fn (rpc, mydata, event, data);
synclock_unlock (&priv->big_lock);
return ret;
}
@ -4410,116 +4412,6 @@ glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
__glusterd_brick_rpc_notify);
}
int
__glusterd_snapd_rpc_notify (struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data)
{
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
glusterd_volinfo_t *volinfo = NULL;
int ret = 0;
this = THIS;
GF_ASSERT (this);
conf = this->private;
GF_ASSERT (conf);
volinfo = mydata;
if (!volinfo)
return 0;
switch (event) {
case RPC_CLNT_CONNECT:
gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT");
(void) glusterd_snapd_set_online_status (volinfo, _gf_true);
break;
case RPC_CLNT_DISCONNECT:
if (glusterd_is_snapd_online (volinfo)) {
gf_msg (this->name, GF_LOG_INFO, 0,
GD_MSG_NODE_DISCONNECTED,
"snapd for volume %s has disconnected from "
"glusterd.", volinfo->volname);
(void) glusterd_snapd_set_online_status
(volinfo, _gf_false);
}
break;
case RPC_CLNT_DESTROY:
glusterd_volinfo_unref (volinfo);
break;
default:
gf_log (this->name, GF_LOG_TRACE,
"got some other RPC event %d", event);
break;
}
return ret;
}
int
glusterd_snapd_rpc_notify (struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data)
{
return glusterd_big_locked_notify (rpc, mydata, event, data,
__glusterd_snapd_rpc_notify);
}
int
__glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data)
{
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
char *server = NULL;
int ret = 0;
this = THIS;
GF_ASSERT (this);
conf = this->private;
GF_ASSERT (conf);
server = mydata;
if (!server)
return 0;
switch (event) {
case RPC_CLNT_CONNECT:
gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT");
(void) glusterd_nodesvc_set_online_status (server, _gf_true);
ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
break;
case RPC_CLNT_DISCONNECT:
if (glusterd_is_nodesvc_online (server)) {
gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_NODE_DISCONNECTED,
"%s has disconnected from glusterd.", server);
(void) glusterd_nodesvc_set_online_status (server, _gf_false);
}
break;
default:
gf_log (this->name, GF_LOG_TRACE,
"got some other RPC event %d", event);
break;
}
return ret;
}
int
glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data)
{
return glusterd_big_locked_notify (rpc, mydata, event, data,
__glusterd_nodesvc_rpc_notify);
}
int
glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx)
{


@ -23,6 +23,9 @@
#include "glusterd-op-sm.h"
#include "glusterd-store.h"
#include "glusterd-snapshot-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-snapd-svc-helper.h"
#include "glusterd-quotad-svc.h"
#include "glusterfs3.h"
#include "protocol-common.h"
@ -187,7 +190,7 @@ build_volfile_path (char *volume_id, char *path,
"Couldn't find volinfo");
goto out;
}
glusterd_get_snapd_volfile (volinfo, path, path_len);
glusterd_svc_build_snapd_volfile (volinfo, path, path_len);
ret = 0;
goto out;
@ -202,8 +205,14 @@ build_volfile_path (char *volume_id, char *path,
}
volid_ptr++;
glusterd_get_nodesvc_volfile (volid_ptr, priv->workdir,
path, path_len);
if (strcmp (volid_ptr, "quotad") == 0)
glusterd_quotadsvc_build_volfile_path (volid_ptr,
priv->workdir,
path, path_len);
else
glusterd_svc_build_volfile_path (volid_ptr,
priv->workdir,
path, path_len);
ret = 0;
goto out;


@ -0,0 +1,256 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#include "globals.h"
#include "run.h"
#include "glusterd.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
#include "glusterd-nfs-svc.h"
char *nfs_svc_name = "nfs";
int
glusterd_nfssvc_init (glusterd_svc_t *svc)
{
return glusterd_svc_init (svc, nfs_svc_name,
glusterd_nfssvc_manager,
glusterd_nfssvc_start,
glusterd_nfssvc_stop);
}
static int
glusterd_nfssvc_create_volfile ()
{
char filepath[PATH_MAX] = {0,};
glusterd_conf_t *conf = THIS->private;
glusterd_svc_build_volfile_path (nfs_svc_name, conf->workdir,
filepath, sizeof (filepath));
return glusterd_create_global_volfile (build_nfs_graph,
filepath, NULL);
}
static int
glusterd_nfssvc_check_volfile_identical (gf_boolean_t *identical)
{
char nfsvol[PATH_MAX] = {0,};
char tmpnfsvol[PATH_MAX] = {0,};
glusterd_conf_t *conf = NULL;
xlator_t *this = NULL;
int ret = -1;
int need_unlink = 0;
int tmp_fd = -1;
this = THIS;
GF_ASSERT (this);
GF_ASSERT (identical);
conf = this->private;
glusterd_svc_build_volfile_path (nfs_svc_name, conf->workdir,
nfsvol, sizeof (nfsvol));
snprintf (tmpnfsvol, sizeof (tmpnfsvol), "/tmp/gnfs-XXXXXX");
tmp_fd = mkstemp (tmpnfsvol);
if (tmp_fd < 0) {
gf_log (this->name, GF_LOG_WARNING, "Unable to create temp file"
" %s:(%s)", tmpnfsvol, strerror (errno));
goto out;
}
need_unlink = 1;
ret = glusterd_create_global_volfile (build_nfs_graph,
tmpnfsvol, NULL);
if (ret)
goto out;
ret = glusterd_check_files_identical (nfsvol, tmpnfsvol,
identical);
if (ret)
goto out;
out:
if (need_unlink)
unlink (tmpnfsvol);
if (tmp_fd >= 0)
close (tmp_fd);
return ret;
}
static int
glusterd_nfssvc_check_topology_identical (gf_boolean_t *identical)
{
char nfsvol[PATH_MAX] = {0,};
char tmpnfsvol[PATH_MAX] = {0,};
glusterd_conf_t *conf = NULL;
xlator_t *this = THIS;
int ret = -1;
int tmpclean = 0;
int tmpfd = -1;
if ((!identical) || (!this) || (!this->private))
goto out;
conf = (glusterd_conf_t *) this->private;
GF_ASSERT (conf);
/* Fetch the original NFS volfile */
glusterd_svc_build_volfile_path (conf->nfs_svc.name, conf->workdir,
nfsvol, sizeof (nfsvol));
/* Create the temporary NFS volfile */
snprintf (tmpnfsvol, sizeof (tmpnfsvol), "/tmp/gnfs-XXXXXX");
tmpfd = mkstemp (tmpnfsvol);
if (tmpfd < 0) {
gf_log (this->name, GF_LOG_WARNING, "Unable to create temp file"
" %s: (%s)", tmpnfsvol, strerror (errno));
goto out;
}
tmpclean = 1; /* SET the flag to unlink() tmpfile */
ret = glusterd_create_global_volfile (build_nfs_graph,
tmpnfsvol, NULL);
if (ret)
goto out;
/* Compare the topology of volfiles */
ret = glusterd_check_topology_identical (nfsvol, tmpnfsvol,
identical);
out:
if (tmpfd >= 0)
close (tmpfd);
if (tmpclean)
unlink (tmpnfsvol);
return ret;
}
int
glusterd_nfssvc_manager (glusterd_svc_t *svc, void *data, int flags)
{
int ret = -1;
if (glusterd_are_all_volumes_stopped ()) {
ret = svc->stop (svc, SIGKILL);
} else {
ret = glusterd_nfssvc_create_volfile ();
if (ret)
goto out;
ret = svc->stop (svc, SIGKILL);
if (ret)
goto out;
ret = svc->start (svc, flags);
if (ret)
goto out;
ret = glusterd_conn_connect (&(svc->conn));
if (ret)
goto out;
}
out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int
glusterd_nfssvc_start (glusterd_svc_t *svc, int flags)
{
return glusterd_svc_start (svc, flags, NULL);
}
int
glusterd_nfssvc_stop (glusterd_svc_t *svc, int sig)
{
int ret = -1;
gf_boolean_t deregister = _gf_false;
if (glusterd_proc_is_running (&(svc->proc)))
deregister = _gf_true;
ret = glusterd_svc_stop (svc, sig);
if (ret)
goto out;
if (deregister)
glusterd_nfs_pmap_deregister ();
out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int
glusterd_nfssvc_reconfigure ()
{
int ret = -1;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
gf_boolean_t identical = _gf_false;
this = THIS;
GF_ASSERT (this);
priv = this->private;
GF_ASSERT (priv);
/*
* Check both OLD and NEW volfiles, if they are SAME by size
* and cksum i.e. "character-by-character". If YES, then
* NOTHING has been changed, just return.
*/
ret = glusterd_nfssvc_check_volfile_identical (&identical);
if (ret)
goto out;
if (identical) {
ret = 0;
goto out;
}
/*
* They are not identical. Find out if the topology is changed
* OR just the volume options. If just the options which got
* changed, then inform the xlator to reconfigure the options.
*/
identical = _gf_false; /* RESET the FLAG */
ret = glusterd_nfssvc_check_topology_identical (&identical);
if (ret)
goto out;
/* Topology is not changed, but just the options. But write the
* options to NFS volfile, so that NFS will be reconfigured.
*/
if (identical) {
ret = glusterd_nfssvc_create_volfile();
if (ret == 0) {/* Only if above PASSES */
ret = glusterd_fetchspec_notify (THIS);
}
goto out;
}
/*
* NFS volfile's topology has been changed. NFS server needs
* to be RESTARTED to ACT on the changed volfile.
*/
ret = priv->nfs_svc.manager (&(priv->nfs_svc), NULL,
PROC_START_NO_WAIT);
out:
return ret;
}


@ -0,0 +1,36 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#ifndef _GLUSTERD_NFS_SVC_H_
#define _GLUSTERD_NFS_SVC_H_
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif
#include "glusterd-svc-mgmt.h"
int
glusterd_nfssvc_init (glusterd_svc_t *svc);
int
glusterd_nfssvc_manager (glusterd_svc_t *svc, void *data, int flags);
int
glusterd_nfssvc_start (glusterd_svc_t *svc, int flags);
int
glusterd_nfssvc_stop (glusterd_svc_t *svc, int sig);
int
glusterd_nfssvc_reconfigure ();
#endif


@ -44,6 +44,11 @@
#include "common-utils.h"
#include "run.h"
#include "glusterd-snapshot-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
#include "glusterd-shd-svc.h"
#include "glusterd-nfs-svc.h"
#include "glusterd-quotad-svc.h"
#include <sys/types.h>
#include <signal.h>
@ -390,13 +395,6 @@ glusterd_set_volume_status (glusterd_volinfo_t *volinfo,
volinfo->status = status;
}
gf_boolean_t
glusterd_is_volume_started (glusterd_volinfo_t *volinfo)
{
GF_ASSERT (volinfo);
return (volinfo->status == GLUSTERD_STATUS_STARTED);
}
static int
glusterd_op_sm_inject_all_acc (uuid_t *txn_id)
{
@ -1526,7 +1524,7 @@ glusterd_options_reset (glusterd_volinfo_t *volinfo, char *key,
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
ret = glusterd_nodesvcs_handle_reconfigure (volinfo);
ret = glusterd_svcs_reconfigure (volinfo);
if (ret)
goto out;
}
@ -1878,7 +1876,8 @@ glusterd_op_set_volume (dict_t *dict)
int32_t dict_count = 0;
gf_boolean_t check_op_version = _gf_false;
uint32_t new_op_version = 0;
gf_boolean_t quorum_action = _gf_false;
gf_boolean_t quorum_action = _gf_false;
glusterd_svc_t *svc = NULL;
this = THIS;
GF_ASSERT (this);
@ -2043,14 +2042,15 @@ glusterd_op_set_volume (dict_t *dict)
goto out;
}
}
if (!global_opts_set) {
gd_update_volume_op_versions (volinfo);
ret = glusterd_handle_snapd_option (volinfo);
if (ret)
goto out;
if (!volinfo->is_snap_volume) {
svc = &(volinfo->snapd.svc);
ret = svc->manager (svc, volinfo, PROC_START_NO_WAIT);
if (ret)
goto out;
}
ret = glusterd_create_volfiles_and_notify_services (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
@ -2065,10 +2065,10 @@ glusterd_op_set_volume (dict_t *dict)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
ret = glusterd_nodesvcs_handle_reconfigure (volinfo);
ret = glusterd_svcs_reconfigure (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_WARNING,
"Unable to restart NFS-Server");
gf_log (this->name, GF_LOG_ERROR,
"Unable to restart services");
goto out;
}
}
@ -2078,9 +2078,13 @@ glusterd_op_set_volume (dict_t *dict)
volinfo = voliter;
gd_update_volume_op_versions (volinfo);
ret = glusterd_handle_snapd_option (volinfo);
if (ret)
goto out;
if (!volinfo->is_snap_volume) {
svc = &(volinfo->snapd.svc);
ret = svc->manager (svc, volinfo,
PROC_START_NO_WAIT);
if (ret)
goto out;
}
ret = glusterd_create_volfiles_and_notify_services (volinfo);
if (ret) {
@ -2097,7 +2101,7 @@ glusterd_op_set_volume (dict_t *dict)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
ret = glusterd_nodesvcs_handle_reconfigure (volinfo);
ret = glusterd_svcs_reconfigure (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_WARNING,
"Unable to restart NFS-Server");
@ -2303,7 +2307,7 @@ glusterd_op_stats_volume (dict_t *dict, char **op_errstr,
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status)
ret = glusterd_nodesvcs_handle_reconfigure (volinfo);
ret = glusterd_svcs_reconfigure (volinfo);
ret = 0;
@ -2614,23 +2618,24 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
vol_opts = volinfo->dict;
if ((cmd & GF_CLI_STATUS_NFS) != 0) {
ret = glusterd_add_node_to_dict ("nfs", rsp_dict, 0, vol_opts);
ret = glusterd_add_node_to_dict (priv->nfs_svc.name, rsp_dict,
0, vol_opts);
if (ret)
goto out;
other_count++;
node_count++;
} else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
ret = glusterd_add_node_to_dict ("glustershd", rsp_dict, 0,
vol_opts);
ret = glusterd_add_node_to_dict (priv->shd_svc.name, rsp_dict,
0, vol_opts);
if (ret)
goto out;
other_count++;
node_count++;
} else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
ret = glusterd_add_node_to_dict ("quotad", rsp_dict, 0,
vol_opts);
ret = glusterd_add_node_to_dict (priv->quotad_svc.name,
rsp_dict, 0, vol_opts);
if (ret)
goto out;
other_count++;
@ -2703,10 +2708,11 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
"nfs.disable",
_gf_false);
if (!nfs_disabled) {
ret = glusterd_add_node_to_dict ("nfs",
rsp_dict,
other_index,
vol_opts);
ret = glusterd_add_node_to_dict
(priv->nfs_svc.name,
rsp_dict,
other_index,
vol_opts);
if (ret)
goto out;
other_index++;
@ -2719,10 +2725,9 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
_gf_true);
if (glusterd_is_volume_replicate (volinfo)
&& shd_enabled) {
ret = glusterd_add_node_to_dict ("glustershd",
rsp_dict,
other_index,
vol_opts);
ret = glusterd_add_node_to_dict
(priv->shd_svc.name, rsp_dict,
other_index, vol_opts);
if (ret)
goto out;
other_count++;
@ -2730,10 +2735,11 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
other_index++;
}
if (glusterd_is_volume_quota_enabled (volinfo)) {
ret = glusterd_add_node_to_dict ("quotad",
rsp_dict,
other_index,
vol_opts);
ret = glusterd_add_node_to_dict
(priv->quotad_svc.name,
rsp_dict,
other_index,
vol_opts);
if (ret)
goto out;
other_count++;
@ -5122,7 +5128,7 @@ glusterd_bricks_select_profile_volume (dict_t *dict, char **op_errstr,
case GF_CLI_STATS_INFO:
ret = dict_get_str_boolean (dict, "nfs", _gf_false);
if (ret) {
if (!glusterd_is_nodesvc_online ("nfs")) {
if (!priv->nfs_svc.online) {
ret = -1;
gf_log (this->name, GF_LOG_ERROR, "NFS server"
" is not running");
@ -5134,7 +5140,7 @@ glusterd_bricks_select_profile_volume (dict_t *dict, char **op_errstr,
ret = -1;
goto out;
}
pending_node->node = priv->nfs;
pending_node->node = &(priv->nfs_svc);
pending_node->type = GD_NODE_NFS;
list_add_tail (&pending_node->list, selected);
pending_node = NULL;
@ -5164,7 +5170,7 @@ glusterd_bricks_select_profile_volume (dict_t *dict, char **op_errstr,
case GF_CLI_STATS_TOP:
ret = dict_get_str_boolean (dict, "nfs", _gf_false);
if (ret) {
if (!glusterd_is_nodesvc_online ("nfs")) {
if (!priv->nfs_svc.online) {
ret = -1;
gf_log (this->name, GF_LOG_ERROR, "NFS server"
" is not running");
@ -5176,7 +5182,7 @@ glusterd_bricks_select_profile_volume (dict_t *dict, char **op_errstr,
ret = -1;
goto out;
}
pending_node->node = priv->nfs;
pending_node->node = &(priv->nfs_svc);
pending_node->type = GD_NODE_NFS;
list_add_tail (&pending_node->list, selected);
pending_node = NULL;
@ -5581,7 +5587,7 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
switch (heal_op) {
case GF_AFR_OP_INDEX_SUMMARY:
case GF_AFR_OP_STATISTICS_HEAL_COUNT:
if (!glusterd_is_nodesvc_online ("glustershd")) {
if (!priv->shd_svc.online) {
if (!rsp_dict) {
gf_log (this->name, GF_LOG_ERROR, "Received "
"empty ctx.");
@ -5601,7 +5607,7 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
}
break;
case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
if (!glusterd_is_nodesvc_online ("glustershd")) {
if (!priv->shd_svc.online) {
if (!rsp_dict) {
gf_log (this->name, GF_LOG_ERROR, "Received "
"empty ctx.");
@ -5662,7 +5668,7 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
ret = -1;
goto out;
} else {
pending_node->node = priv->shd;
pending_node->node = &(priv->shd_svc);
pending_node->type = GD_NODE_SHD;
list_add_tail (&pending_node->list, selected);
pending_node = NULL;
@ -5734,7 +5740,7 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
glusterd_pending_node_t *pending_node = NULL;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
glusterd_snapd_t *snapd = NULL;
glusterd_snapdsvc_t *snapd = NULL;
GF_ASSERT (dict);
@ -5806,7 +5812,7 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
ret = 0;
} else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
if (!glusterd_is_nodesvc_online ("nfs")) {
if (!priv->nfs_svc.online) {
ret = -1;
gf_log (this->name, GF_LOG_ERROR,
"NFS server is not running");
@ -5818,14 +5824,14 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
ret = -1;
goto out;
}
pending_node->node = priv->nfs;
pending_node->node = &(priv->nfs_svc);
pending_node->type = GD_NODE_NFS;
pending_node->index = 0;
list_add_tail (&pending_node->list, selected);
ret = 0;
} else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
if (!glusterd_is_nodesvc_online ("glustershd")) {
if (!priv->shd_svc.online) {
ret = -1;
gf_log (this->name, GF_LOG_ERROR,
"Self-heal daemon is not running");
@ -5837,14 +5843,14 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
ret = -1;
goto out;
}
pending_node->node = priv->shd;
pending_node->node = &(priv->shd_svc);
pending_node->type = GD_NODE_SHD;
pending_node->index = 0;
list_add_tail (&pending_node->list, selected);
ret = 0;
} else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
if (!glusterd_is_nodesvc_online ("quotad")) {
if (!priv->quotad_svc.online) {
gf_log (this->name, GF_LOG_ERROR, "Quotad is not "
"running");
ret = -1;
@ -5856,14 +5862,14 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
ret = -1;
goto out;
}
pending_node->node = priv->quotad;
pending_node->node = &(priv->quotad_svc);
pending_node->type = GD_NODE_QUOTAD;
pending_node->index = 0;
list_add_tail (&pending_node->list, selected);
ret = 0;
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
if (!glusterd_is_snapd_online (volinfo)) {
if (!volinfo->snapd.svc.online) {
gf_log (this->name, GF_LOG_ERROR, "snapd is not "
"running");
ret = -1;


@ -0,0 +1,134 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#include <stdio.h>
#include <limits.h>
#include <signal.h>
#include "common-utils.h"
#include "xlator.h"
#include "logging.h"
#include "glusterd-messages.h"
#include "glusterd-proc-mgmt.h"
int
glusterd_proc_init (glusterd_proc_t *proc, char *name, char *pidfile,
char *logdir, char *logfile, char *volfile, char *volfileid,
char *volfileserver)
{
int ret = -1;
ret = snprintf (proc->name, sizeof (proc->name), "%s", name);
if (ret < 0)
goto out;
ret = snprintf (proc->pidfile, sizeof (proc->pidfile), "%s", pidfile);
if (ret < 0)
goto out;
ret = snprintf (proc->logdir, sizeof (proc->logdir), "%s", logdir);
if (ret < 0)
goto out;
ret = snprintf (proc->logfile, sizeof (proc->logfile), "%s", logfile);
if (ret < 0)
goto out;
ret = snprintf (proc->volfile, sizeof (proc->volfile), "%s", volfile);
if (ret < 0)
goto out;
ret = snprintf (proc->volfileid, sizeof (proc->volfileid), "%s",
volfileid);
if (ret < 0)
goto out;
ret = snprintf (proc->volfileserver, sizeof (proc->volfileserver), "%s",
volfileserver);
if (ret < 0)
goto out;
out:
if (ret > 0)
ret = 0;
return ret;
}
int
glusterd_proc_stop (glusterd_proc_t *proc, int sig, int flags)
{
/* NB: Copy-paste code from glusterd_service_stop, the source may be
* removed once all daemon management uses proc */
int32_t ret = -1;
pid_t pid = -1;
xlator_t *this = NULL;
this = THIS;
GF_ASSERT (this);
if (!gf_is_service_running (proc->pidfile, &pid)) {
ret = 0;
gf_log (this->name, GF_LOG_INFO, "%s already stopped",
proc->name);
goto out;
}
gf_log (this->name, GF_LOG_DEBUG, "Stopping %s daemon running in pid: "
"%d", proc->name, pid);
ret = kill (pid, sig);
if (ret) {
switch (errno) {
case ESRCH:
gf_log (this->name, GF_LOG_DEBUG, "%s is already "
"stopped", proc->name);
ret = 0;
goto out;
default:
gf_msg (this->name, GF_LOG_ERROR, errno,
GD_MSG_SVC_KILL_FAIL, "Unable to kill %s "
"service, reason:%s", proc->name,
strerror (errno));
}
}
if (flags != PROC_STOP_FORCE)
goto out;
sleep (1);
if (gf_is_service_running (proc->pidfile, NULL)) {
ret = kill (pid, SIGKILL);
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, errno,
GD_MSG_PID_KILL_FAIL, "Unable to kill pid:%d, "
"reason:%s", pid, strerror(errno));
goto out;
}
}
ret = 0;
out:
return ret;
}
int
glusterd_proc_get_pid (glusterd_proc_t *proc)
{
int pid = -1;
(void) gf_is_service_running (proc->pidfile, &pid);
return pid;
}
int
glusterd_proc_is_running (glusterd_proc_t *proc)
{
return gf_is_service_running (proc->pidfile, NULL);
}


@ -0,0 +1,49 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#ifndef _GLUSTERD_PROC_MGMT_H_
#define _GLUSTERD_PROC_MGMT_H_
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif
typedef struct glusterd_proc_ glusterd_proc_t;
enum proc_flags {
PROC_NONE = 0,
PROC_START,
PROC_START_NO_WAIT,
PROC_STOP,
PROC_STOP_FORCE
};
struct glusterd_proc_ {
char name[PATH_MAX];
char pidfile[PATH_MAX];
char logdir[PATH_MAX];
char logfile[PATH_MAX];
char volfile[PATH_MAX];
char volfileserver[PATH_MAX];
char volfileid[256];
};
int
glusterd_proc_init (glusterd_proc_t *proc, char *name, char *pidfile,
char *logdir, char *logfile, char *volfile, char *volfileid,
char *volfileserver);
int
glusterd_proc_stop (glusterd_proc_t *proc, int sig, int flags);
int
glusterd_proc_is_running (glusterd_proc_t *proc);
#endif


@ -19,6 +19,8 @@
#include "glusterd-op-sm.h"
#include "glusterd-store.h"
#include "glusterd-utils.h"
#include "glusterd-nfs-svc.h"
#include "glusterd-quotad-svc.h"
#include "glusterd-volgen.h"
#include "run.h"
#include "syscall.h"
@ -1007,17 +1009,27 @@ glusterd_set_quota_option (glusterd_volinfo_t *volinfo, dict_t *dict,
static int
glusterd_quotad_op (int opcode)
{
int ret = -1;
int ret = -1;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
this = THIS;
GF_ASSERT (this);
priv = this->private;
GF_ASSERT (priv);
switch (opcode) {
case GF_QUOTA_OPTION_TYPE_ENABLE:
case GF_QUOTA_OPTION_TYPE_DISABLE:
if (glusterd_all_volumes_with_quota_stopped ())
ret = glusterd_quotad_stop ();
ret = glusterd_svc_stop (&(priv->quotad_svc),
SIGTERM);
else
ret = glusterd_check_generate_start_quotad_wait
();
ret = priv->quotad_svc.manager
(&(priv->quotad_svc), NULL,
PROC_START);
break;
default:
@ -1167,7 +1179,7 @@ glusterd_op_quota (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
if (priv->op_version == GD_OP_VERSION_MIN)
ret = glusterd_check_generate_start_nfs ();
ret = priv->nfs_svc.manager (&(priv->nfs_svc), NULL, 0);
}
if (rsp_dict && start_crawl == _gf_true)


@ -0,0 +1,158 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#include "globals.h"
#include "run.h"
#include "glusterd.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
#include "glusterd-quotad-svc.h"
char *quotad_svc_name = "quotad";
int glusterd_quotadsvc_init (glusterd_svc_t *svc)
{
int ret = -1;
char volfile[PATH_MAX] = {0,};
glusterd_conf_t *conf = THIS->private;
ret = glusterd_svc_init (svc, quotad_svc_name,
glusterd_quotadsvc_manager,
glusterd_quotadsvc_start,
glusterd_svc_stop);
if (ret)
goto out;
/* glusterd_svc_build_volfile_path () doesn't put the correct quotad
* volfile path into the proc object at service initialization.
* Re-initialize it with the correct path.
*/
glusterd_quotadsvc_build_volfile_path (quotad_svc_name, conf->workdir,
volfile, sizeof (volfile));
snprintf (svc->proc.volfile, sizeof (svc->proc.volfile), "%s", volfile);
out:
return ret;
}
static int
glusterd_quotadsvc_create_volfile ()
{
char filepath[PATH_MAX] = {0,};
glusterd_conf_t *conf = THIS->private;
glusterd_quotadsvc_build_volfile_path (quotad_svc_name, conf->workdir,
filepath, sizeof (filepath));
return glusterd_create_global_volfile (build_quotad_graph,
filepath, NULL);
}
int
glusterd_quotadsvc_manager (glusterd_svc_t *svc, void *data, int flags)
{
int ret = 0;
glusterd_volinfo_t *volinfo = NULL;
volinfo = data;
/* If all the volumes are stopped or all quota-enabled volumes
* are stopped then stop the service if:
* - volinfo is NULL or
* - volinfo is present and quota is enabled on the volume
* Otherwise create the volfile and restart the service if:
* - volinfo is NULL or
* - volinfo is present and quota is enabled on the volume
*/
if (glusterd_are_all_volumes_stopped () ||
glusterd_all_volumes_with_quota_stopped ()) {
if (!(volinfo && !glusterd_is_volume_quota_enabled (volinfo))) {
ret = svc->stop (svc, SIGTERM);
}
} else {
if (!(volinfo && !glusterd_is_volume_quota_enabled (volinfo))) {
ret = glusterd_quotadsvc_create_volfile ();
if (ret)
goto out;
ret = svc->stop (svc, SIGTERM);
if (ret)
goto out;
ret = svc->start (svc, flags);
if (ret)
goto out;
ret = glusterd_conn_connect (&(svc->conn));
if (ret)
goto out;
}
}
out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int
glusterd_quotadsvc_start (glusterd_svc_t *svc, int flags)
{
int i = 0;
int ret = -1;
dict_t *cmdline = NULL;
char key[16] = {0};
char *options[] = {
"*replicate*.entry-self-heal=off",
"--xlator-option",
"*replicate*.metadata-self-heal=off",
"--xlator-option",
"*replicate*.data-self-heal=off",
"--xlator-option",
NULL
};
cmdline = dict_new ();
if (!cmdline)
goto out;
for (i = 0; options[i]; i++) {
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "arg%d", i);
ret = dict_set_str (cmdline, key, options[i]);
if (ret)
goto out;
}
ret = glusterd_svc_start (svc, flags, cmdline);
out:
if (cmdline)
dict_unref (cmdline);
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int
glusterd_quotadsvc_reconfigure ()
{
return glusterd_svc_reconfigure (glusterd_quotadsvc_create_volfile);
}
void
glusterd_quotadsvc_build_volfile_path (char *server, char *workdir,
char *volfile, size_t len)
{
char dir[PATH_MAX] = {0,};
GF_ASSERT (len == PATH_MAX);
glusterd_svc_build_svcdir (server, workdir, dir, sizeof (dir));
snprintf (volfile, len, "%s/%s.vol", dir, server);
}


@ -0,0 +1,36 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#ifndef _GLUSTERD_QUOTAD_SVC_H_
#define _GLUSTERD_QUOTAD_SVC_H_
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif
#include "glusterd-svc-mgmt.h"
int
glusterd_quotadsvc_init (glusterd_svc_t *svc);
int
glusterd_quotadsvc_start (glusterd_svc_t *svc, int flags);
int
glusterd_quotadsvc_manager (glusterd_svc_t *svc, void *data, int flags);
int
glusterd_quotadsvc_reconfigure ();
void
glusterd_quotadsvc_build_volfile_path (char *server, char *workdir,
char *volfile, size_t len);
#endif


@ -21,6 +21,9 @@
#include "glusterd-geo-rep.h"
#include "glusterd-store.h"
#include "glusterd-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
#include "glusterd-nfs-svc.h"
#include "glusterd-volgen.h"
#include "run.h"
#include "syscall.h"
@ -668,12 +671,20 @@ rb_src_brick_restart (glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *src_brickinfo,
int activate_pump)
{
int ret = 0;
int ret = 0;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
this = THIS;
GF_ASSERT (this);
priv = this->private;
GF_ASSERT (priv);
gf_log ("", GF_LOG_DEBUG,
"Attempting to kill src");
ret = glusterd_nfs_server_stop (volinfo);
ret = priv->nfs_svc.stop (&(priv->nfs_svc), SIGKILL);
if (ret) {
gf_log ("", GF_LOG_ERROR, "Unable to stop nfs, ret: %d",
@ -717,7 +728,7 @@ rb_src_brick_restart (glusterd_volinfo_t *volinfo,
}
out:
ret = glusterd_nfs_server_start (volinfo);
ret = priv->nfs_svc.start (&(priv->nfs_svc), PROC_START_NO_WAIT);
if (ret) {
gf_log ("", GF_LOG_ERROR, "Unable to start nfs, ret: %d",
ret);
@ -1771,7 +1782,7 @@ glusterd_op_replace_brick (dict_t *dict, dict_t *rsp_dict)
}
}
ret = glusterd_nodesvcs_stop (volinfo);
ret = glusterd_svcs_stop (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Unable to stop nfs server, ret: %d", ret);
@ -1783,13 +1794,13 @@ glusterd_op_replace_brick (dict_t *dict, dict_t *rsp_dict)
gf_log (this->name, GF_LOG_CRITICAL, "Unable to add "
"dst-brick: %s to volume: %s", dst_brick,
volinfo->volname);
(void) glusterd_nodesvcs_handle_graph_change (volinfo);
(void) glusterd_svcs_manager (volinfo);
goto out;
}
volinfo->rebal.defrag_status = 0;
ret = glusterd_nodesvcs_handle_graph_change (volinfo);
ret = glusterd_svcs_manager (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_CRITICAL,
"Failed to generate nfs volume file");


@ -0,0 +1,167 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#include "globals.h"
#include "run.h"
#include "glusterd.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-shd-svc.h"
char *shd_svc_name = "glustershd";
int
glusterd_shdsvc_init (glusterd_svc_t *svc)
{
return glusterd_svc_init (svc, shd_svc_name,
glusterd_shdsvc_manager,
glusterd_shdsvc_start,
glusterd_svc_stop);
}
static int
glusterd_shdsvc_create_volfile ()
{
char filepath[PATH_MAX] = {0,};
int ret = -1;
glusterd_conf_t *conf = THIS->private;
dict_t *mod_dict = NULL;
mod_dict = dict_new ();
if (!mod_dict)
goto out;
ret = dict_set_uint32 (mod_dict, "cluster.background-self-heal-count",
0);
if (ret)
goto out;
ret = dict_set_str (mod_dict, "cluster.data-self-heal", "on");
if (ret)
goto out;
ret = dict_set_str (mod_dict, "cluster.metadata-self-heal", "on");
if (ret)
goto out;
ret = dict_set_str (mod_dict, "cluster.entry-self-heal", "on");
if (ret)
goto out;
glusterd_svc_build_volfile_path (shd_svc_name, conf->workdir,
filepath, sizeof (filepath));
ret = glusterd_create_global_volfile (build_shd_graph, filepath,
mod_dict);
if (ret) {
gf_log (THIS->name, GF_LOG_ERROR, "Failed to create volfile");
goto out;
}
out:
if (mod_dict)
dict_unref (mod_dict);
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int
glusterd_shdsvc_manager (glusterd_svc_t *svc, void *data, int flags)
{
int ret = 0;
glusterd_volinfo_t *volinfo = NULL;
volinfo = data;
/* If all the volumes are stopped or all shd compatible volumes
* are stopped then stop the service if:
* - volinfo is NULL or
* - volinfo is present and volume is shd compatible
* Otherwise create volfile and restart service if:
* - volinfo is NULL or
* - volinfo is present and volume is shd compatible
*/
if (glusterd_are_all_volumes_stopped () ||
glusterd_all_shd_compatible_volumes_stopped ()) {
if (!(volinfo &&
!glusterd_is_shd_compatible_volume (volinfo))) {
ret = svc->stop (svc, SIGTERM);
}
} else {
if (!(volinfo &&
!glusterd_is_shd_compatible_volume (volinfo))) {
ret = glusterd_shdsvc_create_volfile ();
if (ret)
goto out;
ret = svc->stop (svc, SIGTERM);
if (ret)
goto out;
ret = svc->start (svc, flags);
if (ret)
goto out;
ret = glusterd_conn_connect (&(svc->conn));
if (ret)
goto out;
}
}
out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int
glusterd_shdsvc_start (glusterd_svc_t *svc, int flags)
{
int ret = -1;
char glusterd_uuid_option[PATH_MAX] = {0};
dict_t *cmdline = NULL;
cmdline = dict_new ();
if (!cmdline)
goto out;
ret = snprintf (glusterd_uuid_option, sizeof (glusterd_uuid_option),
"*replicate*.node-uuid=%s", uuid_utoa (MY_UUID));
if (ret < 0)
goto out;
/* Pass cmdline arguments as key-value pairs. The key is merely
* a carrier and is not used. Since the dictionary follows LIFO, the
* values should be set in reverse order. */
ret = dict_set_str (cmdline, "arg2", glusterd_uuid_option);
if (ret)
goto out;
ret = dict_set_str (cmdline, "arg1", "--xlator-option");
if (ret)
goto out;
ret = glusterd_svc_start (svc, flags, cmdline);
out:
if (cmdline)
dict_unref (cmdline);
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int
glusterd_shdsvc_reconfigure ()
{
return glusterd_svc_reconfigure (glusterd_shdsvc_create_volfile);
}


@ -0,0 +1,32 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#ifndef _GLUSTERD_SHD_SVC_H_
#define _GLUSTERD_SHD_SVC_H_
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif
#include "glusterd-svc-mgmt.h"
int
glusterd_shdsvc_init (glusterd_svc_t *svc);
int
glusterd_shdsvc_manager (glusterd_svc_t *svc, void *data, int flags);
int
glusterd_shdsvc_start (glusterd_svc_t *svc, int flags);
int
glusterd_shdsvc_reconfigure ();
#endif


@ -0,0 +1,63 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#include "glusterd.h"
#include "glusterd-utils.h"
#include "glusterd-snapd-svc-helper.h"
void
glusterd_svc_build_snapd_rundir (glusterd_volinfo_t *volinfo,
char *path, int path_len)
{
char workdir[PATH_MAX] = {0,};
glusterd_conf_t *priv = THIS->private;
GLUSTERD_GET_VOLUME_DIR (workdir, volinfo, priv);
snprintf (path, path_len, "%s/run", workdir);
}
void
glusterd_svc_build_snapd_socket_filepath (glusterd_volinfo_t *volinfo,
char *path, int path_len)
{
char sockfilepath[PATH_MAX] = {0,};
char rundir[PATH_MAX] = {0,};
glusterd_svc_build_snapd_rundir (volinfo, rundir, sizeof (rundir));
snprintf (sockfilepath, sizeof (sockfilepath), "%s/run-%s",
rundir, uuid_utoa (MY_UUID));
glusterd_set_socket_filepath (sockfilepath, path, path_len);
}
void
glusterd_svc_build_snapd_pidfile (glusterd_volinfo_t *volinfo,
char *path, int path_len)
{
char rundir[PATH_MAX] = {0,};
glusterd_svc_build_snapd_rundir (volinfo, rundir, sizeof (rundir));
snprintf (path, path_len, "%s/%s-snapd.pid", rundir, volinfo->volname);
}
void
glusterd_svc_build_snapd_volfile (glusterd_volinfo_t *volinfo,
char *path, int path_len)
{
char workdir[PATH_MAX] = {0,};
glusterd_conf_t *priv = THIS->private;
GLUSTERD_GET_VOLUME_DIR (workdir, volinfo, priv);
snprintf (path, path_len, "%s/%s-snapd.vol", workdir,
volinfo->volname);
}


@ -0,0 +1,37 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#ifndef _GLUSTERD_SNAPD_SVC_HELPER_H_
#define _GLUSTERD_SNAPD_SVC_HELPER_H_
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif
#include "glusterd.h"
void
glusterd_svc_build_snapd_rundir (glusterd_volinfo_t *volinfo,
char *path, int path_len);
void
glusterd_svc_build_snapd_socket_filepath (glusterd_volinfo_t *volinfo,
char *path, int path_len);
void
glusterd_svc_build_snapd_pidfile (glusterd_volinfo_t *volinfo,
char *path, int path_len);
void
glusterd_svc_build_snapd_volfile (glusterd_volinfo_t *volinfo,
char *path, int path_len);
#endif


@ -0,0 +1,407 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#include "globals.h"
#include "run.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
#include "glusterd-messages.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
#include "glusterd-conn-mgmt.h"
#include "glusterd-proc-mgmt.h"
#include "glusterd-snapd-svc.h"
#include "glusterd-snapd-svc-helper.h"
#include "glusterd-snapshot-utils.h"
char *snapd_svc_name = "snapd";
static void
glusterd_svc_build_snapd_logdir (char *logdir, char *volname, size_t len)
{
snprintf (logdir, len, "%s/snaps/%s", DEFAULT_LOG_FILE_DIRECTORY,
volname);
}
static void
glusterd_svc_build_snapd_logfile (char *logfile, char *logdir, size_t len)
{
snprintf (logfile, len, "%s/snapd.log", logdir);
}
int
glusterd_snapdsvc_init (void *data)
{
int ret = -1;
char rundir[PATH_MAX] = {0,};
char sockpath[PATH_MAX] = {0,};
char pidfile[PATH_MAX] = {0,};
char volfile[PATH_MAX] = {0,};
char logdir[PATH_MAX] = {0,};
char logfile[PATH_MAX] = {0,};
char volfileid[256] = {0};
glusterd_svc_t *svc = NULL;
glusterd_volinfo_t *volinfo = NULL;
glusterd_conf_t *priv = NULL;
glusterd_conn_notify_t notify = NULL;
xlator_t *this = NULL;
char *volfileserver = NULL;
this = THIS;
GF_ASSERT (this);
priv = this->private;
GF_ASSERT (priv);
volinfo = data;
svc = &(volinfo->snapd.svc);
ret = snprintf (svc->name, sizeof (svc->name), "%s", snapd_svc_name);
if (ret < 0)
goto out;
svc->manager = glusterd_snapdsvc_manager;
svc->start = glusterd_snapdsvc_start;
svc->stop = glusterd_svc_stop;
notify = glusterd_snapdsvc_rpc_notify;
glusterd_svc_build_snapd_rundir (volinfo, rundir, sizeof (rundir));
glusterd_svc_create_rundir (rundir);
/* Initialize the connection mgmt */
glusterd_svc_build_snapd_socket_filepath (volinfo, sockpath,
sizeof (sockpath));
ret = glusterd_conn_init (&(svc->conn), sockpath, 600, notify);
if (ret)
goto out;
/* Initialize the process mgmt */
glusterd_svc_build_snapd_pidfile (volinfo, pidfile, sizeof (pidfile));
glusterd_svc_build_snapd_volfile (volinfo, volfile, sizeof (volfile));
glusterd_svc_build_snapd_logdir (logdir, volinfo->volname,
sizeof (logdir));
ret = mkdir_p (logdir, 0755, _gf_true);
if ((ret == -1) && (EEXIST != errno)) {
gf_log (this->name, GF_LOG_ERROR, "Unable to create logdir %s",
logdir);
goto out;
}
glusterd_svc_build_snapd_logfile (logfile, logdir, sizeof (logfile));
snprintf (volfileid, sizeof (volfileid), "snapd/%s", volinfo->volname);
if (dict_get_str (this->options, "transport.socket.bind-address",
&volfileserver) != 0) {
volfileserver = "localhost";
}
ret = glusterd_proc_init (&(svc->proc), snapd_svc_name, pidfile, logdir,
logfile, volfile, volfileid, volfileserver);
if (ret)
goto out;
out:
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int
glusterd_snapdsvc_manager (glusterd_svc_t *svc, void *data, int flags)
{
int ret = 0;
xlator_t *this = THIS;
glusterd_volinfo_t *volinfo = NULL;
volinfo = data;
ret = glusterd_is_snapd_enabled (volinfo);
if (ret == -1) {
gf_log (this->name, GF_LOG_ERROR, "Failed to read volume "
"options");
goto out;
}
if (ret) {
if (!glusterd_is_volume_started (volinfo)) {
if (glusterd_proc_is_running (&svc->proc)) {
ret = svc->stop (svc, SIGTERM);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Couldn't stop snapd for "
"volume: %s",
volinfo->volname);
} else {
/* Since snapd is not running set ret to 0 */
ret = 0;
}
goto out;
}
ret = glusterd_snapdsvc_create_volfile (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Couldn't create "
"snapd volfile for volume: %s",
volinfo->volname);
goto out;
}
ret = svc->start (svc, flags);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Couldn't start "
"snapd for volume: %s", volinfo->volname);
goto out;
}
glusterd_volinfo_ref (volinfo);
ret = glusterd_conn_connect (&(svc->conn));
if (ret) {
glusterd_volinfo_unref (volinfo);
goto out;
}
} else if (glusterd_proc_is_running (&svc->proc)) {
ret = svc->stop (svc, SIGTERM);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Couldn't stop snapd for volume: %s",
volinfo->volname);
goto out;
}
volinfo->snapd.port = 0;
}
out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int32_t
glusterd_snapdsvc_start (glusterd_svc_t *svc, int flags)
{
int ret = -1;
runner_t runner = {0,};
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
char valgrind_logfile[PATH_MAX] = {0};
int snapd_port = 0;
char msg[1024] = {0,};
char snapd_id[PATH_MAX] = {0,};
glusterd_volinfo_t *volinfo = NULL;
glusterd_snapdsvc_t *snapd = NULL;
this = THIS;
GF_ASSERT(this);
priv = this->private;
GF_ASSERT (priv);
if (glusterd_proc_is_running (&svc->proc)) {
ret = 0;
goto out;
}
/* Get volinfo->snapd from svc object */
snapd = list_entry (svc, glusterd_snapdsvc_t, svc);
if (!snapd) {
gf_log (this->name, GF_LOG_ERROR, "Failed to get snapd object "
"from snapd service");
goto out;
}
/* Get volinfo from snapd */
volinfo = list_entry (snapd, glusterd_volinfo_t, snapd);
if (!volinfo) {
gf_log (this->name, GF_LOG_ERROR, "Failed to get volinfo "
"from snapd");
goto out;
}
ret = access (svc->proc.volfile, F_OK);
if (ret) {
gf_log (this->name, GF_LOG_DEBUG,
"snapd Volfile %s is not present", svc->proc.volfile);
/* If glusterd is down on one of the nodes when USS is enabled
* for the first time, then when that glusterd comes back up it
* looks for the snapd volfile, does not find it, and starting
* snapd fails. Therefore, if the volfile is not present, create
* a fresh one.
*/
ret = glusterd_snapdsvc_create_volfile (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Couldn't create "
"snapd volfile for volume: %s",
volinfo->volname);
goto out;
}
}
runinit (&runner);
if (priv->valgrind) {
snprintf (valgrind_logfile, PATH_MAX, "%s/valgrind-snapd.log",
svc->proc.logdir);
runner_add_args (&runner, "valgrind", "--leak-check=full",
"--trace-children=yes", "--track-origins=yes",
NULL);
runner_argprintf (&runner, "--log-file=%s", valgrind_logfile);
}
snprintf (snapd_id, sizeof (snapd_id), "snapd-%s", volinfo->volname);
runner_add_args (&runner, SBIN_DIR"/glusterfsd",
"-s", svc->proc.volfileserver,
"--volfile-id", svc->proc.volfileid,
"-p", svc->proc.pidfile,
"-l", svc->proc.logfile,
"--brick-name", snapd_id,
"-S", svc->conn.sockpath, NULL);
snapd_port = volinfo->snapd.port;
if (!snapd_port) {
snapd_port = pmap_registry_alloc (THIS);
if (!snapd_port) {
snprintf (msg, sizeof (msg), "Could not allocate port "
"for snapd service for volume %s",
volinfo->volname);
runner_log (&runner, this->name, GF_LOG_DEBUG, msg);
ret = -1;
goto out;
}
}
runner_add_arg (&runner, "--brick-port");
runner_argprintf (&runner, "%d", snapd_port);
runner_add_arg (&runner, "--xlator-option");
runner_argprintf (&runner, "%s-server.listen-port=%d",
volinfo->volname, snapd_port);
runner_add_arg (&runner, "--no-mem-accounting");
snprintf (msg, sizeof (msg),
"Starting the snapd service for volume %s", volinfo->volname);
runner_log (&runner, this->name, GF_LOG_DEBUG, msg);
if (flags == PROC_START_NO_WAIT) {
ret = runner_run_nowait (&runner);
} else {
synclock_unlock (&priv->big_lock);
{
ret = runner_run (&runner);
}
synclock_lock (&priv->big_lock);
}
volinfo->snapd.port = snapd_port;
out:
return ret;
}
int
glusterd_snapdsvc_restart ()
{
glusterd_volinfo_t *volinfo = NULL;
int ret = 0;
xlator_t *this = THIS;
glusterd_conf_t *conf = NULL;
glusterd_svc_t *svc = NULL;
GF_ASSERT (this);
conf = this->private;
GF_ASSERT (conf);
list_for_each_entry (volinfo, &conf->volumes, vol_list) {
/* Init per volume snapd svc */
ret = glusterd_snapdsvc_init (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "snapd service "
"initialization failed for volume %s",
volinfo->volname);
goto out;
}
gf_log (this->name, GF_LOG_DEBUG, "snapd service initialized "
"for %s", volinfo->volname);
/* Start per volume snapd svc */
if (volinfo->status == GLUSTERD_STATUS_STARTED &&
glusterd_is_snapd_enabled (volinfo)) {
svc = &(volinfo->snapd.svc);
ret = svc->start (svc, PROC_START_NO_WAIT);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Couldn't start snapd for "
"vol: %s", volinfo->volname);
goto out;
}
}
}
out:
return ret;
}
int
glusterd_snapdsvc_rpc_notify (glusterd_conn_t *conn, rpc_clnt_event_t event)
{
int ret = 0;
glusterd_svc_t *svc = NULL;
xlator_t *this = NULL;
glusterd_volinfo_t *volinfo = NULL;
glusterd_snapdsvc_t *snapd = NULL;
this = THIS;
GF_ASSERT (this);
svc = list_entry (conn, glusterd_svc_t, conn);
if (!svc) {
gf_log (this->name, GF_LOG_ERROR, "Failed to get the service");
return -1;
}
switch (event) {
case RPC_CLNT_CONNECT:
gf_log (this->name, GF_LOG_DEBUG, "%s has connected with "
"glusterd.", svc->name);
svc->online = _gf_true;
break;
case RPC_CLNT_DISCONNECT:
if (svc->online) {
gf_msg (this->name, GF_LOG_INFO, 0,
GD_MSG_NODE_DISCONNECTED, "%s has disconnected "
"from glusterd.", svc->name);
svc->online = _gf_false;
}
break;
case RPC_CLNT_DESTROY:
snapd = list_entry (svc, glusterd_snapdsvc_t, svc);
if (!snapd) {
gf_log (this->name, GF_LOG_ERROR, "Failed to get the "
"snapd object");
return -1;
}
volinfo = list_entry (snapd, glusterd_volinfo_t, snapd);
if (!volinfo) {
gf_log (this->name, GF_LOG_ERROR, "Failed to get the "
"volinfo object");
return -1;
}
glusterd_volinfo_unref (volinfo);
break;
default:
gf_log (this->name, GF_LOG_TRACE,
"got some other RPC event %d", event);
break;
}
return ret;
}

View File

@ -0,0 +1,44 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#ifndef _GLUSTERD_SNAPD_SVC_H_
#define _GLUSTERD_SNAPD_SVC_H_
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif
#include "glusterd-svc-mgmt.h"
typedef struct glusterd_snapdsvc_ glusterd_snapdsvc_t;
struct glusterd_snapdsvc_{
glusterd_svc_t svc;
int port;
gf_store_handle_t *handle;
};
int
glusterd_snapdsvc_init (void *data);
int
glusterd_snapdsvc_manager (glusterd_svc_t *svc, void *data, int flags);
int
glusterd_snapdsvc_start (glusterd_svc_t *svc, int flags);
int
glusterd_snapdsvc_restart ();
int
glusterd_snapdsvc_rpc_notify (glusterd_conn_t *conn, rpc_clnt_event_t event);
#endif

View File

@ -26,6 +26,9 @@
#include "glusterd-utils.h"
#include "glusterd-store.h"
#include "glusterd-volgen.h"
#include "glusterd-snapd-svc.h"
#include "glusterd-svc-helper.h"
#include "glusterd-snapd-svc-helper.h"
#include "glusterd-snapshot-utils.h"
/*
@ -1721,12 +1724,6 @@ out:
return ret;
}
struct rpc_clnt*
glusterd_snapd_get_rpc (glusterd_volinfo_t *volinfo)
{
return volinfo->snapd.rpc;
}
int32_t
glusterd_add_snapd_to_dict (glusterd_volinfo_t *volinfo,
dict_t *dict, int32_t count)
@ -1767,7 +1764,7 @@ glusterd_add_snapd_to_dict (glusterd_volinfo_t *volinfo,
if (ret)
goto out;
glusterd_get_snapd_pidfile (volinfo, pidfile, sizeof (pidfile));
glusterd_svc_build_snapd_pidfile (volinfo, pidfile, sizeof (pidfile));
brick_online = gf_is_service_running (pidfile, &pid);
@ -3231,407 +3228,6 @@ glusterd_is_snapd_enabled (glusterd_volinfo_t *volinfo)
return ret;
}
void
glusterd_get_snapd_rundir (glusterd_volinfo_t *volinfo,
char *path, int path_len)
{
char workdir[PATH_MAX] = {0,};
glusterd_conf_t *priv = THIS->private;
GLUSTERD_GET_VOLUME_DIR (workdir, volinfo, priv);
snprintf (path, path_len, "%s/run", workdir);
}
void
glusterd_get_snapd_volfile (glusterd_volinfo_t *volinfo,
char *path, int path_len)
{
char workdir[PATH_MAX] = {0,};
glusterd_conf_t *priv = THIS->private;
GLUSTERD_GET_VOLUME_DIR (workdir, volinfo, priv);
snprintf (path, path_len, "%s/%s-snapd.vol", workdir,
volinfo->volname);
}
void
glusterd_get_snapd_pidfile (glusterd_volinfo_t *volinfo,
char *path, int path_len)
{
char rundir[PATH_MAX] = {0,};
glusterd_get_snapd_rundir (volinfo, rundir, sizeof (rundir));
snprintf (path, path_len, "%s/%s-snapd.pid", rundir, volinfo->volname);
}
void
glusterd_set_snapd_socket_filepath (glusterd_volinfo_t *volinfo,
char *path, int path_len)
{
char sockfilepath[PATH_MAX] = {0,};
char rundir[PATH_MAX] = {0,};
glusterd_get_snapd_rundir (volinfo, rundir, sizeof (rundir));
snprintf (sockfilepath, sizeof (sockfilepath), "%s/run-%s",
rundir, uuid_utoa (MY_UUID));
glusterd_set_socket_filepath (sockfilepath, path, path_len);
}
gf_boolean_t
glusterd_is_snapd_running (glusterd_volinfo_t *volinfo)
{
char pidfile[PATH_MAX] = {0,};
int pid = -1;
glusterd_conf_t *priv = THIS->private;
glusterd_get_snapd_pidfile (volinfo, pidfile,
sizeof (pidfile));
return gf_is_service_running (pidfile, &pid);
}
int
glusterd_restart_snapds (glusterd_conf_t *priv)
{
glusterd_volinfo_t *volinfo = NULL;
int ret = 0;
xlator_t *this = THIS;
list_for_each_entry (volinfo, &priv->volumes, vol_list) {
if (volinfo->status == GLUSTERD_STATUS_STARTED &&
glusterd_is_snapd_enabled (volinfo)) {
ret = glusterd_snapd_start (volinfo,
_gf_false);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Couldn't start snapd for "
"vol: %s", volinfo->volname);
goto out;
}
}
}
out:
return ret;
}
gf_boolean_t
glusterd_is_snapd_online (glusterd_volinfo_t *volinfo)
{
return volinfo->snapd.online;
}
void
glusterd_snapd_set_online_status (glusterd_volinfo_t *volinfo,
gf_boolean_t status)
{
volinfo->snapd.online = status;
}
static inline void
glusterd_snapd_set_rpc (glusterd_volinfo_t *volinfo, struct rpc_clnt *rpc)
{
volinfo->snapd.rpc = rpc;
}
int32_t
glusterd_snapd_connect (glusterd_volinfo_t *volinfo, char *socketpath)
{
int ret = 0;
dict_t *options = NULL;
struct rpc_clnt *rpc = NULL;
glusterd_conf_t *priv = THIS->private;
rpc = glusterd_snapd_get_rpc (volinfo);
if (rpc == NULL) {
/* Setting frame-timeout to 10 minutes (600 seconds).
* Unix domain sockets ensure that the connection is reliable.
* The default timeout of 30 minutes used for unreliable network
* connections is too long for unix domain socket connections.
*/
ret = rpc_transport_unix_options_build (&options, socketpath,
600);
if (ret)
goto out;
ret = dict_set_str(options,
"transport.socket.ignore-enoent", "on");
if (ret)
goto out;
glusterd_volinfo_ref (volinfo);
synclock_unlock (&priv->big_lock);
ret = glusterd_rpc_create (&rpc, options,
glusterd_snapd_rpc_notify,
volinfo);
synclock_lock (&priv->big_lock);
if (ret)
goto out;
(void) glusterd_snapd_set_rpc (volinfo, rpc);
}
out:
return ret;
}
int32_t
glusterd_snapd_disconnect (glusterd_volinfo_t *volinfo)
{
struct rpc_clnt *rpc = NULL;
glusterd_conf_t *priv = THIS->private;
rpc = glusterd_snapd_get_rpc (volinfo);
(void) glusterd_snapd_set_rpc (volinfo, NULL);
if (rpc)
glusterd_rpc_clnt_unref (priv, rpc);
return 0;
}
int32_t
glusterd_snapd_start (glusterd_volinfo_t *volinfo, gf_boolean_t wait)
{
int32_t ret = -1;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
runner_t runner = {0,};
char pidfile[PATH_MAX] = {0,};
char logfile[PATH_MAX] = {0,};
char logdir[PATH_MAX] = {0,};
char volfile[PATH_MAX] = {0,};
char glusterd_uuid[1024] = {0,};
char rundir[PATH_MAX] = {0,};
char sockfpath[PATH_MAX] = {0,};
char volfileid[256] = {0};
char *volfileserver = NULL;
char valgrind_logfile[PATH_MAX] = {0};
int snapd_port = 0;
char *volname = volinfo->volname;
char snapd_id[PATH_MAX] = {0,};
char msg[1024] = {0,};
this = THIS;
GF_ASSERT(this);
if (glusterd_is_snapd_running (volinfo)) {
ret = 0;
goto connect;
}
priv = this->private;
glusterd_get_snapd_rundir (volinfo, rundir, sizeof (rundir));
ret = mkdir (rundir, 0777);
if ((ret == -1) && (EEXIST != errno)) {
gf_log (this->name, GF_LOG_ERROR, "Unable to create rundir %s",
rundir);
goto out;
}
glusterd_get_snapd_pidfile (volinfo, pidfile, sizeof (pidfile));
glusterd_get_snapd_volfile (volinfo, volfile, sizeof (volfile));
ret = sys_access (volfile, F_OK);
if (ret) {
gf_log (this->name, GF_LOG_DEBUG,
"snapd Volfile %s is not present", volfile);
/* If glusterd was down on one of the nodes when "USS is enabled"
* was set for the first time, that node will not find a snapd
* volfile when its glusterd comes back up, and starting snapd
* would fail. Therefore, if the volfile is not present, create a
* fresh one.
*/
ret = glusterd_create_snapd_volfile (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Couldn't create "
"snapd volfile for volume: %s",
volinfo->volname);
goto out;
}
}
snprintf (logdir, PATH_MAX, "%s/snaps/%s",
DEFAULT_LOG_FILE_DIRECTORY, volname);
ret = mkdir_p (logdir, 0755, _gf_true);
if ((ret == -1) && (EEXIST != errno)) {
gf_log (this->name, GF_LOG_ERROR, "Unable to create logdir %s",
logdir);
goto out;
}
snprintf (logfile, PATH_MAX, "%s/snapd.log", logdir);
snprintf (volfileid, sizeof (volfileid), "snapd/%s", volname);
glusterd_set_snapd_socket_filepath (volinfo, sockfpath,
sizeof (sockfpath));
if (dict_get_str (this->options, "transport.socket.bind-address",
&volfileserver) != 0) {
volfileserver = "localhost";
}
runinit (&runner);
if (priv->valgrind) {
snprintf (valgrind_logfile, PATH_MAX, "%s/valgrind-snapd.log",
logdir);
runner_add_args (&runner, "valgrind", "--leak-check=full",
"--trace-children=yes", "--track-origins=yes",
NULL);
runner_argprintf (&runner, "--log-file=%s", valgrind_logfile);
}
snprintf (snapd_id, sizeof (snapd_id), "snapd-%s", volname);
runner_add_args (&runner, SBIN_DIR"/glusterfsd",
"-s", volfileserver,
"--volfile-id", volfileid,
"-p", pidfile,
"-l", logfile,
"--brick-name", snapd_id,
"-S", sockfpath, NULL);
snapd_port = volinfo->snapd.port;
if (!snapd_port) {
snapd_port = pmap_registry_alloc (THIS);
if (!snapd_port) {
snprintf (msg, sizeof (msg), "Could not allocate port "
"for snapd service for volume %s", volname);
runner_log (&runner, this->name, GF_LOG_DEBUG, msg);
ret = -1;
goto out;
}
}
runner_add_arg (&runner, "--brick-port");
runner_argprintf (&runner, "%d", snapd_port);
runner_add_arg (&runner, "--xlator-option");
runner_argprintf (&runner, "%s-server.listen-port=%d",
volname, snapd_port);
runner_add_arg (&runner, "--no-mem-accounting");
snprintf (msg, sizeof (msg),
"Starting the snapd service for volume %s", volname);
runner_log (&runner, this->name, GF_LOG_DEBUG, msg);
if (!wait) {
ret = runner_run_nowait (&runner);
} else {
synclock_unlock (&priv->big_lock);
{
ret = runner_run (&runner);
}
synclock_lock (&priv->big_lock);
}
volinfo->snapd.port = snapd_port;
connect:
if (ret == 0)
glusterd_snapd_connect (volinfo, sockfpath);
out:
return ret;
}
int
glusterd_snapd_stop (glusterd_volinfo_t *volinfo)
{
char pidfile[PATH_MAX] = {0,};
char sockfpath[PATH_MAX] = {0,};
glusterd_conf_t *priv = THIS->private;
int ret = 0;
(void)glusterd_snapd_disconnect (volinfo);
if (!glusterd_is_snapd_running (volinfo))
goto out;
glusterd_get_snapd_pidfile (volinfo, pidfile, sizeof (pidfile));
ret = glusterd_service_stop ("snapd", pidfile, SIGTERM, _gf_true);
if (ret == 0) {
glusterd_set_snapd_socket_filepath (volinfo, sockfpath,
sizeof (sockfpath));
(void)glusterd_unlink_file (sockfpath);
}
out:
return ret;
}
int
glusterd_handle_snapd_option (glusterd_volinfo_t *volinfo)
{
int ret = 0;
xlator_t *this = THIS;
if (volinfo->is_snap_volume)
return 0;
ret = glusterd_is_snapd_enabled (volinfo);
if (ret == -1) {
gf_log (this->name, GF_LOG_ERROR, "Failed to read volume "
"options");
goto out;
}
if (ret) {
if (!glusterd_is_volume_started (volinfo)) {
if (glusterd_is_snapd_running (volinfo)) {
ret = glusterd_snapd_stop (volinfo);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Couldn't stop snapd for "
"volume: %s",
volinfo->volname);
} else {
/* Since snapd is not running set ret to 0 */
ret = 0;
}
goto out;
}
ret = glusterd_create_snapd_volfile (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Couldn't create "
"snapd volfile for volume: %s",
volinfo->volname);
goto out;
}
ret = glusterd_snapd_start (volinfo, _gf_false);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Couldn't start "
"snapd for volume: %s", volinfo->volname);
goto out;
}
} else if (glusterd_is_snapd_running (volinfo)) {
ret = glusterd_snapd_stop (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Couldn't stop snapd for volume: %s",
volinfo->volname);
goto out;
}
volinfo->snapd.port = 0;
}
out:
return ret;
}
int32_t
glusterd_is_snap_soft_limit_reached (glusterd_volinfo_t *volinfo, dict_t *dict)

View File

@ -137,54 +137,13 @@ glusterd_snapshot_restore_cleanup (dict_t *rsp_dict,
char *volname,
glusterd_snap_t *snap);
int
glusterd_handle_snapd_option (glusterd_volinfo_t *volinfo);
int32_t
glusterd_snapd_disconnect (glusterd_volinfo_t *volinfo);
void
glusterd_get_snapd_dir (glusterd_volinfo_t *volinfo,
char *path, int path_len);
void
glusterd_get_snapd_rundir (glusterd_volinfo_t *volinfo,
char *path, int path_len);
void
glusterd_get_snapd_volfile (glusterd_volinfo_t *volinfo,
char *path, int path_len);
void
glusterd_get_snapd_pidfile (glusterd_volinfo_t *volinfo,
char *path, int path_len);
void
glusterd_set_snapd_socket_filepath (glusterd_volinfo_t *volinfo,
char *path, int path_len);
gf_boolean_t
glusterd_is_snapd_running (glusterd_volinfo_t *volinfo);
int
glusterd_snapd_stop (glusterd_volinfo_t *volinfo);
int
glusterd_snapd_start (glusterd_volinfo_t *volinfo, gf_boolean_t wait);
int
glusterd_is_snapd_enabled (glusterd_volinfo_t *volinfo);
gf_boolean_t
glusterd_is_snapd_online (glusterd_volinfo_t *volinfo);
void
glusterd_snapd_set_online_status (glusterd_volinfo_t *volinfo,
gf_boolean_t status);
int
glusterd_restart_snapds (glusterd_conf_t *priv);
int32_t
glusterd_check_and_set_config_limit (glusterd_conf_t *priv);

View File

@ -55,6 +55,7 @@
#include "glusterd-mgmt.h"
#include "glusterd-syncop.h"
#include "glusterd-snapshot-utils.h"
#include "glusterd-snapd-svc.h"
#include "glusterfs3.h"
@ -8209,6 +8210,14 @@ gd_restore_snap_volume (dict_t *dict, dict_t *rsp_dict,
/* Use the same version as the original version */
new_volinfo->version = orig_vol->version;
/* Initialize the snapd service */
ret = glusterd_snapdsvc_init (new_volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to initialize snapd "
"service for volume %s", orig_vol->volname);
goto out;
}
/* Copy the snap vol info to the new_volinfo.*/
ret = glusterd_snap_volinfo_restore (dict, rsp_dict, new_volinfo,
snap_vol, volcount);

View File

@ -15,10 +15,11 @@
#include "statedump.h"
#include "glusterd.h"
#include "glusterd-shd-svc.h"
#include "glusterd-quotad-svc.h"
#include "glusterd-nfs-svc.h"
#include "glusterd-locks.h"
static void
glusterd_dump_peer (glusterd_peerinfo_t *peerinfo, char *input_key, int index,
gf_boolean_t xpeers)
@ -223,13 +224,13 @@ glusterd_dump_priv (xlator_t *this)
gf_proc_dump_write (key, "%d", priv->ping_timeout);
gf_proc_dump_build_key (key, "glusterd", "shd.online");
gf_proc_dump_write (key, "%d", priv->shd->online);
gf_proc_dump_write (key, "%d", priv->shd_svc.online);
gf_proc_dump_build_key (key, "glusterd", "nfs.online");
gf_proc_dump_write (key, "%d", priv->nfs->online);
gf_proc_dump_write (key, "%d", priv->nfs_svc.online);
gf_proc_dump_build_key (key, "glusterd", "quotad.online");
gf_proc_dump_write (key, "%d", priv->quotad->online);
gf_proc_dump_write (key, "%d", priv->quotad_svc.online);
GLUSTERD_DUMP_PEERS (&priv->peers, uuid_list, _gf_false);
GLUSTERD_DUMP_PEERS (&priv->xaction_peers, op_peers_list,

View File

@ -2021,7 +2021,8 @@ out:
return ret;
}
static int
int
glusterd_restore_op_version (xlator_t *this)
{
glusterd_conf_t *conf = NULL;
@ -4273,13 +4274,6 @@ glusterd_restore ()
this = THIS;
ret = glusterd_restore_op_version (this);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Failed to restore op_version");
goto out;
}
ret = glusterd_store_retrieve_volumes (this, NULL);
if (ret)
goto out;

View File

@ -167,4 +167,8 @@ glusterd_store_update_missed_snaps ();
glusterd_volinfo_t*
glusterd_store_retrieve_volume (char *volname, glusterd_snap_t *snap);
int
glusterd_restore_op_version (xlator_t *this);
#endif

View File

@ -0,0 +1,125 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#include "globals.h"
#include "run.h"
#include "glusterd.h"
#include "glusterfs.h"
#include "glusterd-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-shd-svc.h"
#include "glusterd-quotad-svc.h"
#include "glusterd-nfs-svc.h"
int
glusterd_svcs_reconfigure (glusterd_volinfo_t *volinfo)
{
int ret = 0;
xlator_t *this = THIS;
glusterd_conf_t *conf = NULL;
GF_ASSERT (this);
conf = this->private;
GF_ASSERT (conf);
ret = glusterd_nfssvc_reconfigure ();
if (ret)
goto out;
if (volinfo && !glusterd_is_shd_compatible_volume (volinfo)) {
; /* Do nothing */
} else {
ret = glusterd_shdsvc_reconfigure ();
if (ret)
goto out;
}
if (conf->op_version == GD_OP_VERSION_MIN)
goto out;
if (volinfo && !glusterd_is_volume_quota_enabled (volinfo))
goto out;
ret = glusterd_quotadsvc_reconfigure ();
if (ret)
goto out;
out:
return ret;
}
int
glusterd_svcs_stop ()
{
int ret = 0;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
this = THIS;
GF_ASSERT (this);
priv = this->private;
GF_ASSERT (priv);
ret = glusterd_svc_stop (&(priv->nfs_svc), SIGKILL);
if (ret)
goto out;
ret = glusterd_svc_stop (&(priv->shd_svc), SIGTERM);
if (ret)
goto out;
ret = glusterd_svc_stop (&(priv->quotad_svc), SIGTERM);
if (ret)
goto out;
out:
return ret;
}
int
glusterd_svcs_manager (glusterd_volinfo_t *volinfo)
{
int ret = 0;
xlator_t *this = THIS;
glusterd_conf_t *conf = NULL;
GF_ASSERT (this);
conf = this->private;
GF_ASSERT (conf);
if (volinfo && volinfo->is_snap_volume)
return 0;
ret = conf->nfs_svc.manager (&(conf->nfs_svc), NULL,
PROC_START_NO_WAIT);
if (ret)
goto out;
ret = conf->shd_svc.manager (&(conf->shd_svc), volinfo,
PROC_START_NO_WAIT);
if (ret == -EINVAL)
ret = 0;
if (ret)
goto out;
if (conf->op_version == GD_OP_VERSION_MIN)
goto out;
ret = conf->quotad_svc.manager (&(conf->quotad_svc), volinfo,
PROC_START_NO_WAIT);
if (ret == -EINVAL)
ret = 0;
if (ret)
goto out;
out:
return ret;
}

View File

@ -0,0 +1,30 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#ifndef _GLUSTERD_SVC_HELPER_H_
#define _GLUSTERD_SVC_HELPER_H_
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif
#include "glusterd.h"
#include "glusterd-svc-mgmt.h"
int
glusterd_svcs_reconfigure (glusterd_volinfo_t *volinfo);
int
glusterd_svcs_stop ();
int
glusterd_svcs_manager (glusterd_volinfo_t *volinfo);
#endif

View File

@ -0,0 +1,339 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#include "globals.h"
#include "run.h"
#include "glusterd.h"
#include "glusterfs.h"
#include "glusterd-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-proc-mgmt.h"
#include "glusterd-conn-mgmt.h"
#include "glusterd-messages.h"
int
glusterd_svc_create_rundir (char *rundir)
{
int ret = -1;
ret = mkdir (rundir, 0777);
if ((ret == -1) && (EEXIST != errno)) {
gf_log (THIS->name, GF_LOG_ERROR, "Unable to create rundir %s",
rundir);
}
return ret;
}
static void
glusterd_svc_build_logfile_path (char *server, char *logdir, char *logfile,
size_t len)
{
snprintf (logfile, len, "%s/%s.log", logdir, server);
}
static void
glusterd_svc_build_volfileid_path (char *server, char *volfileid, size_t len)
{
snprintf (volfileid, len, "gluster/%s", server);
}
static int
glusterd_svc_init_common (glusterd_svc_t *svc,
char *svc_name, char *workdir,
char *rundir, char *logdir,
glusterd_svc_manager_t manager,
glusterd_svc_start_t start,
glusterd_svc_stop_t stop,
glusterd_conn_notify_t notify)
{
int ret = -1;
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
char pidfile[PATH_MAX] = {0,};
char logfile[PATH_MAX] = {0,};
char volfile[PATH_MAX] = {0,};
char sockfpath[PATH_MAX] = {0,};
char volfileid[256] = {0};
char *volfileserver = NULL;
this = THIS;
GF_ASSERT (this);
priv = this->private;
GF_ASSERT (priv);
ret = snprintf (svc->name, sizeof (svc->name), "%s", svc_name);
if (ret < 0)
goto out;
svc->manager = manager;
svc->start = start;
svc->stop = stop;
if (!notify)
notify = glusterd_svc_common_rpc_notify;
glusterd_svc_create_rundir (rundir);
/* Initialize the connection mgmt */
glusterd_conn_build_socket_filepath (rundir, MY_UUID,
sockfpath, sizeof (sockfpath));
ret = glusterd_conn_init (&(svc->conn), sockfpath, 600, notify);
if (ret)
goto out;
/* Initialize the process mgmt */
glusterd_svc_build_pidfile_path (svc_name, workdir, pidfile,
sizeof(pidfile));
glusterd_svc_build_volfile_path (svc_name, workdir, volfile,
sizeof (volfile));
glusterd_svc_build_logfile_path (svc_name, logdir, logfile,
sizeof (logfile));
glusterd_svc_build_volfileid_path (svc_name, volfileid,
sizeof(volfileid));
if (dict_get_str (this->options, "transport.socket.bind-address",
&volfileserver) != 0) {
volfileserver = "localhost";
}
ret = glusterd_proc_init (&(svc->proc), svc_name, pidfile, logdir,
logfile, volfile, volfileid, volfileserver);
if (ret)
goto out;
out:
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
static int
svc_add_args (dict_t *cmdline, char *arg, data_t *value, void *data)
{
runner_t *runner = data;
runner_add_arg (runner, value->data);
return 0;
}
int glusterd_svc_init (glusterd_svc_t *svc, char *svc_name,
glusterd_svc_manager_t manager,
glusterd_svc_start_t start,
glusterd_svc_stop_t stop)
{
int ret = -1;
char rundir[PATH_MAX] = {0,};
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
this = THIS;
GF_ASSERT (this);
priv = this->private;
GF_ASSERT (priv);
glusterd_svc_build_rundir (svc_name, priv->workdir, rundir,
sizeof (rundir));
ret = glusterd_svc_init_common (svc, svc_name, priv->workdir, rundir,
DEFAULT_LOG_FILE_DIRECTORY, manager,
start, stop, NULL);
return ret;
}
int
glusterd_svc_start (glusterd_svc_t *svc, int flags, dict_t *cmdline)
{
int ret = -1;
runner_t runner = {0,};
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
char valgrind_logfile[PATH_MAX] = {0};
char glusterd_uuid_option[1024] = {0};
this = THIS;
GF_ASSERT (this);
priv = this->private;
GF_ASSERT (priv);
if (glusterd_proc_is_running (&(svc->proc))) {
ret = 0;
goto out;
}
ret = access (svc->proc.volfile, F_OK);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Volfile %s is not present",
svc->proc.volfile);
goto out;
}
runinit (&runner);
if (priv->valgrind) {
snprintf (valgrind_logfile, PATH_MAX, "%s/valgrind-%s.log",
svc->proc.logfile, svc->name);
runner_add_args (&runner, "valgrind", "--leak-check=full",
"--trace-children=yes", "--track-origins=yes",
NULL);
runner_argprintf (&runner, "--log-file=%s", valgrind_logfile);
}
runner_add_args (&runner, SBIN_DIR"/glusterfs",
"-s", svc->proc.volfileserver,
"--volfile-id", svc->proc.volfileid,
"-p", svc->proc.pidfile,
"-l", svc->proc.logfile,
"-S", svc->conn.sockpath,
NULL);
if (cmdline)
dict_foreach (cmdline, svc_add_args, (void *) &runner);
gf_log (this->name, GF_LOG_DEBUG, "Starting %s service", svc->name);
if (flags == PROC_START_NO_WAIT) {
ret = runner_run_nowait (&runner);
} else {
synclock_unlock (&priv->big_lock);
{
ret = runner_run (&runner);
}
synclock_lock (&priv->big_lock);
}
out:
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
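/* Not part of this patch: a minimal, hypothetical usage sketch showing how
 * extra command-line arguments reach glusterd_svc_start () through the
 * cmdline dict; svc_add_args () appends each dict value as an argv entry.
 * The key name "arg1" and the helper name below are illustrative only. */
static int
glusterd_svc_start_demo (glusterd_svc_t *svc)
{
        int     ret     = -1;
        dict_t *cmdline = dict_new ();

        if (!cmdline)
                goto out;

        /* The value, not the key, becomes the extra argv entry. */
        ret = dict_set_str (cmdline, "arg1", "--no-mem-accounting");
        if (ret)
                goto out;

        ret = glusterd_svc_start (svc, PROC_START_NO_WAIT, cmdline);
out:
        if (cmdline)
                dict_unref (cmdline);
        return ret;
}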
int glusterd_svc_stop (glusterd_svc_t *svc, int sig)
{
int ret = -1;
ret = glusterd_proc_stop (&(svc->proc), sig, PROC_STOP_FORCE);
if (ret)
goto out;
glusterd_conn_disconnect (&(svc->conn));
if (ret == 0) {
svc->online = _gf_false;
(void) glusterd_unlink_file ((char *)svc->conn.sockpath);
}
out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
void
glusterd_svc_build_pidfile_path (char *server, char *workdir, char *path,
size_t len)
{
char dir[PATH_MAX] = {0};
GF_ASSERT (len == PATH_MAX);
glusterd_svc_build_rundir (server, workdir, dir, sizeof (dir));
snprintf (path, len, "%s/%s.pid", dir, server);
}
void
glusterd_svc_build_volfile_path (char *server, char *workdir, char *volfile,
size_t len)
{
char dir[PATH_MAX] = {0,};
GF_ASSERT (len == PATH_MAX);
glusterd_svc_build_svcdir (server, workdir, dir, sizeof (dir));
snprintf (volfile, len, "%s/%s-server.vol", dir, server);
}
void
glusterd_svc_build_svcdir (char *server, char *workdir, char *path, size_t len)
{
GF_ASSERT (len == PATH_MAX);
snprintf (path, len, "%s/%s", workdir, server);
}
void
glusterd_svc_build_rundir (char *server, char *workdir, char *path, size_t len)
{
char dir[PATH_MAX] = {0};
GF_ASSERT (len == PATH_MAX);
glusterd_svc_build_svcdir (server, workdir, dir, sizeof (dir));
snprintf (path, len, "%s/run", dir);
}
int
glusterd_svc_reconfigure (int (*create_volfile) ())
{
int ret = -1;
ret = create_volfile ();
if (ret)
goto out;
ret = glusterd_fetchspec_notify (THIS);
out:
return ret;
}
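/* Not part of this patch: a hypothetical sketch of how a service-specific
 * reconfigure reuses the helper above. It only hands over its own volfile
 * generator; glusterd_svc_reconfigure () regenerates the volfile and then
 * asks the daemon to re-fetch it via glusterd_fetchspec_notify (). The
 * names glusterd_foosvc_reconfigure and glusterd_foosvc_create_volfile
 * are illustrative only:
 *
 *      int
 *      glusterd_foosvc_reconfigure ()
 *      {
 *              return glusterd_svc_reconfigure (glusterd_foosvc_create_volfile);
 *      }
 */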
int
glusterd_svc_common_rpc_notify (glusterd_conn_t *conn,
rpc_clnt_event_t event)
{
int ret = 0;
glusterd_svc_t *svc = NULL;
xlator_t *this = NULL;
this = THIS;
GF_ASSERT (this);
/* Get the parent object i.e. svc using the list_entry macro */
svc = list_entry (conn, glusterd_svc_t, conn);
if (!svc) {
gf_log (this->name, GF_LOG_ERROR, "Failed to get the service");
return -1;
}
switch (event) {
case RPC_CLNT_CONNECT:
gf_log (this->name, GF_LOG_DEBUG, "%s has connected with "
"glusterd.", svc->name);
svc->online = _gf_true;
break;
case RPC_CLNT_DISCONNECT:
if (svc->online) {
gf_msg (this->name, GF_LOG_INFO, 0,
GD_MSG_NODE_DISCONNECTED, "%s has disconnected "
"from glusterd.", svc->name);
svc->online = _gf_false;
}
break;
default:
gf_log (this->name, GF_LOG_TRACE,
"got some other RPC event %d", event);
break;
}
return ret;
}
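/* Not part of this patch: a minimal sketch of the recovery pattern used by
 * the notify callbacks above. list_entry () (glusterfs' container_of
 * variant from list.h) subtracts the offset of the embedded member from
 * the member's address, so the enclosing object is found without storing
 * a back-pointer. */
static glusterd_svc_t *
glusterd_svc_from_conn_demo (glusterd_conn_t *conn)
{
        /* Equivalent to:
         * (glusterd_svc_t *)((char *)conn - offsetof (glusterd_svc_t, conn))
         */
        return list_entry (conn, glusterd_svc_t, conn);
}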

View File

@ -0,0 +1,78 @@
/*
Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#ifndef _GLUSTERD_SVC_MGMT_H_
#define _GLUSTERD_SVC_MGMT_H_
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
#endif
#include "glusterd-proc-mgmt.h"
#include "glusterd-conn-mgmt.h"
struct glusterd_svc_;
typedef struct glusterd_svc_ glusterd_svc_t;
typedef int (*glusterd_svc_manager_t) (glusterd_svc_t *svc,
void *data, int flags);
typedef int (*glusterd_svc_start_t) (glusterd_svc_t *svc, int flags);
typedef int (*glusterd_svc_stop_t) (glusterd_svc_t *svc, int sig);
struct glusterd_svc_ {
char name[PATH_MAX];
glusterd_conn_t conn;
glusterd_proc_t proc;
glusterd_svc_manager_t manager;
glusterd_svc_start_t start;
glusterd_svc_stop_t stop;
gf_boolean_t online;
};
int
glusterd_svc_create_rundir (char *rundir);
int
glusterd_svc_init (glusterd_svc_t *svc, char *svc_name,
glusterd_svc_manager_t manager,
glusterd_svc_start_t start,
glusterd_svc_stop_t stop);
int
glusterd_svc_start (glusterd_svc_t *svc, int flags, dict_t *cmdline);
int
glusterd_svc_stop (glusterd_svc_t *svc, int sig);
void
glusterd_svc_build_pidfile_path (char *server, char *workdir,
char *path, size_t len);
void
glusterd_svc_build_volfile_path (char *server, char *workdir,
char *volfile, size_t len);
void
glusterd_svc_build_svcdir (char *server, char *workdir,
char *path, size_t len);
void
glusterd_svc_build_rundir (char *server, char *workdir,
char *path, size_t len);
int
glusterd_svc_reconfigure (int (*create_volfile) ());
int
glusterd_svc_common_rpc_notify (glusterd_conn_t *conn,
rpc_clnt_event_t event);
#endif

File diff suppressed because it is too large

View File

@ -186,65 +186,12 @@ int
glusterd_compute_cksum (glusterd_volinfo_t *volinfo,
gf_boolean_t is_quota_conf);
void
glusterd_get_nodesvc_volfile (char *server, char *workdir,
char *volfile, size_t len);
gf_boolean_t
glusterd_is_nodesvc_running ();
void
glusterd_get_nodesvc_dir (char *server, char *workdir,
char *path, size_t len);
int32_t
glusterd_nfs_server_start ();
int32_t
glusterd_nfs_server_stop ();
int32_t
glusterd_shd_start ();
int32_t
glusterd_shd_stop ();
int32_t
glusterd_quotad_start ();
int32_t
glusterd_quotad_start_wait ();
int32_t
glusterd_quotad_stop ();
void
glusterd_set_socket_filepath (char *sock_filepath, char *sockpath, size_t len);
int32_t
glusterd_nodesvc_set_socket_filepath (char *rundir, uuid_t uuid,
char *socketpath, int len);
struct rpc_clnt*
glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node);
struct rpc_clnt*
glusterd_nodesvc_get_rpc (char *server);
int32_t
glusterd_nodesvc_set_rpc (char *server, struct rpc_clnt *rpc);
int32_t
glusterd_nodesvc_connect (char *server, char *socketpath);
void
glusterd_nodesvc_set_online_status (char *server, gf_boolean_t status);
gf_boolean_t
glusterd_is_nodesvc_online (char *server);
int
glusterd_remote_hostname_get (rpcsvc_request_t *req,
char *remote_host, int len);
@ -253,29 +200,6 @@ glusterd_import_friend_volumes (dict_t *peer_data);
void
glusterd_set_volume_status (glusterd_volinfo_t *volinfo,
glusterd_volume_status status);
int
glusterd_check_generate_start_nfs (void);
int
glusterd_check_generate_start_shd (void);
int
glusterd_check_generate_start_quotad (void);
int
glusterd_check_generate_start_quotad_wait (void);
int
glusterd_nodesvcs_handle_graph_change (glusterd_volinfo_t *volinfo);
int
glusterd_nodesvcs_handle_reconfigure (glusterd_volinfo_t *volinfo);
int
glusterd_nodesvcs_start (glusterd_volinfo_t *volinfo);
int
glusterd_nodesvcs_stop (glusterd_volinfo_t *volinfo);
int32_t
glusterd_volume_count_get (void);
@ -635,9 +559,6 @@ glusterd_is_volume_quota_enabled (glusterd_volinfo_t *volinfo);
gf_boolean_t
glusterd_all_volumes_with_quota_stopped ();
int
glusterd_reconfigure_quotad ();
void
glusterd_clean_up_quota_store (glusterd_volinfo_t *volinfo);
@ -754,4 +675,18 @@ glusterd_import_quota_conf (dict_t *peer_data, int vol_idx,
gf_boolean_t
glusterd_is_shd_compatible_volume (glusterd_volinfo_t *volinfo);
gf_boolean_t
glusterd_are_all_volumes_stopped ();
gf_boolean_t
glusterd_all_shd_compatible_volumes_stopped ();
void
glusterd_nfs_pmap_deregister ();
gf_boolean_t
glusterd_is_volume_started (glusterd_volinfo_t *volinfo);
#endif

View File

@ -35,6 +35,9 @@
#include "run.h"
#include "options.h"
#include "glusterd-snapshot-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
#include "glusterd-snapd-svc-helper.h"
extern struct volopt_map_entry glusterd_volopt_map[];
@ -44,13 +47,6 @@ extern struct volopt_map_entry glusterd_volopt_map[];
*
*********************************************/
struct volgen_graph {
char **errstr;
glusterfs_graph_t graph;
};
typedef struct volgen_graph volgen_graph_t;
static void
set_graph_errstr (volgen_graph_t *graph, const char *str)
{
@ -3428,7 +3424,7 @@ out:
return ret;
}
static int
int
build_shd_graph (volgen_graph_t *graph, dict_t *mod_dict)
{
glusterd_volinfo_t *voliter = NULL;
@ -3475,7 +3471,7 @@ out:
}
/* builds a graph for nfs server role, with option overrides in mod_dict */
static int
int
build_nfs_graph (volgen_graph_t *graph, dict_t *mod_dict)
{
volgen_graph_t cgraph = {0,};
@ -3725,7 +3721,7 @@ glusterd_generate_brick_volfile (glusterd_volinfo_t *volinfo,
return ret;
}
static int
int
build_quotad_graph (volgen_graph_t *graph, dict_t *mod_dict)
{
volgen_graph_t cgraph = {0};
@ -4042,238 +4038,8 @@ out:
}
int
glusterd_create_rb_volfiles (glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo)
{
int ret = -1;
ret = glusterd_generate_brick_volfile (volinfo, brickinfo);
if (!ret)
ret = generate_client_volfiles (volinfo, GF_CLIENT_TRUSTED);
if (!ret)
ret = glusterd_fetchspec_notify (THIS);
return ret;
}
int
glusterd_create_volfiles (glusterd_volinfo_t *volinfo)
{
int ret = -1;
xlator_t *this = NULL;
this = THIS;
ret = generate_brick_volfiles (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Could not generate volfiles for bricks");
goto out;
}
ret = generate_client_volfiles (volinfo, GF_CLIENT_TRUSTED);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Could not generate trusted client volfiles");
goto out;
}
ret = generate_client_volfiles (volinfo, GF_CLIENT_OTHER);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Could not generate client volfiles");
out:
return ret;
}
int
glusterd_create_volfiles_and_notify_services (glusterd_volinfo_t *volinfo)
{
int ret = -1;
xlator_t *this = NULL;
this = THIS;
ret = glusterd_create_volfiles (volinfo);
if (ret)
goto out;
ret = glusterd_fetchspec_notify (this);
out:
return ret;
}
int
glusterd_create_global_volfile (int (*builder) (volgen_graph_t *graph,
dict_t *set_dict),
char *filepath, dict_t *mod_dict)
{
volgen_graph_t graph = {0,};
int ret = -1;
ret = builder (&graph, mod_dict);
if (!ret)
ret = volgen_write_volfile (&graph, filepath);
volgen_graph_free (&graph);
return ret;
}
int
glusterd_create_nfs_volfile ()
{
char filepath[PATH_MAX] = {0,};
glusterd_conf_t *conf = THIS->private;
glusterd_get_nodesvc_volfile ("nfs", conf->workdir,
filepath, sizeof (filepath));
return glusterd_create_global_volfile (build_nfs_graph,
filepath, NULL);
}
int
glusterd_create_shd_volfile ()
{
char filepath[PATH_MAX] = {0,};
int ret = -1;
glusterd_conf_t *conf = THIS->private;
dict_t *mod_dict = NULL;
mod_dict = dict_new ();
if (!mod_dict)
goto out;
ret = dict_set_uint32 (mod_dict, "cluster.background-self-heal-count", 0);
if (ret)
goto out;
ret = dict_set_str (mod_dict, "cluster.data-self-heal", "on");
if (ret)
goto out;
ret = dict_set_str (mod_dict, "cluster.metadata-self-heal", "on");
if (ret)
goto out;
ret = dict_set_str (mod_dict, "cluster.entry-self-heal", "on");
if (ret)
goto out;
glusterd_get_nodesvc_volfile ("glustershd", conf->workdir,
filepath, sizeof (filepath));
ret = glusterd_create_global_volfile (build_shd_graph, filepath,
mod_dict);
out:
if (mod_dict)
dict_unref (mod_dict);
return ret;
}
int
glusterd_check_nfs_volfile_identical (gf_boolean_t *identical)
{
char nfsvol[PATH_MAX] = {0,};
char tmpnfsvol[PATH_MAX] = {0,};
glusterd_conf_t *conf = NULL;
xlator_t *this = NULL;
int ret = -1;
int need_unlink = 0;
int tmp_fd = -1;
this = THIS;
GF_ASSERT (this);
GF_ASSERT (identical);
conf = this->private;
glusterd_get_nodesvc_volfile ("nfs", conf->workdir,
nfsvol, sizeof (nfsvol));
snprintf (tmpnfsvol, sizeof (tmpnfsvol), "/tmp/gnfs-XXXXXX");
tmp_fd = mkstemp (tmpnfsvol);
if (tmp_fd < 0) {
gf_log ("", GF_LOG_WARNING, "Unable to create temp file %s: "
"(%s)", tmpnfsvol, strerror (errno));
goto out;
}
need_unlink = 1;
ret = glusterd_create_global_volfile (build_nfs_graph,
tmpnfsvol, NULL);
if (ret)
goto out;
ret = glusterd_check_files_identical (nfsvol, tmpnfsvol,
identical);
if (ret)
goto out;
out:
if (need_unlink)
unlink (tmpnfsvol);
if (tmp_fd >= 0)
close (tmp_fd);
return ret;
}
int
glusterd_check_nfs_topology_identical (gf_boolean_t *identical)
{
char nfsvol[PATH_MAX] = {0,};
char tmpnfsvol[PATH_MAX] = {0,};
glusterd_conf_t *conf = NULL;
xlator_t *this = THIS;
int ret = -1;
int tmpclean = 0;
int tmpfd = -1;
if ((!identical) || (!this) || (!this->private))
goto out;
conf = (glusterd_conf_t *) this->private;
/* Fetch the original NFS volfile */
glusterd_get_nodesvc_volfile ("nfs", conf->workdir,
nfsvol, sizeof (nfsvol));
/* Create the temporary NFS volfile */
snprintf (tmpnfsvol, sizeof (tmpnfsvol), "/tmp/gnfs-XXXXXX");
tmpfd = mkstemp (tmpnfsvol);
if (tmpfd < 0) {
gf_log (this->name, GF_LOG_WARNING,
"Unable to create temp file %s: (%s)",
tmpnfsvol, strerror (errno));
goto out;
}
tmpclean = 1; /* SET the flag to unlink() tmpfile */
ret = glusterd_create_global_volfile (build_nfs_graph,
tmpnfsvol, NULL);
if (ret)
goto out;
/* Compare the topology of volfiles */
ret = glusterd_check_topology_identical (nfsvol, tmpnfsvol,
identical);
out:
if (tmpfd >= 0)
close (tmpfd);
if (tmpclean)
unlink (tmpnfsvol);
return ret;
}
int
glusterd_generate_snapd_volfile (volgen_graph_t *graph,
glusterd_volinfo_t *volinfo)
glusterd_snapdsvc_generate_volfile (volgen_graph_t *graph,
glusterd_volinfo_t *volinfo)
{
xlator_t *xl = NULL;
char *username = NULL;
@ -4370,15 +4136,15 @@ glusterd_generate_snapd_volfile (volgen_graph_t *graph,
}
int
glusterd_create_snapd_volfile (glusterd_volinfo_t *volinfo)
glusterd_snapdsvc_create_volfile (glusterd_volinfo_t *volinfo)
{
volgen_graph_t graph = {0,};
int ret = -1;
char filename [PATH_MAX] = {0,};
glusterd_get_snapd_volfile (volinfo, filename, PATH_MAX);
glusterd_svc_build_snapd_volfile (volinfo, filename, PATH_MAX);
ret = glusterd_generate_snapd_volfile (&graph, volinfo);
ret = glusterd_snapdsvc_generate_volfile (&graph, volinfo);
if (!ret)
ret = volgen_write_volfile (&graph, filename);
@ -4388,17 +4154,85 @@ glusterd_create_snapd_volfile (glusterd_volinfo_t *volinfo)
}
int
glusterd_create_quotad_volfile (void *data)
glusterd_create_rb_volfiles (glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo)
{
char filepath[PATH_MAX] = {0,};
glusterd_conf_t *conf = THIS->private;
int ret = -1;
glusterd_get_nodesvc_volfile ("quotad", conf->workdir,
filepath, sizeof (filepath));
return glusterd_create_global_volfile (build_quotad_graph,
filepath, NULL);
ret = glusterd_generate_brick_volfile (volinfo, brickinfo);
if (!ret)
ret = generate_client_volfiles (volinfo, GF_CLIENT_TRUSTED);
if (!ret)
ret = glusterd_fetchspec_notify (THIS);
return ret;
}
int
glusterd_create_volfiles (glusterd_volinfo_t *volinfo)
{
int ret = -1;
xlator_t *this = NULL;
this = THIS;
ret = generate_brick_volfiles (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Could not generate volfiles for bricks");
goto out;
}
ret = generate_client_volfiles (volinfo, GF_CLIENT_TRUSTED);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Could not generate trusted client volfiles");
goto out;
}
ret = generate_client_volfiles (volinfo, GF_CLIENT_OTHER);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Could not generate client volfiles");
out:
return ret;
}
int
glusterd_create_volfiles_and_notify_services (glusterd_volinfo_t *volinfo)
{
int ret = -1;
xlator_t *this = NULL;
this = THIS;
ret = glusterd_create_volfiles (volinfo);
if (ret)
goto out;
ret = glusterd_fetchspec_notify (this);
out:
return ret;
}
int
glusterd_create_global_volfile (int (*builder) (volgen_graph_t *graph,
dict_t *set_dict),
char *filepath, dict_t *mod_dict)
{
volgen_graph_t graph = {0,};
int ret = -1;
ret = builder (&graph, mod_dict);
if (!ret)
ret = volgen_write_volfile (&graph, filepath);
volgen_graph_free (&graph);
return ret;
}
int
glusterd_delete_volfile (glusterd_volinfo_t *volinfo,

View File

@ -49,6 +49,12 @@ typedef enum {
GF_CLIENT_OTHER
} glusterd_client_type_t;
struct volgen_graph {
char **errstr;
glusterfs_graph_t graph;
};
typedef struct volgen_graph volgen_graph_t;
#define COMPLETE_OPTION(key, completion, ret) \
do { \
if (!strchr (key, '.')) { \
@ -119,6 +125,18 @@ struct volopt_map_entry {
//gf_boolean_t client_option;
};
int
glusterd_snapdsvc_create_volfile (glusterd_volinfo_t *volinfo);
int
glusterd_snapdsvc_generate_volfile (volgen_graph_t *graph,
glusterd_volinfo_t *volinfo);
int
glusterd_create_global_volfile (int (*builder) (volgen_graph_t *graph,
dict_t *set_dict),
char *filepath, dict_t *mod_dict);
int
glusterd_create_rb_volfiles (glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo);
@ -132,19 +150,17 @@ glusterd_create_volfiles_and_notify_services (glusterd_volinfo_t *volinfo);
void
glusterd_get_nfs_filepath (char *filename);
void glusterd_get_shd_filepath (char *filename);
void
glusterd_get_shd_filepath (char *filename);
int
glusterd_create_nfs_volfile ();
build_shd_graph (volgen_graph_t *graph, dict_t *mod_dict);
int
glusterd_create_shd_volfile ();
build_nfs_graph (volgen_graph_t *graph, dict_t *mod_dict);
int
glusterd_create_quotad_volfile ();
int
glusterd_create_snapd_volfile (glusterd_volinfo_t *volinfo);
build_quotad_graph (volgen_graph_t *graph, dict_t *mod_dict);
int
glusterd_delete_volfile (glusterd_volinfo_t *volinfo,

View File

@ -29,6 +29,10 @@
#include "glusterd-messages.h"
#include "run.h"
#include "glusterd-snapshot-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
#include "glusterd-shd-svc.h"
#include "glusterd-snapd-svc.h"
#include <stdint.h>
#include <sys/socket.h>
@ -1617,6 +1621,8 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
xlator_t *this = NULL;
this = THIS;
GF_ASSERT (this);
priv = this->private;
if (!priv) {
ret = -1;
@ -1704,7 +1710,7 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
break;
default:
if (!glusterd_is_nodesvc_online("glustershd")){
if (!priv->shd_svc.online) {
ret = -1;
*op_errstr = gf_strdup ("Self-heal daemon is "
"not running. Check self-heal "
@ -2133,6 +2139,12 @@ glusterd_op_create_volume (dict_t *dict, char **op_errstr)
volinfo->caps = caps;
ret = glusterd_snapdsvc_init (volinfo);
if (ret) {
*op_errstr = gf_strdup ("Failed to initialize snapd service");
goto out;
}
ret = glusterd_store_volinfo (volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
if (ret) {
glusterd_store_delete_volume (volinfo);
@ -2218,6 +2230,7 @@ glusterd_op_start_volume (dict_t *dict, char **op_errstr)
glusterd_brickinfo_t *brickinfo = NULL;
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
glusterd_svc_t *svc = NULL;
this = THIS;
GF_ASSERT (this);
@ -2266,11 +2279,14 @@ glusterd_op_start_volume (dict_t *dict, char **op_errstr)
if (ret)
goto out;
ret = glusterd_handle_snapd_option (volinfo);
if (ret)
goto out;
if (!volinfo->is_snap_volume) {
svc = &(volinfo->snapd.svc);
ret = svc->manager (svc, volinfo, PROC_START_NO_WAIT);
if (ret)
goto out;
}
ret = glusterd_nodesvcs_handle_graph_change (volinfo);
ret = glusterd_svcs_manager (volinfo);
out:
gf_log (this->name, GF_LOG_TRACE, "returning %d ", ret);
@ -2285,6 +2301,7 @@ glusterd_stop_volume (glusterd_volinfo_t *volinfo)
char mountdir[PATH_MAX] = {0,};
char pidfile[PATH_MAX] = {0,};
xlator_t *this = NULL;
glusterd_svc_t *svc = NULL;
this = THIS;
GF_ASSERT (this);
@ -2326,11 +2343,14 @@ glusterd_stop_volume (glusterd_volinfo_t *volinfo)
mountdir, strerror (errno));
}
ret = glusterd_handle_snapd_option (volinfo);
if (ret)
goto out;
if (!volinfo->is_snap_volume) {
svc = &(volinfo->snapd.svc);
ret = svc->manager (svc, volinfo, PROC_START_NO_WAIT);
if (ret)
goto out;
}
ret = glusterd_nodesvcs_handle_graph_change (volinfo);
ret = glusterd_svcs_manager (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to notify graph "
"change for %s volume", volinfo->volname);

View File

@ -38,6 +38,11 @@
#include "glusterd-hooks.h"
#include "glusterd-utils.h"
#include "glusterd-locks.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-shd-svc.h"
#include "glusterd-nfs-svc.h"
#include "glusterd-quotad-svc.h"
#include "glusterd-snapd-svc.h"
#include "common-utils.h"
#include "glusterd-geo-rep.h"
#include "run.h"
@ -1175,6 +1180,47 @@ out:
return ret;
}
static int
glusterd_svc_init_all ()
{
int ret = -1;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
this = THIS;
GF_ASSERT (this);
priv = this->private;
GF_ASSERT (priv);
/* Init SHD svc */
ret = glusterd_shdsvc_init (&(priv->shd_svc));
if (ret) {
gf_log (THIS->name, GF_LOG_ERROR, "Failed to init shd service");
goto out;
}
gf_log (THIS->name, GF_LOG_DEBUG, "shd service initialized");
/* Init NFS svc */
ret = glusterd_nfssvc_init (&(priv->nfs_svc));
if (ret) {
gf_log (THIS->name, GF_LOG_ERROR, "Failed to init nfs service");
goto out;
}
gf_log (THIS->name, GF_LOG_DEBUG, "nfs service initialized");
/* Init QuotaD svc */
ret = glusterd_quotadsvc_init (&(priv->quotad_svc));
if (ret) {
gf_log (THIS->name, GF_LOG_ERROR, "Failed to init quotad "
"service");
goto out;
}
gf_log (THIS->name, GF_LOG_DEBUG, "quotad service initialized");
out:
return ret;
}
/*
* init - called during glusterd initialization
@ -1461,14 +1507,6 @@ init (xlator_t *this)
gf_gld_mt_glusterd_conf_t);
GF_VALIDATE_OR_GOTO(this->name, conf, out);
conf->shd = GF_CALLOC (1, sizeof (nodesrv_t), gf_gld_mt_nodesrv_t);
GF_VALIDATE_OR_GOTO(this->name, conf->shd, out);
conf->nfs = GF_CALLOC (1, sizeof (nodesrv_t), gf_gld_mt_nodesrv_t);
GF_VALIDATE_OR_GOTO(this->name, conf->nfs, out);
conf->quotad = GF_CALLOC (1, sizeof (nodesrv_t),
gf_gld_mt_nodesrv_t);
GF_VALIDATE_OR_GOTO(this->name, conf->quotad, out);
INIT_LIST_HEAD (&conf->peers);
INIT_LIST_HEAD (&conf->xaction_peers);
INIT_LIST_HEAD (&conf->volumes);
@ -1518,7 +1556,6 @@ init (xlator_t *this)
this->private = conf;
glusterd_mgmt_v3_lock_init ();
glusterd_txn_opinfo_dict_init ();
(void) glusterd_nodesvc_set_online_status ("glustershd", _gf_false);
GLUSTERD_GET_HOOKS_DIR (hooks_dir, GLUSTERD_HOOK_VER, conf);
if (stat (hooks_dir, &buf)) {
@ -1548,6 +1585,26 @@ init (xlator_t *this)
if (ret)
goto out;
/* Restoring the op-version needs to be done before initializing the
* services, because glusterd_svc_init_common () invokes
* glusterd_conn_build_socket_filepath (), which uses the MY_UUID macro.
* MY_UUID generates a new uuid if one has not been generated yet and
* writes it to the info file. Since the op-version has not been read at
* that point, the default value, i.e. 0, would be written for the
* op-version and the restore would fail. This is why restoring the
* op-version needs to happen before service initialization.
* */
ret = glusterd_restore_op_version (this);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Failed to restore op_version");
goto out;
}
ret = glusterd_svc_init_all ();
if (ret)
goto out;
ret = glusterd_restore ();
if (ret < 0)
goto out;

View File

@ -32,6 +32,7 @@
#include "glusterd-mem-types.h"
#include "rpcsvc.h"
#include "glusterd-sm.h"
#include "glusterd-snapd-svc.h"
#include "glusterd1-xdr.h"
#include "protocol-common.h"
#include "glusterd-pmap.h"
@ -117,18 +118,6 @@ struct glusterd_volgen {
dict_t *dict;
};
typedef struct {
struct rpc_clnt *rpc;
gf_boolean_t online;
} nodesrv_t;
typedef struct {
struct rpc_clnt *rpc;
int port;
gf_boolean_t online;
gf_store_handle_t *handle;
} glusterd_snapd_t;
typedef struct {
struct _volfile_ctx *volfile;
pthread_mutex_t mutex;
@ -139,9 +128,9 @@ typedef struct {
uuid_t uuid;
char workdir[PATH_MAX];
rpcsvc_t *rpc;
nodesrv_t *shd;
nodesrv_t *nfs;
nodesrv_t *quotad;
glusterd_svc_t shd_svc;
glusterd_svc_t nfs_svc;
glusterd_svc_t quotad_svc;
struct pmap_registry *pmap;
struct list_head volumes;
struct list_head snapshots; /*List of snap volumes */
@ -381,7 +370,7 @@ struct glusterd_volinfo_ {
int refcnt;
gd_quorum_status_t quorum_status;
glusterd_snapd_t snapd;
glusterd_snapdsvc_t snapd;
};
typedef enum gd_snap_status_ {
@ -902,14 +891,6 @@ int
glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data);
int
glusterd_snapd_rpc_notify (struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data);
int
glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data);
int
glusterd_rpc_create (struct rpc_clnt **rpc, dict_t *options,
rpc_clnt_notify_t notify_fn, void *notify_data);