NFS-Ganesha: Install scripts, config files, and resource agent scripts
Resubmitting after a gerrit bug bungled the merge of http://review.gluster.org/9621 (was it really a gerrit bug?) Scripts related to NFS-Ganesha are in extras/ganesha/scripts. Config files are in extras/ganesha/config. Resource Agent files are in extras/ganesha/ocf Files are copied to appropriate locations. Change-Id: I137169f4d653ee2b7d6df14d41e2babd0ae8d10c BUG: 1188184 Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com> Reviewed-on: http://review.gluster.org/9912 Tested-by: Gluster Build System <jenkins@build.gluster.com>
This commit is contained in:
parent
c8be9af5f5
commit
d81182cf69
@ -183,6 +183,10 @@ AC_CONFIG_FILES([Makefile
|
||||
extras/init.d/glusterd-Redhat
|
||||
extras/init.d/glusterd-FreeBSD
|
||||
extras/init.d/glusterd-SuSE
|
||||
extras/ganesha/Makefile
|
||||
extras/ganesha/config/Makefile
|
||||
extras/ganesha/scripts/Makefile
|
||||
extras/ganesha/ocf/Makefile
|
||||
extras/systemd/Makefile
|
||||
extras/systemd/glusterd.service
|
||||
extras/run-gluster.tmpfiles
|
||||
|
@ -2,7 +2,7 @@ EditorModedir = $(docdir)
|
||||
EditorMode_DATA = glusterfs-mode.el glusterfs.vim
|
||||
|
||||
SUBDIRS = init.d systemd benchmarking hook-scripts $(OCF_SUBDIR) LinuxRPM \
|
||||
$(GEOREP_EXTRAS_SUBDIR)
|
||||
$(GEOREP_EXTRAS_SUBDIR) ganesha
|
||||
|
||||
confdir = $(sysconfdir)/glusterfs
|
||||
conf_DATA = glusterfs-logrotate gluster-rsyslog-7.2.conf gluster-rsyslog-5.8.conf \
|
||||
|
2
extras/ganesha/Makefile.am
Normal file
2
extras/ganesha/Makefile.am
Normal file
@ -0,0 +1,2 @@
|
||||
SUBDIRS = scripts config ocf
|
||||
CLEANFILES =
|
4
extras/ganesha/config/Makefile.am
Normal file
4
extras/ganesha/config/Makefile.am
Normal file
@ -0,0 +1,4 @@
|
||||
EXTRA_DIST= ganesha-ha.conf.sample
|
||||
|
||||
confdir = $(sysconfdir)/ganesha
|
||||
conf_DATA = ganesha-ha.conf.sample
|
15
extras/ganesha/config/ganesha-ha.conf.sample
Normal file
15
extras/ganesha/config/ganesha-ha.conf.sample
Normal file
@ -0,0 +1,15 @@
|
||||
# Name of the HA cluster created.
|
||||
HA_NAME="ganesha-ha-360"
|
||||
# Shared volume to store NFS state.
|
||||
HA_VOL_NAME="ha-state-volname"
|
||||
# Mount point of the shared volume.
|
||||
HA_VOL_MNT="/mnt-nfs"
|
||||
# The server on which shared volume is created.
|
||||
HA_VOL_SERVER="server1"
|
||||
# The subset of nodes of the Gluster Trusted Pool
|
||||
# that forms the ganesha HA cluster. IP/Hostname
|
||||
# is specified.
|
||||
HA_CLUSTER_NODES="server1,server2,..."
|
||||
# Virtual IPs of each of the nodes specified above.
|
||||
VIP_server1="10.x.x.x"
|
||||
VIP_server2="10.x.x.x"
|
12
extras/ganesha/ocf/Makefile.am
Normal file
12
extras/ganesha/ocf/Makefile.am
Normal file
@ -0,0 +1,12 @@
|
||||
EXTRA_DIST= ganesha_grace ganesha_mon ganesha_nfsd
|
||||
|
||||
# The root of the OCF resource agent hierarchy
|
||||
# Per the OCF standard, it's always "lib",
|
||||
# not "lib64" (even on 64-bit platforms).
|
||||
ocfdir = $(prefix)/lib/ocf
|
||||
|
||||
# The provider directory
|
||||
radir = $(ocfdir)/resource.d/heartbeat
|
||||
|
||||
ra_SCRIPTS = ganesha_grace ganesha_mon ganesha_nfsd
|
||||
|
168
extras/ganesha/ocf/ganesha_grace
Normal file
168
extras/ganesha/ocf/ganesha_grace
Normal file
@ -0,0 +1,168 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (c) 2014 Anand Subramanian anands@redhat.com
|
||||
# Copyright (c) 2015 Red Hat Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of version 2 of the GNU General Public License as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it would be useful, but
|
||||
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
#
|
||||
# Further, this software is distributed without any warranty that it is
|
||||
# free of the rightful claim of any third person regarding infringement
|
||||
# or the like. Any license provided herein, whether implied or
|
||||
# otherwise, applies only to this software file. Patent licenses, if
|
||||
# any, provided herein do not apply to combinations of this program with
|
||||
# other software, or any other product whatsoever.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write the Free Software Foundation,
|
||||
# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
|
||||
#
|
||||
#
|
||||
|
||||
# Initialization:
|
||||
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
|
||||
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
|
||||
|
||||
if [ -n "$OCF_DEBUG_LIBRARY" ]; then
|
||||
. $OCF_DEBUG_LIBRARY
|
||||
else
|
||||
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
|
||||
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
|
||||
fi
|
||||
|
||||
ganesha_meta_data() {
|
||||
cat <<END
|
||||
<?xml version="1.0"?>
|
||||
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
|
||||
<resource-agent name="ganesha_grace">
|
||||
<version>1.0</version>
|
||||
|
||||
<longdesc lang="en">
|
||||
This Linux-specific resource agent acts as a dummy
|
||||
resource agent for nfs-ganesha.
|
||||
</longdesc>
|
||||
|
||||
<shortdesc lang="en">Manages the user-space nfs-ganesha NFS server</shortdesc>
|
||||
|
||||
<parameters>
|
||||
</parameters>
|
||||
|
||||
<actions>
|
||||
<action name="start" timeout="40s" />
|
||||
<action name="stop" timeout="40s" />
|
||||
<action name="status" depth="0" timeout="20s" interval="5s" />
|
||||
<action name="monitor" depth="0" timeout="20s" interval="5s" />
|
||||
<action name="meta-data" timeout="20s" />
|
||||
</actions>
|
||||
</resource-agent>
|
||||
END
|
||||
|
||||
return $OCF_SUCCESS
|
||||
}
|
||||
|
||||
ganesha_grace_usage() {
|
||||
echo "ganesha.nfsd USAGE"
|
||||
}
|
||||
|
||||
# Make sure meta-data and usage always succeed
|
||||
case $__OCF_ACTION in
|
||||
meta-data) ganesha_meta_data
|
||||
exit $OCF_SUCCESS
|
||||
;;
|
||||
usage|help) ganesha_usage
|
||||
exit $OCF_SUCCESS
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
ganesha_grace_start()
|
||||
{
|
||||
local result=""
|
||||
local resourcename=""
|
||||
local deadserver=""
|
||||
local tmpIFS=${IFS}
|
||||
|
||||
# logger "ganesha_grace_start()"
|
||||
# we're here because somewhere in the cluster one or more
|
||||
# of the ganesha.nfsds have died, triggering a floating IP
|
||||
# address to move. Resource constraint location rules ensure
|
||||
# that this is invoked before the floating IP is moved.
|
||||
if [ -d /proc/$(cat /var/run/ganesha.nfsd.pid) ]; then
|
||||
# my ganesha.nfsd is still running
|
||||
# find out which one died?
|
||||
|
||||
pcs status | grep dead_ip-1 | sort > /tmp/.pcs_status
|
||||
|
||||
result=$(diff /var/run/ganesha/pcs_status /tmp/.pcs_status | grep '^>')
|
||||
if [[ ${result} ]]; then
|
||||
# logger "ganesha_grace_start(), ${result}"
|
||||
IFS=$'\n'
|
||||
for line in ${result}; do
|
||||
resourcename=$(echo ${line} | cut -f 1 | cut -d ' ' -f 3)
|
||||
deadserver=${resourcename%"-dead_ip-1"}
|
||||
|
||||
if [[ ${deadserver} ]]; then
|
||||
# logger "ganesha_grace_start(), ${line}"
|
||||
# logger "ganesha_grace_start(), dbus-send --print-reply --system --dest=org.ganesha.nfsd /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.grace string:${deadserver}"
|
||||
dbus-send --print-reply --system --dest=org.ganesha.nfsd /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.grace string:${deadserver}
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "warning: dbus-send --print-reply --system --dest=org.ganesha.nfsd /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.grace string:${deadserver} failed"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
IFS=${tmpIFS}
|
||||
fi
|
||||
|
||||
fi
|
||||
return $OCF_SUCCESS
|
||||
}
|
||||
|
||||
ganesha_grace_stop()
|
||||
{
|
||||
|
||||
# logger "ganesha_grace_stop()"
|
||||
return $OCF_SUCCESS
|
||||
}
|
||||
|
||||
ganesha_grace_monitor()
|
||||
{
|
||||
# logger "ganesha_grace_monitor()"
|
||||
pcs status | grep dead_ip-1 | sort > /var/run/ganesha/pcs_status
|
||||
return $OCF_SUCCESS
|
||||
}
|
||||
|
||||
ganesha_grace_validate()
|
||||
{
|
||||
return $OCF_SUCCESS
|
||||
}
|
||||
|
||||
ganesha_grace_validate
|
||||
|
||||
# logger "ganesha_grace ${OCF_RESOURCE_INSTANCE} $__OCF_ACTION"
|
||||
|
||||
# Translate each action into the appropriate function call
|
||||
case $__OCF_ACTION in
|
||||
start) ganesha_grace_start
|
||||
;;
|
||||
stop) ganesha_grace_stop
|
||||
;;
|
||||
status|monitor) ganesha_grace_monitor
|
||||
;;
|
||||
*) ganesha_grace_usage
|
||||
exit $OCF_ERR_UNIMPLEMENTED
|
||||
;;
|
||||
esac
|
||||
|
||||
rc=$?
|
||||
|
||||
# The resource agent may optionally log a debug message
|
||||
ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION returned $rc"
|
||||
exit $rc
|
||||
|
159
extras/ganesha/ocf/ganesha_mon
Normal file
159
extras/ganesha/ocf/ganesha_mon
Normal file
@ -0,0 +1,159 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (c) 2014 Anand Subramanian anands@redhat.com
|
||||
# Copyright (c) 2015 Red Hat Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of version 2 of the GNU General Public License as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it would be useful, but
|
||||
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
#
|
||||
# Further, this software is distributed without any warranty that it is
|
||||
# free of the rightful claim of any third person regarding infringement
|
||||
# or the like. Any license provided herein, whether implied or
|
||||
# otherwise, applies only to this software file. Patent licenses, if
|
||||
# any, provided herein do not apply to combinations of this program with
|
||||
# other software, or any other product whatsoever.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write the Free Software Foundation,
|
||||
# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
|
||||
#
|
||||
#
|
||||
|
||||
# Initialization:
|
||||
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
|
||||
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
|
||||
|
||||
if [ -n "$OCF_DEBUG_LIBRARY" ]; then
|
||||
. $OCF_DEBUG_LIBRARY
|
||||
else
|
||||
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
|
||||
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
|
||||
fi
|
||||
|
||||
ganesha_meta_data() {
|
||||
cat <<END
|
||||
<?xml version="1.0"?>
|
||||
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
|
||||
<resource-agent name="ganesha_mon">
|
||||
<version>1.0</version>
|
||||
|
||||
<longdesc lang="en">
|
||||
This Linux-specific resource agent acts as a dummy
|
||||
resource agent for nfs-ganesha.
|
||||
</longdesc>
|
||||
|
||||
<shortdesc lang="en">Manages the user-space nfs-ganesha NFS server</shortdesc>
|
||||
|
||||
<parameters>
|
||||
</parameters>
|
||||
|
||||
<actions>
|
||||
<action name="start" timeout="40s" />
|
||||
<action name="stop" timeout="40s" />
|
||||
<action name="status" depth="0" timeout="20s" interval="10s" />
|
||||
<action name="monitor" depth="0" timeout="10s" interval="10s" />
|
||||
<action name="meta-data" timeout="20s" />
|
||||
</actions>
|
||||
</resource-agent>
|
||||
END
|
||||
|
||||
return $OCF_SUCCESS
|
||||
}
|
||||
|
||||
ganesha_mon_usage() {
|
||||
echo "ganesha.nfsd USAGE"
|
||||
}
|
||||
|
||||
# Make sure meta-data and usage always succeed
|
||||
case $__OCF_ACTION in
|
||||
meta-data) ganesha_meta_data
|
||||
exit $OCF_SUCCESS
|
||||
;;
|
||||
usage|help) ganesha_usage
|
||||
exit $OCF_SUCCESS
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
ganesha_mon_start()
|
||||
{
|
||||
return $OCF_SUCCESS
|
||||
}
|
||||
|
||||
ganesha_mon_stop()
|
||||
{
|
||||
return $OCF_SUCCESS
|
||||
}
|
||||
|
||||
ganesha_mon_monitor()
|
||||
{
|
||||
local short_host=$(hostname -s)
|
||||
|
||||
if [ -d /proc/$(cat /var/run/ganesha.nfsd.pid) ]; then
|
||||
# logger "ganesha_mon_monitor(), attrd_updater -n ganesha-active -v 1"
|
||||
pcs resource delete ${short_host}-dead_ip-1
|
||||
# if [ $? -ne 0 ]; then
|
||||
# logger "warning: pcs resource delete ${short_host}-dead_ip-1"
|
||||
# fi
|
||||
|
||||
sleep 1
|
||||
|
||||
attrd_updater -n ganesha-active -v 1
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "warning: attrd_updater -n ganesha-active -v 1 failed"
|
||||
fi
|
||||
|
||||
else
|
||||
# logger "ganesha_mon_monitor(), attrd_updater --D -n ganesha-active"
|
||||
|
||||
pcs resource create ${short_host}-dead_ip-1 ocf:heartbeat:Dummy
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "warning: pcs resource create ${short_host}-dead_ip-1 ocf:heartbeat:Dummy failed"
|
||||
fi
|
||||
|
||||
sleep 1
|
||||
|
||||
attrd_updater -D -n ganesha-active
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "warning: attrd_updater -D -n ganesha-active failed"
|
||||
fi
|
||||
fi
|
||||
|
||||
return $OCF_SUCCESS
|
||||
}
|
||||
|
||||
ganesha_mon_validate()
|
||||
{
|
||||
return $OCF_SUCCESS
|
||||
}
|
||||
|
||||
ganesha_mon_validate
|
||||
|
||||
# logger "ganesha_mon ${OCF_RESOURCE_INSTANCE} $__OCF_ACTION"
|
||||
|
||||
# Translate each action into the appropriate function call
|
||||
case $__OCF_ACTION in
|
||||
start) ganesha_mon_start
|
||||
;;
|
||||
stop) ganesha_mon_stop
|
||||
;;
|
||||
status|monitor) ganesha_mon_monitor
|
||||
;;
|
||||
*) ganesha_mon_usage
|
||||
exit $OCF_ERR_UNIMPLEMENTED
|
||||
;;
|
||||
esac
|
||||
|
||||
rc=$?
|
||||
|
||||
# The resource agent may optionally log a debug message
|
||||
ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION returned $rc"
|
||||
exit $rc
|
||||
|
196
extras/ganesha/ocf/ganesha_nfsd
Normal file
196
extras/ganesha/ocf/ganesha_nfsd
Normal file
@ -0,0 +1,196 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (c) 2014 Anand Subramanian anands@redhat.com
|
||||
# Copyright (c) 2015 Red Hat Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of version 2 of the GNU General Public License as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it would be useful, but
|
||||
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
#
|
||||
# Further, this software is distributed without any warranty that it is
|
||||
# free of the rightful claim of any third person regarding infringement
|
||||
# or the like. Any license provided herein, whether implied or
|
||||
# otherwise, applies only to this software file. Patent licenses, if
|
||||
# any, provided herein do not apply to combinations of this program with
|
||||
# other software, or any other product whatsoever.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write the Free Software Foundation,
|
||||
# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
|
||||
#
|
||||
#
|
||||
|
||||
# Initialization:
|
||||
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
|
||||
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
|
||||
|
||||
if [ -n "$OCF_DEBUG_LIBRARY" ]; then
|
||||
. $OCF_DEBUG_LIBRARY
|
||||
else
|
||||
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
|
||||
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
|
||||
fi
|
||||
|
||||
ganesha_meta_data() {
|
||||
cat <<END
|
||||
<?xml version="1.0"?>
|
||||
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
|
||||
<resource-agent name="ganesha_nfsd">
|
||||
<version>1.0</version>
|
||||
|
||||
<longdesc lang="en">
|
||||
This Linux-specific resource agent acts as a dummy
|
||||
resource agent for nfs-ganesha.
|
||||
</longdesc>
|
||||
|
||||
<shortdesc lang="en">Manages the user-space nfs-ganesha NFS server</shortdesc>
|
||||
|
||||
<parameters>
|
||||
<parameter name="ha_vol_name">
|
||||
<longdesc lang="en">HA State Volume Name</longdesc>
|
||||
<shortdesc lang="en">HA_State Volume Name</shortdesc>
|
||||
<content type="string" default="" />
|
||||
</parameter>
|
||||
|
||||
<parameter name="ha_vol_mnt">
|
||||
<longdesc lang="en">HA State Volume Mount Point</longdesc>
|
||||
<shortdesc lang="en">HA_State Volume Mount Point</shortdesc>
|
||||
<content type="string" default="" />
|
||||
</parameter>
|
||||
|
||||
<parameter name="ha_vol_server">
|
||||
<longdesc lang="en">HA State Volume Server</longdesc>
|
||||
<shortdesc lang="en">HA_State Volume Server</shortdesc>
|
||||
<content type="string" default="" />
|
||||
</parameter>
|
||||
</parameters>
|
||||
|
||||
<actions>
|
||||
<action name="start" timeout="40s" />
|
||||
<action name="stop" timeout="40s" />
|
||||
<action name="status" depth="0" timeout="20s" interval="1m" />
|
||||
<action name="monitor" depth="0" timeout="10s" interval="1m" />
|
||||
<action name="meta-data" timeout="20s" />
|
||||
</actions>
|
||||
</resource-agent>
|
||||
END
|
||||
|
||||
return $OCF_SUCCESS
|
||||
}
|
||||
|
||||
ganesha_nfsd_usage() {
|
||||
echo "ganesha.nfsd USAGE"
|
||||
}
|
||||
|
||||
# Make sure meta-data and usage always succeed
|
||||
case $__OCF_ACTION in
|
||||
meta-data) ganesha_meta_data
|
||||
exit $OCF_SUCCESS
|
||||
;;
|
||||
usage|help) ganesha_usage
|
||||
exit $OCF_SUCCESS
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
ganesha_nfsd_start()
|
||||
{
|
||||
return $OCF_SUCCESS
|
||||
}
|
||||
|
||||
ganesha_nfsd_stop()
|
||||
{
|
||||
local mounted=""
|
||||
local mntptinuse=""
|
||||
local instance_host=""
|
||||
local short_host=""
|
||||
local resource_prefix=${OCF_RESOURCE_INSTANCE:0:9}
|
||||
|
||||
if [ "X${resource_prefix}X" = "Xnfs_startX" ]; then
|
||||
|
||||
mounted=$(mount | grep $OCF_RESKEY_ha_vol_name)
|
||||
mntptinuse=$(mount | grep -o $OCF_RESKEY_ha_vol_mnt)
|
||||
short_host=$(hostname -s)
|
||||
long_host=$(hostname)
|
||||
|
||||
if [[ ! ${mounted} ]]; then
|
||||
|
||||
if [ -d $OCF_RESKEY_ha_vol_mnt ]; then
|
||||
if [[ ${mntptinuse} ]]; then
|
||||
return $OCF_ERR_GENERIC
|
||||
fi
|
||||
else
|
||||
mkdir ${mntpt}
|
||||
fi
|
||||
|
||||
mount -t glusterfs $OCF_RESKEY_ha_vol_server:$OCF_RESKEY_ha_vol_name $OCF_RESKEY_ha_vol_mnt
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "warning: mount -t glusterfs $OCF_RESKEY_ha_vol_server:$OCF_RESKEY_ha_vol_name $OCF_RESKEY_ha_vol_mnt failed"
|
||||
fi
|
||||
|
||||
mv /var/lib/nfs /var/lib/nfs.backup
|
||||
ln -s $OCF_RESKEY_ha_vol_mnt/${long_host}/nfs /var/lib/nfs
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "warning: ln -s $OCF_RESKEY_ha_vol_mnt/${long_host}/nfs /var/lib/nfs failed"
|
||||
fi
|
||||
|
||||
fi
|
||||
|
||||
service nfs-ganesha start
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "warning: service nfs-ganesha start failed"
|
||||
fi
|
||||
else
|
||||
umount $OCF_RESKEY_ha_vol_mnt
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "warning: umount $OCF_RESKEY_ha_vol_mnt failed"
|
||||
fi
|
||||
|
||||
service nfs-ganesha stop
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "warning: service nfs-ganesha stop failed"
|
||||
fi
|
||||
fi
|
||||
|
||||
return $OCF_SUCCESS
|
||||
}
|
||||
|
||||
ganesha_nfsd_monitor()
|
||||
{
|
||||
return $OCF_SUCCESS
|
||||
}
|
||||
|
||||
ganesha_nfsd_validate()
|
||||
{
|
||||
return $OCF_SUCCESS
|
||||
}
|
||||
|
||||
ganesha_nfsd_validate
|
||||
|
||||
# logger "ganesha_nfsd ${OCF_RESOURCE_INSTANCE} $__OCF_ACTION"
|
||||
|
||||
# Translate each action into the appropriate function call
|
||||
case $__OCF_ACTION in
|
||||
start) ganesha_nfsd_start
|
||||
;;
|
||||
stop) ganesha_nfsd_stop
|
||||
;;
|
||||
status|monitor) ganesha_nfsd_monitor
|
||||
;;
|
||||
*) ganesha_nfsd_usage
|
||||
exit $OCF_ERR_UNIMPLEMENTED
|
||||
;;
|
||||
esac
|
||||
|
||||
rc=$?
|
||||
|
||||
# The resource agent may optionally log a debug message
|
||||
ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION returned $rc"
|
||||
exit $rc
|
||||
|
4
extras/ganesha/scripts/Makefile.am
Normal file
4
extras/ganesha/scripts/Makefile.am
Normal file
@ -0,0 +1,4 @@
|
||||
EXTRA_DIST= ganesha-ha.sh dbus-send.sh create-export-ganesha.sh
|
||||
|
||||
scriptsdir = $(libexecdir)/ganesha
|
||||
scripts_DATA = create-export-ganesha.sh dbus-send.sh ganesha-ha.sh
|
55
extras/ganesha/scripts/create-export-ganesha.sh
Executable file
55
extras/ganesha/scripts/create-export-ganesha.sh
Executable file
@ -0,0 +1,55 @@
|
||||
#/bin/bash
|
||||
|
||||
#This script is called by glusterd when the user
|
||||
#tries to export a volume via NFS-Ganesha.
|
||||
#An export file specific to a volume
|
||||
#is created in GANESHA_DIR/exports.
|
||||
|
||||
GANESHA_DIR=$1
|
||||
VOL=$2
|
||||
|
||||
function check_cmd_status()
|
||||
{
|
||||
if [ "$1" != "0" ]
|
||||
then
|
||||
rm -rf $GANESHA_DIR/exports/export.$VOL.conf
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
if [ ! -d "$GANESHA_DIR/exports" ];
|
||||
then
|
||||
mkdir $GANESHA_DIR/exports
|
||||
check_cmd_status `echo $?`
|
||||
fi
|
||||
|
||||
CONF=$(cat /etc/sysconfig/ganesha | grep "CONFFILE" | cut -f 2 -d "=")
|
||||
check_cmd_status `echo $?`
|
||||
|
||||
|
||||
function write_conf()
|
||||
{
|
||||
echo -e "# WARNING : Using Gluster CLI will overwrite manual
|
||||
# changes made to this file. To avoid it, edit the
|
||||
# file, copy it over to all the NFS-Ganesha nodes
|
||||
# and run ganesha-ha.sh --refresh-config."
|
||||
|
||||
echo "EXPORT{"
|
||||
echo " Export_Id = 1;"
|
||||
echo " FSAL {"
|
||||
echo " name = "GLUSTER";"
|
||||
echo " hostname=\"localhost\";"
|
||||
echo " volume=\"$VOL\";"
|
||||
echo " }"
|
||||
echo " Access_type = RW;"
|
||||
echo ' Squash="No_root_squash";'
|
||||
echo " Pseudo=\"/$VOL\";"
|
||||
echo ' Protocols = "3,4" ;'
|
||||
echo ' Transports = "UDP,TCP";'
|
||||
echo ' SecType = "sys";'
|
||||
echo " }"
|
||||
}
|
||||
|
||||
write_conf $@ > $GANESHA_DIR/exports/export.$VOL.conf
|
||||
echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF
|
74
extras/ganesha/scripts/dbus-send.sh
Executable file
74
extras/ganesha/scripts/dbus-send.sh
Executable file
@ -0,0 +1,74 @@
|
||||
#/bin/bash
|
||||
|
||||
declare -i EXPORT_ID
|
||||
GANESHA_DIR=$1
|
||||
OPTION=$1
|
||||
VOL=$2
|
||||
|
||||
function check_cmd_status()
|
||||
{
|
||||
if [ "$1" != "0" ]
|
||||
then
|
||||
rm -rf $GANESHA_DIR/exports/export.$VOL.conf
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
#This function keeps track of export IDs and increments it with every new entry
|
||||
function dynamic_export_add()
|
||||
{
|
||||
count=`ls -l $GANESHA_DIR/exports/*.conf | wc -l`
|
||||
if [ "$count" = "1" ] ;
|
||||
then
|
||||
EXPORT_ID=2
|
||||
else
|
||||
#if [ -s /var/lib/ganesha/export_removed ];
|
||||
# then
|
||||
# EXPORT_ID=`head -1 /var/lib/ganesha/export_removed`
|
||||
# sed -i -e "1d" /var/lib/ganesha/export_removed
|
||||
# else
|
||||
|
||||
EXPORT_ID=`cat $GANESHA_DIR/.export_added`
|
||||
check_cmd_status `echo $?`
|
||||
EXPORT_ID=EXPORT_ID+1
|
||||
#fi
|
||||
fi
|
||||
echo $EXPORT_ID > $GANESHA_DIR/.export_added
|
||||
check_cmd_status `echo $?`
|
||||
sed -i s/Export_Id.*/"Export_Id= $EXPORT_ID ;"/ \
|
||||
$GANESHA_DIR/exports/export.$VOL.conf
|
||||
check_cmd_status `echo $?`
|
||||
dbus-send --print-reply --system \
|
||||
--dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
|
||||
org.ganesha.nfsd.exportmgr.AddExport string:$GANESHA_DIR/exports/export.$VOL.conf \
|
||||
string:"EXPORT(Path=/$VOL)"
|
||||
|
||||
}
|
||||
|
||||
#This function removes an export dynamically(uses the export_id of the export)
|
||||
function dynamic_export_remove()
|
||||
{
|
||||
removed_id=`cat $GANESHA_DIR/exports/export.$VOL.conf |\
|
||||
grep Export_Id | cut -d " " -f3`
|
||||
echo $removed_id
|
||||
check_cmd_status `echo $?`
|
||||
dbus-send --print-reply --system \
|
||||
--dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
|
||||
org.ganesha.nfsd.exportmgr.RemoveExport uint16:$removed_id
|
||||
check_cmd_status `echo $?`
|
||||
rm -rf $GANESHA_DIR/exports/export.$VOL.conf
|
||||
|
||||
}
|
||||
|
||||
if [ "$OPTION" = "on" ];
|
||||
then
|
||||
dynamic_export_add $@
|
||||
check_cmd_status `echo $?`
|
||||
fi
|
||||
|
||||
if [ "$OPTION" = "off" ];
|
||||
then
|
||||
dynamic_export_remove $@
|
||||
check_cmd_status `echo $?`
|
||||
fi
|
||||
|
665
extras/ganesha/scripts/ganesha-ha.sh
Executable file
665
extras/ganesha/scripts/ganesha-ha.sh
Executable file
@ -0,0 +1,665 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Pacemaker+Corosync High Availability for NFS-Ganesha
|
||||
#
|
||||
# setup, teardown, add-node, delete-node, refresh-config, and status
|
||||
#
|
||||
# Each participating node in the cluster is assigned a virtual IP (VIP)
|
||||
# which fails over to another node when its associated ganesha.nfsd dies
|
||||
# for any reason. After the VIP is moved to another node all the
|
||||
# ganesha.nfsds are send a signal using DBUS to put them into NFS GRACE.
|
||||
#
|
||||
# There are six resource agent types used: ganesha_mon, ganesha_grace,
|
||||
# ganesha_nfsd, IPaddr, and Dummy. ganesha_mon is used to monitor the
|
||||
# ganesha.nfsd. ganesha_grace is used to send the DBUS signal to put
|
||||
# the remaining ganesha.nfsds into grace. ganesha_nfsd is used to start
|
||||
# and stop the ganesha.nfsd during setup and teardown. IPaddr manages
|
||||
# the VIP. A Dummy resource named $hostname-trigger_ip-1 is used to
|
||||
# ensure that the NFS GRACE DBUS signal is sent after the VIP moves to
|
||||
# the new host.
|
||||
|
||||
HA_NUM_SERVERS=0
|
||||
HA_SERVERS=""
|
||||
HA_CONFDIR=""
|
||||
|
||||
RHEL6_PCS_CNAME_OPTION="--name"
|
||||
|
||||
check_cluster_exists()
|
||||
{
|
||||
local name=${1}
|
||||
local cluster_name=""
|
||||
|
||||
if [ -e /var/run/corosync.pid ]; then
|
||||
cluster_name=$(pcs status | grep "Cluster name:" | cut -d ' ' -f 3)
|
||||
if [ ${cluster_name} -a ${cluster_name} = ${name} ]; then
|
||||
logger "$name already exists, exiting"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
determine_servers()
|
||||
{
|
||||
local cmd=${1}
|
||||
local num_servers=0
|
||||
local tmp_ifs=${IFS}
|
||||
local ha_servers=""
|
||||
|
||||
if [[ "X${cmd}X" != "XteardownX" ]]; then
|
||||
IFS=$','
|
||||
for server in ${HA_CLUSTER_NODES} ; do
|
||||
num_servers=$(expr ${num_servers} + 1)
|
||||
done
|
||||
IFS=${tmp_ifs}
|
||||
HA_NUM_SERVERS=${num_servers}
|
||||
HA_SERVERS="${HA_CLUSTER_NODES//,/ }"
|
||||
else
|
||||
ha_servers=$(pcs status | grep "Online:" | grep -o '\[.*\]' | sed -e 's/\[//' | sed -e 's/\]//')
|
||||
IFS=$' '
|
||||
for server in ${ha_servers} ; do
|
||||
num_servers=$(expr ${num_servers} + 1)
|
||||
done
|
||||
IFS=${tmp_ifs}
|
||||
HA_NUM_SERVERS=${num_servers}
|
||||
HA_SERVERS="${ha_servers}"
|
||||
fi
|
||||
}
|
||||
|
||||
setup_cluster()
|
||||
{
|
||||
local name=${1}
|
||||
local num_servers=${2}
|
||||
local servers=${3}
|
||||
local unclean=""
|
||||
|
||||
logger "setting up cluster ${name} with the following ${servers}"
|
||||
|
||||
pcs cluster auth ${servers}
|
||||
# fedora pcs cluster setup ${name} ${servers}
|
||||
# rhel6 pcs cluster setup --name ${name} ${servers}
|
||||
pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} ${servers}
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} ${servers} failed"
|
||||
exit 1;
|
||||
fi
|
||||
pcs cluster start --all
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "pcs cluster start failed"
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
sleep 3
|
||||
unclean=$(pcs status | grep -u "UNCLEAN")
|
||||
while [[ "${unclean}X" = "UNCLEANX" ]]; do
|
||||
sleep 1
|
||||
unclean=$(pcs status | grep -u "UNCLEAN")
|
||||
done
|
||||
sleep 1
|
||||
|
||||
if [ ${num_servers} -lt 3 ]; then
|
||||
pcs property set no-quorum-policy=ignore
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "warning: pcs property set no-quorum-policy=ignore failed"
|
||||
fi
|
||||
fi
|
||||
pcs property set stonith-enabled=false
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "warning: pcs property set stonith-enabled=false failed"
|
||||
fi
|
||||
}
|
||||
|
||||
setup_finalize()
|
||||
{
|
||||
local cibfile=${1}
|
||||
local stopped=""
|
||||
|
||||
stopped=$(pcs status | grep -u "Stopped")
|
||||
while [[ "${stopped}X" = "StoppedX" ]]; do
|
||||
sleep 1
|
||||
stopped=$(pcs status | grep -u "Stopped")
|
||||
done
|
||||
|
||||
pcs status | grep dead_ip-1 | sort > /var/run/ganesha/pcs_status
|
||||
|
||||
}
|
||||
|
||||
teardown_cluster()
|
||||
{
|
||||
local name=${1}
|
||||
|
||||
logger "tearing down cluster $name"
|
||||
|
||||
for server in ${HA_SERVERS} ; do
|
||||
if [[ ${HA_CLUSTER_NODES} != *${server}* ]]; then
|
||||
logger "info: ${server} is not in config, removing"
|
||||
|
||||
pcs cluster stop ${server}
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "pcs cluster stop ${server}"
|
||||
fi
|
||||
|
||||
pcs cluster node remove ${server}
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "warning: pcs cluster node remove ${server} failed"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
# BZ 1193433 - pcs doesn't reload cluster.conf after modification
|
||||
# after teardown completes, a subsequent setup will appear to have
|
||||
# 'remembered' the deleted node. You can work around this by
|
||||
# issuing another `pcs cluster node remove $node`,
|
||||
# `crm_node -f -R $server`, or
|
||||
# `cibadmin --delete --xml-text '<node id="$server"
|
||||
# uname="$server"/>'
|
||||
|
||||
pcs cluster stop --all
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "warning pcs cluster stop --all failed"
|
||||
fi
|
||||
|
||||
pcs cluster destroy
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "error pcs cluster destroy failed"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
do_create_virt_ip_constraints()
|
||||
{
|
||||
local cibfile=${1}; shift
|
||||
local primary=${1}; shift
|
||||
local weight="1000"
|
||||
|
||||
# first a constraint location rule that says the VIP must be where
|
||||
# there's a ganesha.nfsd running
|
||||
pcs -f ${cibfile} constraint location ${primary}-cluster_ip-1 rule score=-INFINITY ganesha-active ne 1
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "warning: pcs constraint location ${primary}-cluster_ip-1 rule score=-INFINITY ganesha-active ne 1 failed"
|
||||
fi
|
||||
|
||||
# then a set of constraint location prefers to set the prefered order
|
||||
# for where a VIP should move
|
||||
while [[ ${1} ]]; do
|
||||
pcs -f ${cibfile} constraint location ${primary}-cluster_ip-1 prefers ${1}=${weight}
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "warning: pcs constraint location ${primary}-cluster_ip-1 prefers ${1}=${weight} failed"
|
||||
fi
|
||||
weight=$(expr ${weight} + 1000)
|
||||
shift
|
||||
done
|
||||
# and finally set the highest preference for the VIP to its home node
|
||||
# default weight when created is/was 100.
|
||||
# on Fedora setting appears to be additive, so to get the desired
|
||||
# value we adjust the weight
|
||||
# weight=$(expr ${weight} - 100)
|
||||
pcs -f ${cibfile} constraint location ${primary}-cluster_ip-1 prefers ${primary}=${weight}
|
||||
if [ $? -ne 0 ]; then
|
||||
logger "warning: pcs constraint location ${primary}-cluster_ip-1 prefers ${primary}=${weight} failed"
|
||||
fi
|
||||
}
|
||||
|
||||
wrap_create_virt_ip_constraints()
{
    # Build the failover preference list for one node's VIP and hand it
    # to do_create_virt_ip_constraints. The peer list is rotated so it
    # starts immediately after ${primary}: for a four node cluster the
    # result for node1 is "node2 node3 node4", for node2 it is
    # "node3 node4 node1", and so on.
    local cibfile=${1}; shift
    local primary=${1}; shift
    local head=""
    local tail=""
    local seen_primary=""
    local node=""

    for node in "$@"; do
        if [ "${node}" = "${primary}" ]; then
            seen_primary="yes"
        elif [ -n "${seen_primary}" ]; then
            # peers after the primary come first in the rotated list
            tail=${tail}" "${node}
        else
            # peers before the primary are appended at the end
            head=${head}" "${node}
        fi
    done
    do_create_virt_ip_constraints ${cibfile} ${primary} ${tail} ${head}
}
|
||||
|
||||
create_virt_ip_constraints()
{
    # Create the location-preference constraints for every server's VIP.
    # Arguments: cibfile, then the list of server names.
    local cibfile=${1}; shift
    local primary=""

    for primary in "$@"; do
        wrap_create_virt_ip_constraints ${cibfile} ${primary} ${HA_SERVERS}
    done
}
|
||||
|
||||
|
||||
setup_create_resources()
{
    # Create all pacemaker resources for a fresh HA cluster:
    #  - a transient nfs_start clone that mounts the shared HA-state
    #    volume and starts ganesha.nfsd on every node
    #  - the nfs-mon and nfs-grace monitor clones
    #  - one IPaddr (VIP) plus Dummy trigger resource per server, with
    #    colocation/ordering constraints tying them to nfs-grace-clone
    # Arguments: the list of HA server (node) names.
    # Reads globals: HA_VOL_NAME, HA_VOL_MNT, HA_VOL_SERVER, HA_SERVERS,
    # and the per-node VIP_<name> variables from the sourced config.
    local cibfile=$(mktemp -u)

    # mount the HA-state volume and start ganesha.nfsd on all nodes
    pcs resource create nfs_start ganesha_nfsd ha_vol_name=${HA_VOL_NAME} ha_vol_mnt=${HA_VOL_MNT} ha_vol_server=${HA_VOL_SERVER} --clone
    if [ $? -ne 0 ]; then
        logger "warning: pcs resource create nfs_start ganesha_nfsd --clone failed"
    fi
    sleep 1
    # cloned resources seem to never have their start() invoked when they
    # are created, but stop() is invoked when they are destroyed. Why???.
    # No matter, we don't want this resource agent hanging around anyway
    pcs resource delete nfs_start-clone
    if [ $? -ne 0 ]; then
        logger "warning: pcs resource delete nfs_start-clone failed"
    fi

    pcs resource create nfs-mon ganesha_mon --clone
    if [ $? -ne 0 ]; then
        logger "warning: pcs resource create nfs-mon ganesha_mon --clone failed"
    fi

    pcs resource create nfs-grace ganesha_grace --clone
    if [ $? -ne 0 ]; then
        logger "warning: pcs resource create nfs-grace ganesha_grace --clone failed"
    fi

    # stage the per-node resources in an offline copy of the CIB and
    # push the whole batch in one operation at the end
    pcs cluster cib ${cibfile}

    while [[ ${1} ]]; do

        # ipaddr=$(grep ^${1} ${HA_CONFIG_FILE} | cut -d = -f 2)
        # look the VIP up via indirect expansion: the config defines
        # VIP_<node>, with '-' in the node name mapped to '_'
        ipaddrx="VIP_${1//-/_}"

        ipaddr=${!ipaddrx}

        pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} cidr_netmask=32 op monitor interval=15s
        if [ $? -ne 0 ]; then
            # fixed: message previously said "interval=10s" although the
            # resource is actually created with a 15s monitor interval;
            # also added the missing colon after "warning"
            logger "warning: pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} cidr_netmask=32 op monitor interval=15s failed"
        fi

        pcs -f ${cibfile} resource create ${1}-trigger_ip-1 ocf:heartbeat:Dummy
        if [ $? -ne 0 ]; then
            logger "warning: pcs resource create ${1}-trigger_ip-1 ocf:heartbeat:Dummy failed"
        fi

        pcs -f ${cibfile} constraint colocation add ${1}-cluster_ip-1 with ${1}-trigger_ip-1
        if [ $? -ne 0 ]; then
            logger "warning: pcs constraint colocation add ${1}-cluster_ip-1 with ${1}-trigger_ip-1 failed"
        fi

        pcs -f ${cibfile} constraint order ${1}-trigger_ip-1 then nfs-grace-clone
        if [ $? -ne 0 ]; then
            logger "warning: pcs constraint order ${1}-trigger_ip-1 then nfs-grace-clone failed"
        fi

        pcs -f ${cibfile} constraint order nfs-grace-clone then ${1}-cluster_ip-1
        if [ $? -ne 0 ]; then
            logger "warning: pcs constraint order nfs-grace-clone then ${1}-cluster_ip-1 failed"
        fi

        shift
    done

    create_virt_ip_constraints ${cibfile} ${HA_SERVERS}

    pcs cluster cib-push ${cibfile}
    if [ $? -ne 0 ]; then
        # fixed: missing colon after "warning", for consistency with the
        # other log messages
        logger "warning: pcs cluster cib-push ${cibfile} failed"
    fi
    rm -f ${cibfile}
}
|
||||
|
||||
teardown_resources()
{
    # Delete the pacemaker resources created by setup_create_resources,
    # unmounting the shared HA-state volume and terminating ganesha.nfsd
    # on every node along the way.
    # Arguments: the list of HA server (node) names.

    # local mntpt=$(grep ha-vol-mnt ${HA_CONFIG_FILE} | cut -d = -f 2)

    # unmount the HA-state volume and terminate ganesha.nfsd on all nodes
    pcs resource create nfs_stop ganesha_nfsd ha_vol_name=dummy ha_vol_mnt=${HA_VOL_MNT} ha_vol_server=dummy --clone
    if [ $? -ne 0 ]; then
        logger "warning: pcs resource create nfs_stop ganesha_nfsd --clone failed"
    fi
    sleep 1
    # cloned resources seem to never have their start() invoked when they
    # are created, but stop() is invoked when they are destroyed. Why???.
    pcs resource delete nfs_stop-clone
    if [ $? -ne 0 ]; then
        logger "warning: pcs resource delete nfs_stop-clone failed"
    fi

    # remove the per-node VIP, trigger, and dead_ip resources
    while [[ ${1} ]]; do
        pcs resource delete ${1}-cluster_ip-1
        if [ $? -ne 0 ]; then
            logger "warning: pcs resource delete ${1}-cluster_ip-1 failed"
        fi
        pcs resource delete ${1}-trigger_ip-1
        if [ $? -ne 0 ]; then
            logger "warning: pcs resource delete ${1}-trigger_ip-1 failed"
        fi
        # NOTE(review): dead_ip resources presumably only exist after a
        # failover, so a failure here is the common case -- logged at
        # "info" rather than "warning"; confirm against the agent scripts
        pcs resource delete ${1}-dead_ip-1
        if [ $? -ne 0 ]; then
            logger "info: pcs resource delete ${1}-dead_ip-1 failed"
        fi
        shift
    done

    # delete -clone resource agents
    pcs resource delete nfs-mon-clone
    if [ $? -ne 0 ]; then
        logger "warning: pcs resource delete nfs-mon-clone failed"
    fi

    pcs resource delete nfs-grace-clone
    if [ $? -ne 0 ]; then
        logger "warning: pcs resource delete nfs-grace-clone failed"
    fi

}
|
||||
|
||||
|
||||
recreate_resources()
{
    # Recreate the per-node VIP (IPaddr) and Dummy trigger resources --
    # and their colocation/ordering constraints -- in an offline CIB,
    # for every existing server plus the node being added.
    # Arguments: cibfile, new node name, new node VIP, then the list of
    # existing server names.
    local cibfile=${1}; shift
    local add_node=${1}; shift
    local add_vip=${1}; shift

    while [[ ${1} ]]; do

        # ipaddr=$(grep ^${1} ${HA_CONFIG_FILE} | cut -d = -f 2)
        # look the VIP up via indirect expansion: the config defines
        # VIP_<node>, with '-' in the node name mapped to '_'
        ipaddrx="VIP_${1//-/_}"

        ipaddr=${!ipaddrx}

        pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} cidr_netmask=32 op monitor interval=15s
        if [ $? -ne 0 ]; then
            # fixed: message previously said "interval=10s" although the
            # resource is created with a 15s monitor interval; also added
            # the missing colon after "warning"
            logger "warning: pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} cidr_netmask=32 op monitor interval=15s failed"
        fi

        pcs -f ${cibfile} resource create ${1}-trigger_ip-1 ocf:heartbeat:Dummy
        if [ $? -ne 0 ]; then
            logger "warning: pcs resource create ${1}-trigger_ip-1 ocf:heartbeat:Dummy failed"
        fi

        pcs -f ${cibfile} constraint colocation add ${1}-cluster_ip-1 with ${1}-trigger_ip-1
        if [ $? -ne 0 ]; then
            logger "warning: pcs constraint colocation add ${1}-cluster_ip-1 with ${1}-trigger_ip-1 failed"
        fi

        pcs -f ${cibfile} constraint order ${1}-trigger_ip-1 then nfs-grace-clone
        if [ $? -ne 0 ]; then
            logger "warning: pcs constraint order ${1}-trigger_ip-1 then nfs-grace-clone failed"
        fi

        pcs -f ${cibfile} constraint order nfs-grace-clone then ${1}-cluster_ip-1
        if [ $? -ne 0 ]; then
            logger "warning: pcs constraint order nfs-grace-clone then ${1}-cluster_ip-1 failed"
        fi

        shift
    done

    # and the same set of resources/constraints for the node being added,
    # whose VIP is passed in explicitly rather than read from the config
    pcs -f ${cibfile} resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${add_vip} cidr_netmask=32 op monitor interval=15s
    if [ $? -ne 0 ]; then
        # fixed: same interval=10s / missing-colon issues as above
        logger "warning: pcs resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${add_vip} cidr_netmask=32 op monitor interval=15s failed"
    fi

    pcs -f ${cibfile} resource create ${add_node}-trigger_ip-1 ocf:heartbeat:Dummy
    if [ $? -ne 0 ]; then
        logger "warning: pcs resource create ${add_node}-trigger_ip-1 ocf:heartbeat:Dummy failed"
    fi

    pcs -f ${cibfile} constraint colocation add ${add_node}-cluster_ip-1 with ${add_node}-trigger_ip-1
    if [ $? -ne 0 ]; then
        logger "warning: pcs constraint colocation add ${add_node}-cluster_ip-1 with ${add_node}-trigger_ip-1 failed"
    fi

    pcs -f ${cibfile} constraint order ${add_node}-trigger_ip-1 then nfs-grace-clone
    if [ $? -ne 0 ]; then
        logger "warning: pcs constraint order ${add_node}-trigger_ip-1 then nfs-grace-clone failed"
    fi

    pcs -f ${cibfile} constraint order nfs-grace-clone then ${add_node}-cluster_ip-1
    if [ $? -ne 0 ]; then
        logger "warning: pcs constraint order nfs-grace-clone then ${add_node}-cluster_ip-1 failed"
    fi

}
|
||||
|
||||
|
||||
clear_and_recreate_resources()
{
    # Delete every existing per-node cluster_ip/trigger_ip resource from
    # the offline CIB (deleting a resource also clears its constraints),
    # then recreate them all -- including the new node's -- via
    # recreate_resources so the constraints can be rejiggered.
    # Arguments: cibfile, new node name, new node VIP, then the list of
    # existing server names.
    local cibfile=${1}; shift
    local add_node=${1}; shift
    local add_vip=${1}; shift

    while [[ ${1} ]]; do

        pcs -f ${cibfile} resource delete ${1}-cluster_ip-1
        if [ $? -ne 0 ]; then
            logger "warning: pcs -f ${cibfile} resource delete ${1}-cluster_ip-1"
        fi

        pcs -f ${cibfile} resource delete ${1}-trigger_ip-1
        if [ $? -ne 0 ]; then
            logger "warning: pcs -f ${cibfile} resource delete ${1}-trigger_ip-1"
        fi

        shift
    done

    recreate_resources ${cibfile} ${add_node} ${add_vip} ${HA_SERVERS}

}
|
||||
|
||||
|
||||
addnode_create_resources()
{
    # Add a new node to a running HA cluster: mount the shared HA-state
    # volume and start ganesha.nfsd on the new node, start pacemaker on
    # it, then rebuild all VIP/trigger resources and their constraints to
    # include the new node.
    # Arguments: new node name, new node VIP.
    # Reads/updates globals: HA_VOL_NAME, HA_VOL_MNT, HA_VOL_SERVER,
    # HA_SERVERS (extended with the new node).
    local add_node=${1}; shift
    local add_vip=${1}; shift
    local cibfile=$(mktemp -u)

    # mount the HA-state volume and start ganesha.nfsd on the new node
    pcs cluster cib ${cibfile}
    if [ $? -ne 0 ]; then
        logger "warning: pcs cluster cib ${cibfile} failed"
    fi

    pcs -f ${cibfile} resource create nfs_start-${add_node} ganesha_nfsd ha_vol_name=${HA_VOL_NAME} ha_vol_mnt=${HA_VOL_MNT} ha_vol_server=${HA_VOL_SERVER}
    if [ $? -ne 0 ]; then
        logger "warning: pcs -f ${cibfile} resource create nfs_start-${add_node} ganesha_nfsd ha_vol_name=${HA_VOL_NAME} ha_vol_mnt=${HA_VOL_MNT} ha_vol_server=${HA_VOL_SERVER} failed"
    fi

    # fixed: the constraint (and its log message) referenced ${newnode},
    # which is never defined; the parameter is ${add_node}, so the
    # location preference was previously created with an empty node name
    pcs -f ${cibfile} constraint location nfs_start-${add_node} prefers ${add_node}=INFINITY
    if [ $? -ne 0 ]; then
        logger "warning: pcs -f ${cibfile} constraint location nfs_start-${add_node} prefers ${add_node}=INFINITY failed"
    fi

    pcs -f ${cibfile} constraint order nfs_start-${add_node} then nfs-mon-clone
    if [ $? -ne 0 ]; then
        logger "warning: pcs -f ${cibfile} constraint order nfs_start-${add_node} then nfs-mon-clone failed"
    fi

    pcs cluster cib-push ${cibfile}
    if [ $? -ne 0 ]; then
        logger "warning: pcs cluster cib-push ${cibfile} failed"
    fi

    rm -f ${cibfile}

    # start HA on the new node
    pcs cluster start ${add_node}
    if [ $? -ne 0 ]; then
        logger "warning: pcs cluster start ${add_node} failed"
    fi

    # the one-shot nfs_start helper has done its job; remove it
    pcs resource delete nfs_start-${add_node}
    if [ $? -ne 0 ]; then
        logger "warning: pcs resource delete nfs_start-${add_node} failed"
    fi


    pcs cluster cib ${cibfile}
    if [ $? -ne 0 ]; then
        logger "warning: pcs cluster cib ${cibfile} failed"
    fi

    # delete all the -cluster_ip-1 and -trigger_ip-1 resources,
    # clearing their constraints, then create them again so we can
    # rejigger their constraints
    clear_and_recreate_resources ${cibfile} ${add_node} ${add_vip} ${HA_SERVERS}

    HA_SERVERS="${HA_SERVERS} ${add_node}"

    create_virt_ip_constraints ${cibfile} ${HA_SERVERS}

    pcs cluster cib-push ${cibfile}
    if [ $? -ne 0 ]; then
        logger "warning: pcs cluster cib-push ${cibfile} failed"
    fi
}
|
||||
|
||||
|
||||
deletenode_delete_resources()
{
    # Remove the per-node pacemaker resources for ${node} when a node is
    # deleted from the HA cluster, using an offline CIB copy.
    # Arguments: the node name being deleted.
    local node=${1}; shift
    # fixed: ${cibfile} was never defined in this function, so the pcs
    # cib / cib-push calls below operated on an empty filename and failed
    local cibfile=$(mktemp -u)

    pcs cluster cib ${cibfile}
    if [ $? -ne 0 ]; then
        logger "warning: pcs cluster cib ${cibfile} failed"
    fi

    # TODO(review): nothing is actually deleted from the offline CIB
    # between the fetch and the push; presumably the ${node}-cluster_ip-1
    # and ${node}-trigger_ip-1 resources should be removed here -- the
    # implementation appears incomplete

    pcs cluster cib-push ${cibfile}
    if [ $? -ne 0 ]; then
        logger "warning: pcs cluster cib-push ${cibfile} failed"
    fi

    rm -f ${cibfile}
}
|
||||
|
||||
setup_state_volume()
{
    # Populate the shared HA-state gluster volume with the per-node
    # directory trees that ganesha.nfsd (v4 recovery) and statd expect,
    # plus cross-node symlinks so each node can reach every other node's
    # recovery state.
    # Arguments: the list of HA server (node) names (short names; the
    # local host's domain suffix is appended to each).
    local mnt=$(mktemp -d)
    local longname=""
    local shortname=""
    local dname=""

    # temporarily mount the HA-state volume to build the tree
    mount -t glusterfs ${HA_VOL_SERVER}:/${HA_VOL_NAME} ${mnt}

    # derive the local domain suffix (e.g. ".example.com") by stripping
    # the short hostname prefix from the FQDN
    longname=$(hostname)
    dname=${longname#$(hostname -s)}

    while [[ ${1} ]]; do
        # NOTE(review): mkdir without -p will emit errors if the tree
        # already exists (e.g. on a re-run) -- confirm that is acceptable
        mkdir ${mnt}/${1}${dname}
        mkdir ${mnt}/${1}${dname}/nfs
        mkdir ${mnt}/${1}${dname}/nfs/ganesha
        mkdir ${mnt}/${1}${dname}/nfs/statd
        touch ${mnt}/${1}${dname}/nfs/state
        mkdir ${mnt}/${1}${dname}/nfs/ganesha/v4recov
        mkdir ${mnt}/${1}${dname}/nfs/ganesha/v4old
        mkdir ${mnt}/${1}${dname}/nfs/statd/sm
        mkdir ${mnt}/${1}${dname}/nfs/statd/sm.bak
        mkdir ${mnt}/${1}${dname}/nfs/statd/state
        # link every peer's ganesha/statd state into this node's tree
        # NOTE(review): the symlink targets embed the temporary mount
        # point ${mnt}, which goes away after the umount below -- these
        # links will dangle unless the volume is later mounted at the
        # same path; relative targets may be intended, confirm
        for server in ${HA_SERVERS} ; do
            if [ ${server} != ${1}${dname} ]; then
                ln -s ${mnt}/${server}/nfs/ganesha ${mnt}/${1}${dname}/nfs/ganesha/${server}
                ln -s ${mnt}/${server}/nfs/statd ${mnt}/${1}${dname}/nfs/statd/${server}
            fi
        done
        shift
    done

    umount ${mnt}
    rmdir ${mnt}
}
|
||||
|
||||
main()
{
    # Entry-point dispatcher.
    # Usage: ganesha-ha.sh <setup|teardown|add|delete|status> <confdir> [args...]
    local cmd=${1}; shift
    # config directory, kept global (not local) -- presumably read by
    # other functions or the sourced config; confirm before localizing
    HA_CONFDIR=${1}; shift
    local node=""
    local vip=""

    # pull in HA_NAME, HA_VOL_*, VIP_* etc. from the cluster config
    . ${HA_CONFDIR}/ganesha-ha.conf

    # NOTE(review): /etc/os-release is used as a "not RHEL6" marker to
    # clear the RHEL6-specific pcs cluster-name option -- confirm, since
    # most modern distros ship this file
    if [ -e /etc/os-release ]; then
        RHEL6_PCS_CNAME_OPTION=""
    fi

    case "${cmd}" in

    setup | --setup)
        logger "setting up ${HA_NAME}"

        check_cluster_exists ${HA_NAME}

        determine_servers "setup"

        # HA requires more than one server
        if [ "X${HA_NUM_SERVERS}X" != "X1X" ]; then

            # setup_state_volume ${HA_SERVERS}

            setup_cluster ${HA_NAME} ${HA_NUM_SERVERS} "${HA_SERVERS}"

            setup_create_resources ${HA_SERVERS}

            setup_finalize
        else

            logger "insufficient servers for HA, aborting"
        fi
        ;;

    teardown | --teardown)
        logger "tearing down ${HA_NAME}"

        determine_servers "teardown"

        teardown_resources ${HA_SERVERS}

        teardown_cluster ${HA_NAME}
        ;;

    add | --add)
        # extra args: node name, node VIP
        node=${1}; shift
        vip=${1}; shift

        logger "adding ${node} with ${vip} to ${HA_NAME}"

        determine_servers "add"

        pcs cluster node add ${node}
        if [ $? -ne 0 ]; then
            logger "warning: pcs cluster node add ${node} failed"
        fi

        addnode_create_resources ${node} ${vip}

        ;;

    delete | --delete)
        # extra arg: node name
        node=${1}; shift

        logger "deleting ${node} from ${HA_NAME}"

        determine_servers "delete"

        deletenode_delete_resources ${node}

        pcs cluster node remove ${node}
        if [ $? -ne 0 ]; then
            logger "warning: pcs cluster node remove ${node} failed"
        fi

        ;;

    status | --status)
        # not implemented yet
        ;;

    refresh-config | --refresh-config)
        # not implemented yet
        ;;

    *)
        logger "Usage: ganesha-ha.sh setup|teardown|add|delete|status"
        ;;

    esac
}
|
||||
|
||||
# Entry point: forward the script's arguments to main().
# fixed: use "$@" rather than $* so arguments containing whitespace are
# preserved as distinct words instead of being re-split
main "$@"
|
||||
|
@ -349,6 +349,26 @@ is in user space and easily manageable.
|
||||
|
||||
This package provides support to FUSE based clients.
|
||||
|
||||
%package ganesha
|
||||
Summary: NFS-Ganesha configuration
|
||||
Group: Applications/File
|
||||
|
||||
Requires: %{name}-server = %{version}-%{release}
|
||||
Requires: nfs-ganesha-gluster
|
||||
Requires: pcs
|
||||
|
||||
%description ganesha
|
||||
GlusterFS is a distributed file-system capable of scaling to several
|
||||
petabytes. It aggregates various storage bricks over Infiniband RDMA
|
||||
or TCP/IP interconnect into one large parallel network file
|
||||
system. GlusterFS is one of the most sophisticated file systems in
|
||||
terms of features and extensibility. It borrows a powerful concept
|
||||
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
|
||||
is in user space and easily manageable.
|
||||
|
||||
This package provides the configuration and related files for using
|
||||
NFS-Ganesha as the NFS server using GlusterFS.
|
||||
|
||||
%if ( 0%{!?_without_georeplication:1} )
|
||||
%package geo-replication
|
||||
Summary: GlusterFS Geo-replication
|
||||
@ -443,7 +463,7 @@ Group: System Environment/Base
|
||||
Group: Productivity/Clustering/HA
|
||||
%endif
|
||||
# for glusterd
|
||||
Requires: glusterfs-server
|
||||
Requires: %{name}-server
|
||||
# depending on the distribution, we need pacemaker or resource-agents
|
||||
Requires: %{_prefix}/lib/ocf/resource.d
|
||||
|
||||
@ -934,6 +954,11 @@ fi
|
||||
%endif
|
||||
%endif
|
||||
|
||||
%files ganesha
|
||||
%{_sysconfdir}/ganesha/*
|
||||
%{_libexecdir}/ganesha/*
|
||||
%{_prefix}/lib/ocf/resource.d/heartbeat/*
|
||||
|
||||
%if ( 0%{!?_without_georeplication:1} )
|
||||
%files geo-replication
|
||||
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs-georep
|
||||
@ -1054,6 +1079,9 @@ fi
|
||||
%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/run/nfs.pid
|
||||
|
||||
%changelog
|
||||
* Tue Mar 17 2015 Kaleb S. KEITHLEY <kkeithle@redhat.com>
|
||||
- glusterfs-ganesha sub-package
|
||||
|
||||
* Thu Mar 12 2015 Kotresh H R <khiremat@redhat.com>
|
||||
- gfind_missing_files tool is included (#1187140)
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user