#!/bin/sh
# Event script to manage lockd and statd in a cluster environment.
#
# Invoked by ctdbd as:  <script> <event> [args...]
# Events handled: startup, shutdown, takeip, releaseip, recovered, monitor.
# Unknown events fall through the case and exit 0 (success).

. /etc/ctdb/functions
loadconfig nfs

# Do nothing unless this node is configured to manage the NFS lock manager.
[ "$CTDB_MANAGES_NFSLOCK" = "yes" ] || exit 0

# A shared statd state directory is required; bail out quietly without one.
[ -z "$STATD_SHARED_DIRECTORY" ] && exit 0

cmd="$1"
shift

case "$cmd" in
     startup)
	/bin/mkdir -p /etc/ctdb/state/statd/ip

	# Wait until the shared statd directory is available on this node
	# before starting the lock manager.
	ctdb_wait_directories "nfslock" "$STATD_SHARED_DIRECTORY"

	# make sure the service is stopped first
	service nfslock stop > /dev/null 2>&1

	service nfslock start
	;;

     shutdown)
	service nfslock stop
	;;

     takeip)
	# After the shift above: $1 = interface, $2 = ip address.
	ip="$2"
	# Record that an IP moved so "recovered" knows to send statd
	# notifications.
	echo "$ip" >> /etc/ctdb/state/statd/restart
	# having a list of what IPs we have allows statd to do the right
	# thing via /etc/ctdb/statd-callout
	/bin/touch "/etc/ctdb/state/statd/ip/$ip"
	;;

     releaseip)
	ip="$2"
	echo "$ip" >> /etc/ctdb/state/statd/restart
	/bin/rm -f "/etc/ctdb/state/statd/ip/$ip"
	;;

     recovered)
	# if we have taken or released any ips we must send out
	# statd notifications to recover lost nfs locks
	[ -x /etc/ctdb/statd-callout ] && [ -f /etc/ctdb/state/statd/restart ] && {
		/etc/ctdb/statd-callout notify &
	} >/dev/null 2>&1
	/bin/rm -f /etc/ctdb/state/statd/restart
	;;

     monitor)
	# check that statd (program 100024) and lockd (program 100021)
	# respond to rpc requests
	ctdb_check_rpc "statd" 100024 1
	ctdb_check_rpc "lockd" 100021 1
	# NOTE(review): original left this expansion unquoted; quoted here on
	# the assumption it names a single directory (as at startup) — confirm
	# it is never a space-separated list.
	ctdb_check_directories "statd" "$STATD_SHARED_DIRECTORY"
	;;
esac

exit 0