mirror of git://sourceware.org/git/lvm2.git
synced 2024-12-22 17:35:59 +03:00
a9fc137fd1
When the init scripts are run from within systemd, systemd needs to know the pidfile for it to work correctly when the daemon itself is killed. Otherwise, systemd keeps these services in "active" and "exited" state at the same time (it assumes RemainAfterExit=yes without the pidfile reference in the chkconfig header). See also https://bugzilla.redhat.com/show_bug.cgi?id=971819#c5.
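The header comments this fix relies on are the ones already present in the script below; in the template they look like this (configure replaces @CLVMD_PIDFILE@ with the real path at build time), and the pidfile: line is what lets systemd track the daemon's PID instead of assuming RemainAfterExit=yes:

    # chkconfig: - 24 76
    # description: Cluster daemon for userland logical volume management tools.
    # pidfile: @CLVMD_PIDFILE@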
220 lines
4.5 KiB
Bash
#!/bin/bash
#
# clvmd - Clustered LVM Daemon init script
#
# chkconfig: - 24 76
# description: Cluster daemon for userland logical volume management tools.
# pidfile: @CLVMD_PIDFILE@
#
# For Red-Hat-based distributions such as Fedora, RHEL, CentOS.
#
### BEGIN INIT INFO
# Provides: clvmd
# Required-Start: $local_fs@CLVMD_CMANAGERS@
# Required-Stop: $local_fs@CLVMD_CMANAGERS@
# Short-Description: Clustered LVM Daemon.
# Description: Cluster daemon for userland logical volume management tools.
### END INIT INFO

. /etc/rc.d/init.d/functions

DAEMON=clvmd

exec_prefix=@exec_prefix@
sbindir=@sbindir@

lvm_vgchange=${sbindir}/vgchange
lvm_vgdisplay=${sbindir}/vgdisplay
lvm_vgscan=${sbindir}/vgscan
lvm_lvdisplay=${sbindir}/lvdisplay

CLVMDOPTS="-T30"

[ -f /etc/sysconfig/cluster ] && . /etc/sysconfig/cluster
[ -f /etc/sysconfig/$DAEMON ] && . /etc/sysconfig/$DAEMON

[ -n "$CLVMD_CLUSTER_IFACE" ] && CLVMDOPTS="$CLVMDOPTS -I $CLVMD_CLUSTER_IFACE"

# Allow up to $CLVMD_STOP_TIMEOUT seconds for clvmd to complete exit operations;
# default to 10 seconds.

[ -z "$CLVMD_STOP_TIMEOUT" ] && CLVMD_STOP_TIMEOUT=10

LOCK_FILE="/var/lock/subsys/$DAEMON"

# NOTE: replace this with vgs, once display filter per attr is implemented.
clustered_vgs() {
    ${lvm_vgdisplay} 2>/dev/null | \
        awk 'BEGIN {RS="VG Name"} {if (/Clustered/) print $1;}'
}

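# List the active LVs inside each clustered VG by parsing lvdisplay output.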
clustered_active_lvs() {
    for i in $(clustered_vgs); do
        ${lvm_lvdisplay} $i 2>/dev/null | \
            awk 'BEGIN {RS="LV Name"} {if (/[^N^O^T] available/) print $1;}'
    done
}

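# Status helpers built on the status() function sourced from /etc/rc.d/init.d/functions;
# rh_status_q is the quiet variant used in conditionals.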
rh_status() {
    status $DAEMON
}

rh_status_q() {
    rh_status >/dev/null 2>&1
}

start()
{
    if ! rh_status_q; then
        echo -n "Starting $DAEMON: "
        $DAEMON $CLVMDOPTS || return $?
        echo
    fi

    # Refresh local cache.
    #
    # It's possible that new PVs were added to this or other VGs
    # while this node was down, so we run vgscan here to avoid
    # any potential "Missing UUID" messages with subsequent
    # LVM commands.

    # The following step would be better and more informative to the user:
    # 'action "Refreshing VG(s) local cache:" ${lvm_vgscan}'
    # but it could show warnings such as:
    # 'clvmd not running on node x-y-z Unable to obtain global lock.'
    # and the action would be shown as FAILED when in reality it didn't fail.
    # Ideally vgscan should have a startup mode that would not print
    # unnecessary warnings.

    ${lvm_vgscan} > /dev/null 2>&1

action "Activating VG(s):" ${lvm_vgchange} -aay $LVM_VGS || return $?
|
|
|
|
touch $LOCK_FILE
|
|
|
|
return 0
|
|
}
|
|
|
|
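# Poll once per second until clvmd has exited or CLVMD_STOP_TIMEOUT seconds
# have elapsed; the final check fails if the daemon is still running.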
wait_for_finish()
{
    count=0
    while [ "$count" -le "$CLVMD_STOP_TIMEOUT" ] && \
        rh_status_q; do
        sleep 1
        count=$((count+1))
    done

    ! rh_status_q
}

stop()
{
    rh_status_q || return 0

    [ -z "$LVM_VGS" ] && LVM_VGS="$(clustered_vgs)"
    if [ -n "$LVM_VGS" ]; then
        action "Deactivating clustered VG(s):" ${lvm_vgchange} -anl $LVM_VGS || return $?
    fi

    action "Signaling $DAEMON to exit" kill -TERM $(pidofproc $DAEMON) || return $?

    # Wait half a second before we start the waiting loop, or we will show
    # the loop for longer than really necessary.
    usleep 500000

    # clvmd could take some time to stop.
    rh_status_q && action "Waiting for $DAEMON to exit:" wait_for_finish

    if rh_status_q; then
        echo -n "$DAEMON failed to exit"
        failure
        echo
        return 1
    else
        echo -n "$DAEMON terminated"
        success
        echo
    fi

    rm -f $LOCK_FILE

    return 0
}

reload() {
    rh_status_q || exit 7
    action "Reloading $DAEMON configuration: " $DAEMON -R || return $?
}

restart() {
    # If stop fails, restart will return the error and not attempt
    # another start. Even if start is protected by rh_status_q,
    # which would avoid spawning another daemon, it would still try
    # to reactivate the VGs.

    # Try to get clvmd to restart itself. This will preserve
    # exclusive LV locks.
    action "Restarting $DAEMON: " $DAEMON -S

    # If that fails then do a normal stop & restart
    if [ $? != 0 ]; then
        stop && start
        return $?
    else
        touch $LOCK_FILE
        return 0
    fi
}

[ "$EUID" != "0" ] && {
|
|
echo "clvmd init script can only be executed as root user"
|
|
exit 4
|
|
}
|
|
|
|
# See how we were called.
|
|
case "$1" in
|
|
start)
|
|
start
|
|
rtrn=$?
|
|
;;
|
|
|
|
stop)
|
|
stop
|
|
rtrn=$?
|
|
;;
|
|
|
|
restart|force-reload)
|
|
restart
|
|
rtrn=$?
|
|
;;
|
|
|
|
condrestart|try-restart)
|
|
rh_status_q || exit 0
|
|
restart
|
|
rtrn=$?
|
|
;;
|
|
|
|
reload)
|
|
reload
|
|
rtrn=$?
|
|
;;
|
|
|
|
status)
|
|
rh_status
|
|
rtrn=$?
|
|
if [ $rtrn = 0 ]; then
|
|
cvgs="$(clustered_vgs)"
|
|
echo Clustered Volume Groups: ${cvgs:-"(none)"}
|
|
clvs="$(clustered_active_lvs)"
|
|
echo Active clustered Logical Volumes: ${clvs:-"(none)"}
|
|
fi
|
|
;;
|
|
|
|
*)
|
|
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
|
|
rtrn=2
|
|
;;
|
|
esac
|
|
|
|
exit $rtrn
|