
ctdb-scripts: Call out to ctdb_lvs helper from 91.lvs

To keep this commit comprehensible, 91.lvs and the CTDB CLI tool are
temporarily inconsistent.  The tool will be made consistent in a
subsequent commit.

LVS now uses a configuration file specified by CTDB_LVS_NODES and
supports the same slave-only syntax as CTDB_NATGW_NODES.  LVS also
uses the new variable CTDB_LVS_PUBLIC_IFACE instead of
CTDB_PUBLIC_INTERFACE.

Update unit tests and documentation.

Note that the --lvs and --single-public-ip daemon options are no
longer used.  These will be removed and relevant documentation
updated in a subsequent commit.

Signed-off-by: Martin Schwenke <martin@meltin.net>
Reviewed-by: Amitay Isaacs <amitay@gmail.com>
Martin Schwenke 2016-04-08 16:21:08 +10:00 committed by Amitay Isaacs
parent b7376861cb
commit ce33a7cb1e
7 changed files with 244 additions and 90 deletions

View File

@ -163,7 +163,6 @@ build_ctdb_options ()
maybe_set "--start-as-stopped " "$CTDB_START_AS_STOPPED" "yes"
maybe_set "--no-recmaster" "$CTDB_CAPABILITY_RECMASTER" "no"
maybe_set "--no-lmaster" "$CTDB_CAPABILITY_LMASTER" "no"
maybe_set "--lvs --single-public-ip" "$CTDB_LVS_PUBLIC_IP"
maybe_set "--script-log-level" "$CTDB_SCRIPT_LOG_LEVEL"
maybe_set "--max-persistent-check-errors" "$CTDB_MAX_PERSISTENT_CHECK_ERRORS"
}
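
For context, "maybe_set" is a helper defined elsewhere in this file.
The sketch below is an assumption about its behaviour, not code from
this commit: with two arguments it appends the flag and the variable's
value to the daemon options when the variable is set; with a third
argument it appends just the bare flag when the variable equals that
value (as the "yes"/"no" cases above suggest).

# Hypothetical sketch of maybe_set (assumed, not from this commit)
# Usage: maybe_set FLAG VALUE [REQUIRED_VALUE]
maybe_set ()
{
    # Variable unset or empty: add nothing
    [ -n "$2" ] || return 0
    if [ -n "$3" ] ; then
        # Add only the bare flag(s), and only on an exact match
        if [ "$2" = "$3" ] ; then
            CTDB_OPTIONS="${CTDB_OPTIONS} $1"
        fi
    else
        # Otherwise pass the value as the flag's argument
        CTDB_OPTIONS="${CTDB_OPTIONS} $1 $2"
    fi
}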

View File

@ -8,16 +8,47 @@
loadconfig ctdb
[ -z "$CTDB_LVS_PUBLIC_IP" ] && exit 0
[ -z "$CTDB_PUBLIC_INTERFACE" ] && exit 0
[ -n "$CTDB_LVS_NODES" ] || exit 0
export CTDB_LVS_NODES
if ! type ipvsadm >/dev/null 2>&1 ; then
echo "LVS configured but ipvsadm not found"
exit 0
fi
lvs_slave_only ()
{
ctdb_get_ip_address
awk -v my_ip="$ip_address" \
'$1 == my_ip { if ($2 ~ "slave-only") { exit 0 } else { exit 1 } }' \
"$CTDB_LVS_NODES"
}
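# Illustrative note (not part of the commit): given a hypothetical
# $CTDB_LVS_NODES file containing
#     192.168.1.2
#     192.168.1.3
#     192.168.1.4 slave-only
# a node whose address is 192.168.1.4 matches the last line, awk finds
# "slave-only" in the second field and exits 0, so lvs_slave_only
# succeeds.  On 192.168.1.2 the second field is empty, awk exits 1 and
# the function fails.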
lvs_check_config ()
{
[ -r "$CTDB_LVS_NODES" ] || \
die "error: CTDB_LVS_NODES=${CTDB_LVS_NODES} unreadable"
[ -n "$CTDB_LVS_PUBLIC_IP" ] || \
die "Invalid configuration: CTDB_LVS_PUBLIC_IP not set"
if ! lvs_slave_only ; then
[ -n "$CTDB_LVS_PUBLIC_IFACE" ] || \
die "Invalid configuration: CTDB_LVS_PUBLIC_IFACE not set"
fi
if [ "$CTDB_PARTIALLY_ONLINE_INTERFACES" = "yes" ] ; then
die "Invalid configuration: CTDB_PARTIALLY_ONLINE_INTERFACES=yes incompatible with LVS"
fi
}
case "$1" in
setup)
lvs_check_config
;;
startup)
lvs_check_config
ipvsadm -D -t "$CTDB_LVS_PUBLIC_IP" >/dev/null 2>&1
ipvsadm -D -u "$CTDB_LVS_PUBLIC_IP" >/dev/null 2>&1
@ -30,6 +61,8 @@ startup)
;;
shutdown)
lvs_check_config
ipvsadm -D -t "$CTDB_LVS_PUBLIC_IP"
ipvsadm -D -u "$CTDB_LVS_PUBLIC_IP"
@ -39,14 +72,16 @@ shutdown)
;;
ipreallocated)
lvs_check_config
# Kill connections
ipvsadm -D -t "$CTDB_LVS_PUBLIC_IP" >/dev/null 2>&1
ipvsadm -D -u "$CTDB_LVS_PUBLIC_IP" >/dev/null 2>&1
kill_tcp_connections_local_only \
"$CTDB_PUBLIC_INTERFACE" "$CTDB_LVS_PUBLIC_IP"
"$CTDB_LVS_PUBLIC_IFACE" "$CTDB_LVS_PUBLIC_IP"
ctdb_get_pnn
lvsmaster=$(ctdb lvsmaster | sed -n -e 's/Node \([0-9]*\) is LVS master/\1/p')
lvsmaster=$("${CTDB_HELPER_BINDIR}/ctdb_lvs" master | sed -n -e 's/Node \([0-9]*\) is LVS master/\1/p')
if [ "$pnn" != "$lvsmaster" ] ; then
# This node is not the LVS master so change the IP address
# to have scope "host" so this node won't respond to ARPs
@ -63,7 +98,7 @@ ipreallocated)
ipvsadm -A -u "$CTDB_LVS_PUBLIC_IP" -p 1999999 -s lc
# Add all nodes (except this node) as LVS servers
ctdb lvs |
"${CTDB_HELPER_BINDIR}/ctdb_lvs" list |
awk -F: -v pnn="$pnn" '$1 != pnn { print $2 }' |
while read ip ; do
ipvsadm -a -t "$CTDB_LVS_PUBLIC_IP" -r $ip -g
@ -75,7 +110,7 @@ ipreallocated)
ipvsadm -a -u "$CTDB_LVS_PUBLIC_IP" -r 127.0.0.1
ctdb gratiousarp \
"$CTDB_LVS_PUBLIC_IP" "$CTDB_PUBLIC_INTERFACE" >/dev/null 2>&1
"$CTDB_LVS_PUBLIC_IP" "$CTDB_LVS_PUBLIC_IFACE" >/dev/null 2>&1
flush_route_cache
;;
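
As a rough illustration of the ipreallocated logic above (addresses
assumed, not taken from the commit): on the elected LVS master, with
CTDB_LVS_PUBLIC_IP=10.1.1.237 and peers at 192.168.1.3 and
192.168.1.4, the event amounts to something like:

# Hypothetical resulting ipvsadm calls on the master (PNN 0)
ipvsadm -A -t 10.1.1.237 -p 1999999 -s lc    # TCP service, least-connection
ipvsadm -A -u 10.1.1.237 -p 1999999 -s lc    # UDP service
ipvsadm -a -t 10.1.1.237 -r 192.168.1.3 -g   # peers added as
ipvsadm -a -t 10.1.1.237 -r 192.168.1.4 -g   #   direct-routing
ipvsadm -a -u 10.1.1.237 -r 192.168.1.3 -g   #   real servers
ipvsadm -a -u 10.1.1.237 -r 192.168.1.4 -g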

View File

@ -436,21 +436,6 @@ Node 3:/usr/local/etc/ctdb/public_addresses
</listitem>
</varlistentry>
<varlistentry>
<term>LVS</term>
<listitem>
<para>
Indicates that a node is configued in Linux Virtual Server
(LVS) mode. In this mode the entire CTDB cluster uses one
single public address for the entire cluster instead of
using multiple public addresses in failover mode. This is
an alternative to using a load-balancing layer-4 switch.
See the <citetitle>LVS</citetitle> section for more
details.
</para>
</listitem>
</varlistentry>
</variablelist>
<para>
@ -477,25 +462,35 @@ Node 3:/usr/local/etc/ctdb/public_addresses
</para>
<para>
In this mode the cluster selects a set of nodes in the cluster
and loadbalance all client access to the LVS address across this
set of nodes. This set of nodes are all LVS capable nodes that
are HEALTHY, or if no HEALTHY nodes exists all LVS capable nodes
regardless of health status. LVS will however never loadbalance
traffic to nodes that are BANNED, STOPPED, DISABLED or
DISCONNECTED. The <command>ctdb lvs</command> command is used to
show which nodes are currently load-balanced across.
One extra LVS public address is assigned on the public network
to each LVS group. Each LVS group is a set of nodes in the
cluster that presents the same LVS public address to the
outside world. Normally there would only be one LVS group
spanning an entire cluster, but in situations where one CTDB
cluster spans multiple physical sites it might be useful to have
one LVS group for each site. There can be multiple LVS groups
in a cluster but each node can only be a member of one LVS group.
</para>
<para>
One of the these nodes are elected as the LVSMASTER. This node
receives all traffic from clients coming in to the LVS address
and multiplexes it across the internal network to one of the
nodes that LVS is using. When responding to the client, that
node will send the data back directly to the client, bypassing
the LVSMASTER node. The command <command>ctdb
lvsmaster</command> will show which node is the current
LVSMASTER.
Client access to the cluster is load-balanced across the HEALTHY
nodes in an LVS group.  If no HEALTHY nodes exist then all
nodes in the group are used, regardless of health status. CTDB
will, however, never load-balance LVS traffic to nodes that are
BANNED, STOPPED, DISABLED or DISCONNECTED. The <command>ctdb
lvs</command> command is used to show which nodes are currently
load-balanced across.
</para>
<para>
In each LVS group, one of the nodes is selected by CTDB to be
the LVS master. This node receives all traffic from clients
coming in to the LVS public address and multiplexes it across
the internal network to one of the nodes that LVS is using.
When responding to the client, that node will send the data back
directly to the client, bypassing the LVS master node. The
command <command>ctdb lvsmaster</command> will show which node
is the current LVS master.
</para>
<para>
@ -525,7 +520,7 @@ Node 3:/usr/local/etc/ctdb/public_addresses
</orderedlist>
</para>
<para>
<para>
This means that all incoming traffic to the cluster will pass
through one physical node, which limits scalability. You can
send more data to the LVS address than one physical node can
@ -565,20 +560,50 @@ Node 3:/usr/local/etc/ctdb/public_addresses
<para>
To activate LVS on a CTDB node you must specify the
<varname>CTDB_PUBLIC_INTERFACE</varname> and
<varname>CTDB_LVS_PUBLIC_IP</varname> configuration variables.
Setting the latter variable also enables the LVS capability on
the node at startup.
<varname>CTDB_LVS_PUBLIC_IFACE</varname>,
<varname>CTDB_LVS_PUBLIC_IP</varname> and
<varname>CTDB_LVS_NODES</varname> configuration variables.
<varname>CTDB_LVS_NODES</varname> specifies a file containing
the private addresses of all nodes in the current node's LVS
group.
</para>
<para>
Example:
<screen format="linespecific">
CTDB_PUBLIC_INTERFACE=eth1
CTDB_LVS_PUBLIC_IFACE=eth1
CTDB_LVS_PUBLIC_IP=10.1.1.237
CTDB_LVS_NODES=/usr/local/etc/ctdb/lvs_nodes
</screen>
</para>
<para>
Example <filename>/usr/local/etc/ctdb/lvs_nodes</filename>:
</para>
<screen format="linespecific">
192.168.1.2
192.168.1.3
192.168.1.4
</screen>
<para>
Normally any node in an LVS group can act as the LVS master.
Nodes that are highly loaded due to other demands may be
flagged with the "slave-only" option in the
<varname>CTDB_LVS_NODES</varname> file to limit the LVS
functionality of those nodes.
</para>
<para>
An LVS nodes file that excludes 192.168.1.4 from being
the LVS master node:
</para>
<screen format="linespecific">
192.168.1.2
192.168.1.3
192.168.1.4 slave-only
</screen>
</refsect2>
</refsect1>
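
For illustration only, here is hypothetical output for the two
commands mentioned above, assuming a three-node LVS group with node 0
currently elected as master:

$ ctdb lvsmaster
Node 0 is LVS master
$ ctdb lvs
0:192.168.1.2
1:192.168.1.3
2:192.168.1.4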

View File

@ -302,16 +302,6 @@
</listitem>
</varlistentry>
<varlistentry>
<term>CTDB_LVS_PUBLIC_IP=<parameter>IPADDR</parameter></term>
<listitem>
<para>
No default. Corresponds to "<option>--lvs</option>
<option>--single-public-ip IPADDR"</option>.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>CTDB_NODES=<parameter>FILENAME</parameter></term>
<listitem>
@ -817,6 +807,81 @@ CTDB_PER_IP_ROUTING_TABLE_ID_HIGH=9000
</refsect2>
<refsect2>
<title>LVS</title>
<para>
For a general description see the <citetitle>LVS</citetitle>
section in <citerefentry><refentrytitle>ctdb</refentrytitle>
<manvolnum>7</manvolnum></citerefentry>.
</para>
<refsect3>
<title>Eventscript</title>
<simplelist>
<member><filename>91.lvs</filename></member>
</simplelist>
</refsect3>
<variablelist>
<varlistentry>
<term>CTDB_LVS_NODES=<parameter>FILENAME</parameter></term>
<listitem>
<para>
FILENAME contains the list of nodes that belong to the
same LVS group.
</para>
<para>
File format:
<screen>
<parameter>IPADDR</parameter> <optional>slave-only</optional>
</screen>
</para>
<para>
IPADDR is the private IP address of each node in the LVS
group.
</para>
<para>
If "slave-only" is specified then the corresponding node
cannot be the LVS master node.  In this case
<varname>CTDB_LVS_PUBLIC_IFACE</varname> and
<varname>CTDB_LVS_PUBLIC_IP</varname> are optional and
unused.
</para>
<para>
No default, usually
<filename>/usr/local/etc/ctdb/lvs_nodes</filename> when enabled.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>CTDB_LVS_PUBLIC_IFACE=<parameter>INTERFACE</parameter></term>
<listitem>
<para>
INTERFACE is the network interface that clients will use
to connect to <varname>CTDB_LVS_PUBLIC_IP</varname>.
This is optional for slave-only nodes.
No default.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>CTDB_LVS_PUBLIC_IP=<parameter>IPADDR</parameter></term>
<listitem>
<para>
CTDB_LVS_PUBLIC_IP is the LVS public address. No
default.
</para>
</listitem>
</varlistentry>
</variablelist>
</refsect2>
<refsect2>
<title>MISCELLANEOUS NETWORK CONFIGURATION</title>
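
Tying the new LVS variables together: a hypothetical ctdbd.conf
fragment for a node listed as "slave-only" in the nodes file.  Per
the CTDB_LVS_NODES description above, CTDB_LVS_PUBLIC_IFACE and
CTDB_LVS_PUBLIC_IP are optional and unused on such a node, so the
minimal configuration is just:

# Hypothetical minimal LVS configuration for a slave-only node
CTDB_LVS_NODES=/usr/local/etc/ctdb/lvs_nodes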

View File

@ -543,9 +543,6 @@ EOF
setup_ctdb_lvs ()
{
export CTDB_LVS_PUBLIC_IP="$1"
export CTDB_PUBLIC_INTERFACE="$2"
lvs_state_dir="${EVENTSCRIPTS_TESTS_VAR_DIR}/lvs"
mkdir -p "$lvs_state_dir"
@ -554,8 +551,12 @@ setup_ctdb_lvs ()
lvs_header=$(ipvsadm -l -n)
# Not an official configuration file, just used by the ctdb
# tool stub
export CTDB_LVS_PUBLIC_IP="$1"
export CTDB_LVS_PUBLIC_IFACE="$2"
[ -n "$CTDB_LVS_PUBLIC_IP" ] || return 0
[ -n "$CTDB_LVS_PUBLIC_IFACE" ] || return 0
export CTDB_LVS_NODES=$(mktemp --tmpdir="$lvs_state_dir")
export FAKE_CTDB_LVS_MASTER=""
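
A sketch of how a test might continue after this helper (assumed
usage; the actual population of the nodes file is outside the
displayed hunk):

# Hypothetical continuation in a test case: fill the generated nodes
# file and nominate node 0 as the fake LVS master.
cat >"$CTDB_LVS_NODES" <<EOF
192.168.1.2
192.168.1.3 slave-only
EOF
FAKE_CTDB_LVS_MASTER="0"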

View File

@ -236,16 +236,16 @@ ctdb_shutdown ()
######################################################################
# This is only used by the NAT gateway code at the moment, so use a
# hack. Assume that $CTDB_NATGW_NODES contains all nodes in the
# cluster (which is what current tests assume). Use the PNN to find
# the address from this file. The NAT gateway code only used the
# address, so just mark the node healthy.
# This is only used by the NAT and LVS gateway code at the moment, so
# use a hack. Assume that $CTDB_NATGW_NODES or $CTDB_LVS_NODES
# contains all nodes in the cluster (which is what current tests
# assume). Use the PNN to find the address from this file. The NAT
# gateway code only used the address, so just mark the node healthy.
ctdb_nodestatus ()
{
echo '|Node|IP|Disconnected|Banned|Disabled|Unhealthy|Stopped|Inactive|PartiallyOnline|ThisNode|'
_line=$(( $FAKE_CTDB_PNN + 1 ))
_ip=$(sed -n -e "${_line}p" "$CTDB_NATGW_NODES")
_ip=$(sed -n -e "${_line}p" "${CTDB_NATGW_NODES:-${CTDB_LVS_NODES}}")
echo "|${FAKE_CTDB_PNN}|${_ip}|0|0|0|0|0|0|0|Y|"
}
@ -358,28 +358,6 @@ EOF
######################################################################
ctdb_lvs_master ()
{
if [ -n "$FAKE_CTDB_LVS_MASTER" ] ; then
echo "Node ${FAKE_CTDB_LVS_MASTER} is LVS master"
return 0
else
echo "This is no LVS master"
return 255
fi
}
ctdb_lvs ()
{
_pnn=0
while read _ip _opts ; do
echo "${_pnn}:${_ip}"
_pnn=$(($_pnn + 1))
done <"$CTDB_LVS_NODES"
}
######################################################################
case "$1" in
gettickles)
setup_tickles
@ -477,7 +455,5 @@ case "$1" in
shutdown) ctdb_shutdown "$@";;
setvar) ctdb_setvar "$@" ;;
nodestatus) ctdb_nodestatus "$@" ;;
lvsmaster) ctdb_lvs_master "$@" ;;
lvs) ctdb_lvs "$@" ;;
*) not_implemented "$1" ;;
esac
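
To make the PNN-to-address hack above concrete (values assumed, not
from the commit): with FAKE_CTDB_PNN=1 and a nodes file listing
192.168.1.2, 192.168.1.3 and 192.168.1.4, the stub prints the second
address:

# Hypothetical stub invocation and output:
#   FAKE_CTDB_PNN=1 CTDB_LVS_NODES=./lvs_nodes ./ctdb nodestatus
#   |Node|IP|Disconnected|Banned|Disabled|Unhealthy|Stopped|Inactive|PartiallyOnline|ThisNode|
#   |1|192.168.1.3|0|0|0|0|0|0|0|Y|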

View File

@ -0,0 +1,53 @@
#!/bin/sh
prog="ctdb_lvs"
# Print a message and exit.
die ()
{
echo "$1" >&2 ; exit ${2:-1}
}
not_implemented_exit_code=1
usage ()
{
cat >&2 <<EOF
Usage: $prog { master | list }
EOF
exit 1
}
not_implemented ()
{
echo "${prog}: command \"$1\" not implemented in stub" >&2
exit $not_implemented_exit_code
}
ctdb_lvs_master ()
{
if [ -n "$FAKE_CTDB_LVS_MASTER" ] ; then
echo "Node ${FAKE_CTDB_LVS_MASTER} is LVS master"
return 0
else
echo "This is no LVS master"
return 255
fi
}
ctdb_lvs_list ()
{
_pnn=0
while read _ip _opts ; do
echo "${_pnn}:${_ip}"
_pnn=$(($_pnn + 1))
done <"$CTDB_LVS_NODES"
}
######################################################################
case "$1" in
master) ctdb_lvs_master "$@" ;;
list) ctdb_lvs_list "$@" ;;
*) not_implemented "$1" ;;
esac
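
Example invocations of this stub, with hypothetical environment
values matching the code above:

# With FAKE_CTDB_LVS_MASTER=1 set in the environment:
#   $ ./ctdb_lvs master
#   Node 1 is LVS master
# With CTDB_LVS_NODES pointing at a two-line nodes file:
#   $ ./ctdb_lvs list
#   0:192.168.1.2
#   1:192.168.1.3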