From ce33a7cb1e6d91b56bc9261efb4bf6d53371f77e Mon Sep 17 00:00:00 2001
From: Martin Schwenke
Date: Fri, 8 Apr 2016 16:21:08 +1000
Subject: [PATCH] ctdb-scripts: Call out to ctdb_lvs helper from 91.lvs

To keep this commit comprehensible, 91.lvs and the CTDB CLI tool are
temporarily inconsistent. The tool will be made consistent in a
subsequent commit.

LVS now uses a configuration file specified by CTDB_LVS_NODES and
supports the same slave-only syntax as CTDB_NATGW_NODES. LVS also
uses the new variable CTDB_LVS_PUBLIC_IFACE instead of
CTDB_PUBLIC_INTERFACE.

Update unit tests and documentation.

Note that the --lvs and --single-public-ip daemon options are no
longer used. These will be removed and relevant documentation updated
in a subsequent commit.

Signed-off-by: Martin Schwenke
Reviewed-by: Amitay Isaacs
---
 ctdb/config/ctdbd_wrapper                |   1 -
 ctdb/config/events.d/91.lvs              |  47 +++++++++--
 ctdb/doc/ctdb.7.xml                      | 101 ++++++++++++++---
 ctdb/doc/ctdbd.conf.5.xml                |  85 ++++++++++++++++---
 ctdb/tests/eventscripts/scripts/local.sh |  11 +--
 ctdb/tests/eventscripts/stubs/ctdb       |  36 ++------
 ctdb/tests/eventscripts/stubs/ctdb_lvs   |  53 ++++++++++++
 7 files changed, 244 insertions(+), 90 deletions(-)
 create mode 100755 ctdb/tests/eventscripts/stubs/ctdb_lvs

diff --git a/ctdb/config/ctdbd_wrapper b/ctdb/config/ctdbd_wrapper
index be251e6472c..b1f71c343b3 100755
--- a/ctdb/config/ctdbd_wrapper
+++ b/ctdb/config/ctdbd_wrapper
@@ -163,7 +163,6 @@ build_ctdb_options ()
     maybe_set "--start-as-stopped "    "$CTDB_START_AS_STOPPED" "yes"
     maybe_set "--no-recmaster"         "$CTDB_CAPABILITY_RECMASTER" "no"
     maybe_set "--no-lmaster"           "$CTDB_CAPABILITY_LMASTER" "no"
-    maybe_set "--lvs --single-public-ip" "$CTDB_LVS_PUBLIC_IP"
     maybe_set "--script-log-level"     "$CTDB_SCRIPT_LOG_LEVEL"
     maybe_set "--max-persistent-check-errors" "$CTDB_MAX_PERSISTENT_CHECK_ERRORS"
 }
diff --git a/ctdb/config/events.d/91.lvs b/ctdb/config/events.d/91.lvs
index c6ef024a088..9fe38de8dac 100755
--- a/ctdb/config/events.d/91.lvs
+++ b/ctdb/config/events.d/91.lvs
@@ -8,16 +8,47 @@
 
 loadconfig ctdb
 
-[ -z "$CTDB_LVS_PUBLIC_IP" ] && exit 0
-[ -z "$CTDB_PUBLIC_INTERFACE" ] && exit 0
+[ -n "$CTDB_LVS_NODES" ] || exit 0
+export CTDB_LVS_NODES
 
 if ! type ipvsadm >/dev/null 2>&1 ; then
     echo "LVS configured but ipvsadm not found"
     exit 0
 fi
+
+lvs_slave_only ()
+{
+    ctdb_get_ip_address
+
+    awk -v my_ip="$ip_address" \
+        '$1 == my_ip { if ($2 ~ "slave-only") { exit 0 } else { exit 1 } }' \
+        "$CTDB_LVS_NODES"
+}
+
+lvs_check_config ()
+{
+    [ -r "$CTDB_LVS_NODES" ] || \
+        die "error: CTDB_LVS_NODES=${CTDB_LVS_NODES} unreadable"
+    [ -n "$CTDB_LVS_PUBLIC_IP" ] || \
+        die "Invalid configuration: CTDB_LVS_PUBLIC_IP not set"
+    if ! lvs_slave_only ; then
+        [ -n "$CTDB_LVS_PUBLIC_IFACE" ] || \
+            die "Invalid configuration: CTDB_LVS_PUBLIC_IFACE not set"
+    fi
+
+    if [ "$CTDB_PARTIALLY_ONLINE_INTERFACES" = "yes" ] ; then
+        die "Invalid configuration: CTDB_PARTIALLY_ONLINE_INTERFACES=yes incompatible with LVS"
+    fi
+}
+
 case "$1" in
+setup)
+        lvs_check_config
+        ;;
+
 startup)
+        lvs_check_config
+
         ipvsadm -D -t "$CTDB_LVS_PUBLIC_IP" >/dev/null 2>&1
         ipvsadm -D -u "$CTDB_LVS_PUBLIC_IP" >/dev/null 2>&1
@@ -30,6 +61,8 @@ startup)
         ;;
 
 shutdown)
+        lvs_check_config
+
         ipvsadm -D -t "$CTDB_LVS_PUBLIC_IP"
         ipvsadm -D -u "$CTDB_LVS_PUBLIC_IP"
@@ -39,14 +72,16 @@ shutdown)
         ;;
 
 ipreallocated)
+        lvs_check_config
+
         # Kill connections
         ipvsadm -D -t "$CTDB_LVS_PUBLIC_IP" >/dev/null 2>&1
         ipvsadm -D -u "$CTDB_LVS_PUBLIC_IP" >/dev/null 2>&1
         kill_tcp_connections_local_only \
-                "$CTDB_PUBLIC_INTERFACE" "$CTDB_LVS_PUBLIC_IP"
+                "$CTDB_LVS_PUBLIC_IFACE" "$CTDB_LVS_PUBLIC_IP"
 
         ctdb_get_pnn
-        lvsmaster=$(ctdb lvsmaster | sed -n -e 's/Node \([0-9]*\) is LVS master/\1/p')
+        lvsmaster=$("${CTDB_HELPER_BINDIR}/ctdb_lvs" master | sed -n -e 's/Node \([0-9]*\) is LVS master/\1/p')
         if [ "$pnn" != "$lvsmaster" ] ; then
             # This node is not the LVS master so change the IP address
             # to have scope "host" so this node won't respond to ARPs
@@ -63,7 +98,7 @@ ipreallocated)
         ipvsadm -A -u "$CTDB_LVS_PUBLIC_IP" -p 1999999 -s lc
 
         # Add all nodes (except this node) as LVS servers
-        ctdb lvs |
+        "${CTDB_HELPER_BINDIR}/ctdb_lvs" list |
         awk -F: -v pnn="$pnn" '$1 != pnn { print $2 }' |
         while read ip ; do
                 ipvsadm -a -t "$CTDB_LVS_PUBLIC_IP" -r $ip -g
@@ -75,7 +110,7 @@ ipreallocated)
         ipvsadm -a -u "$CTDB_LVS_PUBLIC_IP" -r 127.0.0.1
 
         ctdb gratiousarp \
-                "$CTDB_LVS_PUBLIC_IP" "$CTDB_PUBLIC_INTERFACE" >/dev/null 2>&1
+                "$CTDB_LVS_PUBLIC_IP" "$CTDB_LVS_PUBLIC_IFACE" >/dev/null 2>&1
 
         flush_route_cache
         ;;
diff --git a/ctdb/doc/ctdb.7.xml b/ctdb/doc/ctdb.7.xml
index 40d3387f078..51222ad48a4 100644
--- a/ctdb/doc/ctdb.7.xml
+++ b/ctdb/doc/ctdb.7.xml
@@ -436,21 +436,6 @@ Node 3:/usr/local/etc/ctdb/public_addresses
-
-      LVS
-
-      Indicates that a node is configued in Linux Virtual Server
-      (LVS) mode. In this mode the entire CTDB cluster uses one
-      single public address for the entire cluster instead of
-      using multiple public addresses in failover mode. This is
-      an alternative to using a load-balancing layer-4 switch.
-      See the LVS section for more
-      details.
-
-
-
-
@@ -477,25 +462,35 @@
-      In this mode the cluster selects a set of nodes in the cluster
-      and loadbalance all client access to the LVS address across this
-      set of nodes. This set of nodes are all LVS capable nodes that
-      are HEALTHY, or if no HEALTHY nodes exists all LVS capable nodes
-      regardless of health status. LVS will however never loadbalance
-      traffic to nodes that are BANNED, STOPPED, DISABLED or
-      DISCONNECTED. The ctdb lvs command is used to
-      show which nodes are currently load-balanced across.
+      One extra LVS public address is assigned on the public network
+      to each LVS group. Each LVS group is a set of nodes in the
+      cluster that presents the same LVS public address to the
+      outside world. Normally there would only be one LVS group
+      spanning an entire cluster, but in situations where one CTDB
+      cluster spans multiple physical sites it might be useful to have
+      one LVS group for each site. There can be multiple LVS groups
+      in a cluster but each node can only be a member of one LVS group.
 
-      One of the these nodes are elected as the LVSMASTER. This node
-      receives all traffic from clients coming in to the LVS address
-      and multiplexes it across the internal network to one of the
-      nodes that LVS is using. When responding to the client, that
-      node will send the data back directly to the client, bypassing
-      the LVSMASTER node. The command ctdb
-      lvsmaster will show which node is the current
-      LVSMASTER.
+      Client access to the cluster is load-balanced across the HEALTHY
+      nodes in an LVS group. If no HEALTHY nodes exist then all
+      nodes in the group are used, regardless of health status. CTDB
+      will, however, never load-balance LVS traffic to nodes that are
+      BANNED, STOPPED, DISABLED or DISCONNECTED. The ctdb
+      lvs command is used to show which nodes are currently
+      load-balanced across.
+
+
+      In each LVS group, one of the nodes is selected by CTDB to be
+      the LVS master. This node receives all traffic from clients
+      coming in to the LVS public address and multiplexes it across
+      the internal network to one of the nodes that LVS is using.
+      When responding to the client, that node will send the data back
+      directly to the client, bypassing the LVS master node. The
+      command ctdb lvsmaster will show which node
+      is the current LVS master.
@@ -525,7 +520,7 @@
-
+
     This means that all incoming traffic to the cluster will pass
     through one physical node, which limits scalability. You can
     send more data to the LVS address that one physical node can
@@ -565,20 +560,50 @@
     To activate LVS on a CTDB node you must specify the
-    CTDB_PUBLIC_INTERFACE and
-    CTDB_LVS_PUBLIC_IP configuration variables.
-    Setting the latter variable also enables the LVS capability on
-    the node at startup.
+    CTDB_LVS_PUBLIC_IFACE,
+    CTDB_LVS_PUBLIC_IP and
+    CTDB_LVS_NODES configuration variables.
+    CTDB_LVS_NODES specifies a file containing
+    the private addresses of all nodes in the current node's LVS
+    group.
-
+
     Example:
 
-CTDB_PUBLIC_INTERFACE=eth1
+CTDB_LVS_PUBLIC_IFACE=eth1
 CTDB_LVS_PUBLIC_IP=10.1.1.237
+CTDB_LVS_NODES=/usr/local/etc/ctdb/lvs_nodes
+
+    Example /usr/local/etc/ctdb/lvs_nodes:
+
+192.168.1.2
+192.168.1.3
+192.168.1.4
+
+
+    Normally any node in an LVS group can act as the LVS master.
+    Nodes that are highly loaded due to other demands may be
+    flagged with the "slave-only" option in the
+    CTDB_LVS_NODES file to limit the LVS
+    functionality of those nodes.
+
+
+    LVS nodes file that excludes 192.168.1.4 from being
+    the LVS master node:
+
+192.168.1.2
+192.168.1.3
+192.168.1.4 slave-only
+
+
diff --git a/ctdb/doc/ctdbd.conf.5.xml b/ctdb/doc/ctdbd.conf.5.xml
index 5494b51ad0a..324be050135 100644
--- a/ctdb/doc/ctdbd.conf.5.xml
+++ b/ctdb/doc/ctdbd.conf.5.xml
@@ -302,16 +302,6 @@
-
-      CTDB_LVS_PUBLIC_IP=IPADDR
-
-
-      No default. Corresponds to "--lvs
-      --single-public-ip".
-
-
-
 
       CTDB_NODES=FILENAME
@@ -817,6 +807,81 @@
+
+  LVS
+
+    For a general description see the LVS
+    section in ctdb(7).
+
+
+    Eventscript
+
+      91.lvs
+
+
+
+    CTDB_LVS_NODES=FILENAME
+
+
+      FILENAME contains the list of nodes that belong to the
+      same LVS group.
+
+      File format:
+
+IPADDR [slave-only]
+
+
+      IPADDR is the private IP address of each node in the LVS
+      group.
+
+      If "slave-only" is specified then the corresponding node
+      cannot be the LVS master node. In this case
+      CTDB_LVS_PUBLIC_IFACE and
+      CTDB_LVS_PUBLIC_IP are optional and
+      unused.
+
+      No default, usually
+      /usr/local/etc/ctdb/lvs_nodes when enabled.
+
+
+    CTDB_LVS_PUBLIC_IFACE=INTERFACE
+
+      INTERFACE is the network interface that clients will use
+      to connect to CTDB_LVS_PUBLIC_IP.
+      This is optional for slave-only nodes.
+      No default.
+
+
+    CTDB_LVS_PUBLIC_IP=IPADDR
+
+      CTDB_LVS_PUBLIC_IP is the LVS public address. No
+      default.
+
+
+
 
   MISCELLANEOUS NETWORK CONFIGURATION
diff --git a/ctdb/tests/eventscripts/scripts/local.sh b/ctdb/tests/eventscripts/scripts/local.sh
index 0f15be4edb9..51f38580f7e 100644
--- a/ctdb/tests/eventscripts/scripts/local.sh
+++ b/ctdb/tests/eventscripts/scripts/local.sh
@@ -543,9 +543,6 @@ EOF
 
 setup_ctdb_lvs ()
 {
-        export CTDB_LVS_PUBLIC_IP="$1"
-        export CTDB_PUBLIC_INTERFACE="$2"
-
         lvs_state_dir="${EVENTSCRIPTS_TESTS_VAR_DIR}/lvs"
         mkdir -p "$lvs_state_dir"
 
@@ -554,8 +551,12 @@ setup_ctdb_lvs ()
 
         lvs_header=$(ipvsadm -l -n)
 
-        # Not an official configuration file, just used by the ctdb
-        # tool stub
+        export CTDB_LVS_PUBLIC_IP="$1"
+        export CTDB_LVS_PUBLIC_IFACE="$2"
+
+        [ -n "$CTDB_LVS_PUBLIC_IP" ] || return 0
+        [ -n "$CTDB_LVS_PUBLIC_IFACE" ] || return 0
+
         export CTDB_LVS_NODES=$(mktemp --tmpdir="$lvs_state_dir")
 
         export FAKE_CTDB_LVS_MASTER=""
diff --git a/ctdb/tests/eventscripts/stubs/ctdb b/ctdb/tests/eventscripts/stubs/ctdb
index 44b934a6a41..4c4278dd5be 100755
--- a/ctdb/tests/eventscripts/stubs/ctdb
+++ b/ctdb/tests/eventscripts/stubs/ctdb
@@ -236,16 +236,16 @@ ctdb_shutdown ()
 
 ######################################################################
 
-# This is only used by the NAT gateway code at the moment, so use a
-# hack. Assume that $CTDB_NATGW_NODES contains all nodes in the
-# cluster (which is what current tests assume). Use the PNN to find
-# the address from this file. The NAT gateway code only used the
-# address, so just mark the node healthy.
+# This is only used by the NAT gateway and LVS code at the moment, so
+# use a hack. Assume that $CTDB_NATGW_NODES or $CTDB_LVS_NODES
+# contains all nodes in the cluster (which is what current tests
+# assume). Use the PNN to find the address from this file. The NAT
+# gateway code only uses the address, so just mark the node healthy.
 ctdb_nodestatus ()
 {
         echo '|Node|IP|Disconnected|Banned|Disabled|Unhealthy|Stopped|Inactive|PartiallyOnline|ThisNode|'
         _line=$(( $FAKE_CTDB_PNN + 1 ))
-        _ip=$(sed -n -e "${_line}p" "$CTDB_NATGW_NODES")
+        _ip=$(sed -n -e "${_line}p" "${CTDB_NATGW_NODES:-${CTDB_LVS_NODES}}")
         echo "|${FAKE_CTDB_PNN}|${_ip}|0|0|0|0|0|0|0|Y|"
 }
 
@@ -358,28 +358,6 @@
 
 ######################################################################
 
-ctdb_lvs_master ()
-{
-        if [ -n "$FAKE_CTDB_LVS_MASTER" ] ; then
-                echo "Node ${FAKE_CTDB_LVS_MASTER} is LVS master"
-                return 0
-        else
-                echo "This is no LVS master"
-                return 255
-        fi
-}
-
-ctdb_lvs ()
-{
-        _pnn=0
-        while read _ip _opts ; do
-                echo "${_pnn}:${_ip}"
-                _pnn=$(($_pnn + 1))
-        done <"$CTDB_LVS_NODES"
-}
-
-######################################################################
-
 case "$1" in
 gettickles)
         setup_tickles
@@ -477,7 +455,5 @@ case "$1" in
 shutdown) ctdb_shutdown "$@";;
 setvar) ctdb_setvar "$@" ;;
 nodestatus) ctdb_nodestatus "$@" ;;
-lvsmaster) ctdb_lvs_master "$@" ;;
-lvs) ctdb_lvs "$@" ;;
 *) not_implemented "$1" ;;
 esac
diff --git a/ctdb/tests/eventscripts/stubs/ctdb_lvs b/ctdb/tests/eventscripts/stubs/ctdb_lvs
new file mode 100755
index 00000000000..daca8e229fa
--- /dev/null
+++ b/ctdb/tests/eventscripts/stubs/ctdb_lvs
@@ -0,0 +1,53 @@
+#!/bin/sh
+
+prog="ctdb_lvs"
+
+# Print a message and exit.
+die ()
+{
+    echo "$1" >&2 ; exit ${2:-1}
+}
+
+not_implemented_exit_code=1
+
+usage ()
+{
+    cat >&2 <<EOF
+Usage: $prog { master | list }
+EOF
+    exit 1
+}
+
+not_implemented ()
+{
+    echo "${prog}: command \"$1\" not implemented in stub" >&2
+    exit $not_implemented_exit_code
+}
+
+ctdb_lvs_master ()
+{
+    if [ -n "$FAKE_CTDB_LVS_MASTER" ] ; then
+        echo "Node ${FAKE_CTDB_LVS_MASTER} is LVS master"
+        return 0
+    else
+        echo "This is no LVS master"
+        return 255
+    fi
+}
+
+ctdb_lvs_list ()
+{
+    _pnn=0
+    while read _ip _opts ; do
+        echo "${_pnn}:${_ip}"
+        _pnn=$(($_pnn + 1))
+    done <"$CTDB_LVS_NODES"
+}
+
+######################################################################
+
+case "$1" in
+    master) ctdb_lvs_master "$@" ;;
+    list)   ctdb_lvs_list "$@" ;;
+    *)      not_implemented "$1" ;;
+esac
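
A quick way to sanity-check the slave-only matching used by
lvs_slave_only in 91.lvs is to run the same awk expression against a
throwaway nodes file. This is an illustrative sketch only, not part of
the patch: the file contents and test addresses are invented, and
ip_address stands in for the value that ctdb_get_ip_address would
normally set.

#!/bin/sh
# Illustrative only: exercise the awk expression from lvs_slave_only
# against a temporary nodes file with made-up addresses.

CTDB_LVS_NODES=$(mktemp)
cat >"$CTDB_LVS_NODES" <<EOF
192.168.1.2
192.168.1.3
192.168.1.4 slave-only
EOF

for ip_address in 192.168.1.2 192.168.1.4 ; do
    # exit 0 (success) means slave-only.  Note that an address missing
    # from the file also yields success, because awk exits 0 when no
    # rule matches.
    if awk -v my_ip="$ip_address" \
        '$1 == my_ip { if ($2 ~ "slave-only") { exit 0 } else { exit 1 } }' \
        "$CTDB_LVS_NODES" ; then
        echo "${ip_address}: slave-only"
    else
        echo "${ip_address}: can be LVS master"
    fi
done

rm -f "$CTDB_LVS_NODES"

Run as-is this prints "192.168.1.2: can be LVS master" and
"192.168.1.4: slave-only".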
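The new test stub's two subcommands can also be exercised directly.
The transcript below is illustrative, run from the stubs directory
with an invented three-line lvs_nodes file; it is not part of the
test suite.

$ cat >lvs_nodes <<EOF
192.168.1.2
192.168.1.3
192.168.1.4 slave-only
EOF
$ FAKE_CTDB_LVS_MASTER=1 ./ctdb_lvs master
Node 1 is LVS master
$ FAKE_CTDB_LVS_MASTER= ./ctdb_lvs master
This is no LVS master
$ CTDB_LVS_NODES=lvs_nodes ./ctdb_lvs list
0:192.168.1.2
1:192.168.1.3
2:192.168.1.4

Note that list prints only PNN:ADDRESS pairs; the slave-only flag is
consumed by the _opts variable and never printed, which is the format
the awk pipeline in 91.lvs expects.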