
Merge branch 'new_tests'

(This used to be ctdb commit 10531b50e2d306a5e62b8d488a1acc9e75b0ad4b)
Martin Schwenke 2009-06-16 12:52:10 +10:00
commit 9467e39c82
3 changed files with 298 additions and 1 deletion


@ -0,0 +1,115 @@
#!/bin/bash
test_info()
{
    cat <<EOF
Verify that NFS connections are monitored and that NFS tickles are sent.

We create a connection to the NFS server on a node and confirm that
this connection is registered in the nfs-tickles/ subdirectory in
shared storage.  Then disable the relevant NFS server node and ensure
that it sends an appropriate reset packet.

Prerequisites:

* An active CTDB cluster with at least 2 nodes with public addresses.

* Test must be run on a real or virtual cluster rather than against
  local daemons.

* Test must not be run from a cluster node.

* Cluster nodes must be listening on the NFS TCP port (2049).

Steps:

1. Verify that the cluster is healthy.
2. Connect from the current host (test client) to TCP port 2049 using
   the public address of a cluster node.
3. Determine the source socket used for the connection.
4. Ensure that CTDB records the source socket details in the nfs-tickles
   directory on shared storage.
5. Disable the node that the connection has been made to.
6. Verify that a TCP tickle (a reset packet) is sent to the test client.

Expected results:

* CTDB should correctly record the socket in the nfs-tickles directory
  and should send a reset packet when the node is disabled.
EOF
}
. ctdb_test_functions.bash
set -e
ctdb_test_init "$@"
ctdb_test_check_real_cluster
onnode 0 $CTDB_TEST_WRAPPER cluster_is_healthy
# We need this for later, so we know how long to sleep.
try_command_on_node 0 $CTDB getvar MonitorInterval
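# The output looks like "MonitorInterval = 15"; strip everything up to
# "= " to keep just the number of seconds.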
monitor_interval="${out#*= }"
#echo "Monitor interval on node $test_node is $monitor_interval seconds."
echo "Getting list of public IPs..."
try_command_on_node 0 "$CTDB ip -n all | sed -e '1d'"
# When selecting test_node we just want a node that has public IPs.
# Taking the first node listed works and is economically semi-random. :-)
read x test_node <<<"$out"
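# Each line of $out is "<ip> <pnn>"; take the PNN from the first line
# and discard the IP.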
ips=""
while read ip pnn ; do
    if [ "$pnn" = "$test_node" ] ; then
        ips="${ips}${ips:+ }${ip}"
    fi
done <<<"$out" # The here-string is a bashism that avoids a pipeline, which would set $ips in a subshell and lose it.
echo "Selected node ${test_node} with IPs: $ips"
test_ip="${ips%% *}"
test_port=2049
echo "Connecting to node ${test_node} on IP ${test_ip}:${test_port} with netcat..."
nc -d -w $(($monitor_interval * 4)) $test_ip $test_port &
nc_pid=$!
ctdb_test_exit_hook_add "kill $nc_pid >/dev/null 2>&1"
wait_until_get_src_socket "tcp" "${test_ip}:${test_port}" $nc_pid "nc"
src_socket="$out"
echo "Source socket is $src_socket"
echo "Sleeping for MonitorInterval..."
sleep_for $monitor_interval
try_command_on_node $test_node hostname
test_hostname=$out
try_command_on_node -v 0 cat /gpfs/.ctdb/nfs-tickles/$test_hostname/$test_ip
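# If removing the source socket from $out changes it, then the socket
# was recorded in the tickles file.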
if [ "${out/${src_socket}/}" != "$out" ] ; then
echo "GOOD: NFS connection tracked OK in tickles file."
else
echo "BAD: Socket not tracked in NFS tickles file:"
testfailures=1
fi
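# Capture only TCP RST packets sent from the test IP/port back to our
# source socket.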
filter="src host $test_ip and tcp src port $test_port and dst host ${src_socket%:*} and tcp dst port ${src_socket##*:} and tcp[tcpflags] & tcp-rst != 0"
tcpdump_start "$filter"
echo "Disabling node $test_node"
try_command_on_node 1 $CTDB disable -n $test_node
onnode 0 $CTDB_TEST_WRAPPER wait_until_node_has_status $test_node disabled
tcpdump_wait
echo "GOOD: here's the tickle reset:"
tcpdump -n -r $tcpdump_filename 2>/dev/null
echo "Expect a restart..."
ctdb_test_exit


@ -0,0 +1,112 @@
#!/bin/bash
test_info()
{
    cat <<EOF
Verify that CIFS connections are monitored and that CIFS tickles are sent.

We create a connection to the CIFS server on a node and confirm that
this connection is registered by CTDB.  Then disable the relevant CIFS
server node and ensure that it sends an appropriate reset packet.

Prerequisites:

* An active CTDB cluster with at least 2 nodes with public addresses.

* Test must be run on a real or virtual cluster rather than against
  local daemons.

* Test must not be run from a cluster node.

* Clustered Samba must be listening on TCP port 445.

Steps:

1. Verify that the cluster is healthy.
2. Connect from the current host (test client) to TCP port 445 using
   the public address of a cluster node.
3. Determine the source socket used for the connection.
4. Using the "ctdb gettickles" command, ensure that CTDB records the
   connection details.
5. Disable the node that the connection has been made to.
6. Verify that a TCP tickle (a reset packet) is sent to the test client.

Expected results:

* CTDB should correctly record the connection and should send a reset
  packet when the node is disabled.
EOF
}
. ctdb_test_functions.bash
set -e
ctdb_test_init "$@"
ctdb_test_check_real_cluster
onnode 0 $CTDB_TEST_WRAPPER cluster_is_healthy
# We need this for later, so we know how long to sleep.
try_command_on_node 0 $CTDB getvar MonitorInterval
monitor_interval="${out#*= }"
#echo "Monitor interval on node $test_node is $monitor_interval seconds."
echo "Getting list of public IPs..."
try_command_on_node 0 "$CTDB ip -n all | sed -e '1d'"
# When selecting test_node we just want a node that has public IPs.
# Taking the first node listed works and is economically semi-random. :-)
read x test_node <<<"$out"
ips=""
while read ip pnn ; do
    if [ "$pnn" = "$test_node" ] ; then
        ips="${ips}${ips:+ }${ip}"
    fi
done <<<"$out" # The here-string is a bashism that avoids a pipeline, which would set $ips in a subshell and lose it.
echo "Selected node ${test_node} with IPs: $ips"
test_ip="${ips%% *}"
test_port=445
echo "Connecting to node ${test_node} on IP ${test_ip}:${test_port} with netcat..."
nc -d -w $(($monitor_interval * 4)) $test_ip $test_port &
nc_pid=$!
ctdb_test_exit_hook_add "kill $nc_pid >/dev/null 2>&1"
wait_until_get_src_socket "tcp" "${test_ip}:${test_port}" $nc_pid "nc"
src_socket="$out"
echo "Source socket is $src_socket"
# Here we assume that Samba registers the tickle with CTDB faster than
# it takes us to wait for netstat to show the connection and then to
# ask CTDB about it via onnode below.
try_command_on_node -v 0 ctdb gettickles $test_ip
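# "ctdb gettickles" lists each tickle as "SRC: <ip:port> ...", so use
# the same substring-removal trick as in the NFS test to check for our
# socket.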
if [ "${out/SRC: ${src_socket} /}" != "$out" ] ; then
echo "GOOD: CIFS connection tracked OK by CTDB."
else
echo "BAD: Socket not tracked by CTDB."
testfailures=1
fi
filter="src host $test_ip and tcp src port $test_port and dst host ${src_socket%:*} and tcp dst port ${src_socket##*:} and tcp[tcpflags] & tcp-rst != 0"
tcpdump_start "$filter"
echo "Disabling node $test_node"
try_command_on_node 1 $CTDB disable -n $test_node
onnode 0 $CTDB_TEST_WRAPPER wait_until_node_has_status $test_node disabled
tcpdump_wait
echo "GOOD: here's the tickle reset:"
tcpdump -n -r $tcpdump_filename 2>/dev/null
echo "Expect a restart..."
ctdb_test_exit


@ -61,7 +61,7 @@ ctdb_test_exit ()
    [ $(($testfailures+0)) -eq 0 -a $status -ne 0 ] && testfailures=$status
-   eval "$ctdb_test_exit_hook"
+   eval "$ctdb_test_exit_hook" || true
    unset ctdb_test_exit_hook
    if ! onnode 0 $CTDB_TEST_WRAPPER cluster_is_healthy ; then
@ -79,6 +79,11 @@ ctdb_test_exit ()
    test_exit
}
ctdb_test_exit_hook_add ()
{
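    # Append the given command to the exit hook, separating it from any
    # existing commands with " ; ".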
    ctdb_test_exit_hook="${ctdb_test_exit_hook}${ctdb_test_exit_hook:+ ; }$*"
}
ctdb_test_run ()
{
    local name="$1" ; shift
@ -150,6 +155,14 @@ ctdb_test_init ()
    trap "ctdb_test_exit" 0
}
ctdb_test_check_real_cluster ()
{
[ -n "$CTDB_TEST_REAL_CLUSTER" ] && return 0
echo "ERROR: This test must be run on a real/virtual cluster, not local daemons."
return 1
}
########################################
# Sets: $out
@ -401,6 +414,63 @@ wait_until_ips_are_on_nodeglob ()
    wait_until 60 ips_are_on_nodeglob "$@"
}
get_src_socket ()
{
    local proto="$1"
    local dst_socket="$2"
    local pid="$3"
    local prog="$4"
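    # Match a netstat line for this connection: protocol, recv-q, send-q,
    # local address, our destination socket, state ESTABLISHED and the
    # owning pid/program.  awk then prints column 4, the local (source)
    # socket.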
    local pat="^${proto}[[:space:]]+[[:digit:]]+[[:space:]]+[[:digit:]]+[[:space:]]+[^[:space:]]+[[:space:]]+${dst_socket//./\\.}[[:space:]]+ESTABLISHED[[:space:]]+${pid}/${prog}[[:space:]]*\$"

    out=$(netstat -tanp |
        egrep "$pat" |
        awk '{ print $4 }')

    [ -n "$out" ]
}
wait_until_get_src_socket ()
{
    local proto="$1"
    local dst_socket="$2"
    local pid="$3"
    local prog="$4"

    echo "Waiting for ${prog} to establish connection to ${dst_socket}..."
    wait_until 5 get_src_socket "$@"
}
# filename will be in $tcpdump_filename, pid in $tcpdump_pid
# By default, wait for 1 matching packet on any interface.
tcpdump_start ()
{
    local filter="$1"
    local count="${2:-1}"
    local iface="${3:-any}"

    echo "Running tcpdump to capture ${count} packet(s) on interface ${iface}."

    tcpdump_filename=$(mktemp)
    ctdb_test_exit_hook_add "rm -f $tcpdump_filename"

    tcpdump -s 1500 -w $tcpdump_filename -c "$count" -i "$iface" "$filter" &
    tcpdump_pid=$!
    ctdb_test_exit_hook_add "kill $tcpdump_pid >/dev/null 2>&1"

    echo "Waiting for tcpdump output file to be initialised..."
    wait_until 10 test -f $tcpdump_filename
    sleep_for 1
}
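# wait_until needs a command that succeeds when the condition is met, so
# this helper inverts the exit status of its argument.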
not ()
{
! "$@"
}
tcpdump_wait ()
{
echo "Waiting for tcpdump to complete..."
wait_until 5 not kill -0 $tcpdump_pid >/dev/null 2>&1
}
#######################################
daemons_stop ()