38138b42f7
The main change is to source cluster.bash instead of integration.bash.

While touching the preamble, the following additional changes are also made:

* Drop test_info() definition and replace it with a comment

  The use of test_info() is pointless.

* Drop call to ctdb_test_check_real_cluster()

  cluster.bash now does this.

* Drop call to cluster_is_healthy()

  This is a holdover from when the previous test would restart daemons to
  get things ready for a test.  There was also a bug where going into
  recovery during the restart would sometimes cause the cluster to become
  unhealthy.  If we really need something like this then we can add it to
  ctdb_test_init().

Signed-off-by: Martin Schwenke <martin@meltin.net>
Reviewed-by: Amitay Isaacs <amitay@gmail.com>
79 lines
2.1 KiB
Bash
Executable File
#!/bin/bash

# Verify that the server end of an NFS connection is correctly reset

# Prerequisites:

# * An active CTDB cluster with at least 2 nodes with public addresses.

# * Test must be run on a real or virtual cluster rather than against
#   local daemons.

# * Test must not be run from a cluster node.

# * Cluster nodes must be listening on the NFS TCP port (2049).

# Expected results:

# * CTDB should correctly record the connection and the releasing node
#   should reset the server end of the connection.

. "${TEST_SCRIPTS_DIR}/cluster.bash"

set -e

ctdb_test_init

# We need this for later, so we know how long to sleep.
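# try_command_on_node captures the command's output in $out (and in the
# file named by $outfile) for use below.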
try_command_on_node 0 $CTDB getvar MonitorInterval
monitor_interval="${out#*= }"

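# Pick a test node that has public addresses; this sets $test_node and
# $test_ip (among others) for use below.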
select_test_node_and_ips

test_port=2049

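# With NoIPTakeover=1 set on all nodes, public addresses will not be taken
# over when the test node is disabled below, so any reset of the connection
# must come from the releasing node itself.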
echo "Set NoIPTakeover=1 on all nodes"
try_command_on_node all $CTDB setvar NoIPTakeover 1

echo "Give the recovery daemon some time to reload tunables"
sleep_for 5

echo "Connecting to node ${test_node} on IP ${test_ip}:${test_port} with nc..."

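# Piping a long sleep into nc holds the TCP connection to the NFS port open
# in the background; 4 monitor intervals should comfortably outlast the
# waits below.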
sleep $((monitor_interval * 4)) | nc $test_ip $test_port &
nc_pid=$!
ctdb_test_exit_hook_add "kill $nc_pid >/dev/null 2>&1"

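# Find the client (source) socket of the nc connection; the helper leaves
# the address in $out.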
wait_until_get_src_socket "tcp" "${test_ip}:${test_port}" $nc_pid "nc"
src_socket="$out"
echo "Source socket is $src_socket"

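# The connection should appear in CTDB's tickle list on the test node
# within a couple of monitor intervals.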
echo "Wait until NFS connection is tracked by CTDB on test node ..."
wait_until $((monitor_interval * 2)) \
    check_tickles $test_node $test_ip $test_port $src_socket
cat "$outfile"

# It would be nice if ss consistently used local/peer instead of src/dst
ss_filter="src ${test_ip}:${test_port} dst ${src_socket}"

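# Confirm with ss on the test node that the connection is established;
# "tail -n +2" strips the header line that ss prints.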
try_command_on_node $test_node \
    "ss -tn state established '${ss_filter}' | tail -n +2"
if [ -z "$out" ] ; then
    echo "BAD: ss did not list the socket"
    exit 1
fi
echo "GOOD: ss lists the socket:"
cat "$outfile"

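# Disable the test node from another node.  Releasing the public address
# should reset the server end of the connection.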
echo "Disabling node $test_node"
try_command_on_node 1 $CTDB disable -n $test_node
wait_until_node_has_status $test_node disabled

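# Repeat the ss check: it should now come up empty, showing that the server
# end of the connection has been reset.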
try_command_on_node $test_node \
    "ss -tn state established '${ss_filter}' | tail -n +2"
if [ -n "$out" ] ; then
    echo "BAD: ss listed the socket after failover"
    exit 1
fi
echo "GOOD: ss no longer lists the socket"