1
0
mirror of https://github.com/samba-team/samba.git synced 2025-02-02 09:47:23 +03:00

Test suite: new tests and code factoring.

* 2 new tests for NFS failover.

* Factor repeated code from tests into new functions
  select_test_node_and_ips(), gratarp_sniff_start() and
  gratarp_sniff_wait_show().  Use these new functions in existing and
  new tests.

Signed-off-by: Martin Schwenke <martin@meltin.net>

(This used to be ctdb commit de0b58e18fcc0f90075fca74077ab62ae8dab5da)
This commit is contained in:
Martin Schwenke 2009-07-08 13:37:52 +10:00
parent 96b3517356
commit 168ec02adf
10 changed files with 242 additions and 134 deletions

View File

@ -57,23 +57,8 @@ try_command_on_node 0 $CTDB getvar MonitorInterval
monitor_interval="${out#*= }"
#echo "Monitor interval on node $test_node is $monitor_interval seconds."
echo "Getting list of public IPs..."
try_command_on_node 0 "$CTDB ip -n all | sed -e '1d'"
select_test_node_and_ips
# When selecting test_node we just want a node that has public IPs.
# This will work and is economically semi-randomly. :-)
read x test_node <<<"$out"
ips=""
while read ip pnn ; do
if [ "$pnn" = "$test_node" ] ; then
ips="${ips}${ips:+ }${ip}"
fi
done <<<"$out" # bashism to avoid problem setting variable in pipeline.
echo "Selected node ${test_node} with IPs: $ips"
test_ip="${ips%% *}"
test_port=2049
echo "Connecting to node ${test_node} on IP ${test_ip}:${test_port} with netcat..."

View File

@ -56,23 +56,8 @@ try_command_on_node 0 $CTDB getvar MonitorInterval
monitor_interval="${out#*= }"
#echo "Monitor interval on node $test_node is $monitor_interval seconds."
echo "Getting list of public IPs..."
try_command_on_node 0 "$CTDB ip -n all | sed -e '1d'"
select_test_node_and_ips
# When selecting test_node we just want a node that has public IPs.
# This will work and is economically semi-randomly. :-)
read x test_node <<<"$out"
ips=""
while read ip pnn ; do
if [ "$pnn" = "$test_node" ] ; then
ips="${ips}${ips:+ }${ip}"
fi
done <<<"$out" # bashism to avoid problem setting variable in pipeline.
echo "Selected node ${test_node} with IPs: $ips"
test_ip="${ips%% *}"
test_port=445
echo "Connecting to node ${test_node} on IP ${test_ip}:${test_port} with netcat..."

View File

@ -51,23 +51,7 @@ cluster_is_healthy
# Reset configuration
ctdb_restart_when_done
echo "Getting list of public IPs..."
try_command_on_node 0 "$CTDB ip -n all | sed -e '1d'"
# When selecting test_node we just want a node that has public IPs.
# This will work and is economically semi-randomly. :-)
read x test_node <<<"$out"
ips=""
while read ip pnn ; do
if [ "$pnn" = "$test_node" ] ; then
ips="${ips}${ips:+ }${ip}"
fi
done <<<"$out" # bashism to avoid problem setting variable in pipeline.
echo "Selected node ${test_node} with IPs: $ips"
test_ip="${ips%% *}"
select_test_node_and_ips
echo "Removing ${test_ip} from the local ARP table..."
arp -d $test_ip >/dev/null 2>&1 || true
@ -81,17 +65,13 @@ original_mac=$(arp -n $test_ip | awk '$2 == "ether" {print $3}')
echo "MAC address is: ${original_mac}"
filter="arp net ${test_ip}"
tcpdump_start "$filter"
gratarp_sniff_start
echo "Disabling node $test_node"
try_command_on_node 1 $CTDB disable -n $test_node
onnode 0 $CTDB_TEST_WRAPPER wait_until_node_has_status $test_node disabled
tcpdump_wait 2
echo "GOOD: this should be the gratuitous ARP and the reply:"
tcpdump_show
gratarp_sniff_wait_show
echo "Getting MAC address associated with ${test_ip} again..."
new_mac=$(arp -n $test_ip | awk '$2 == "ether" {print $3}')

View File

@ -45,23 +45,7 @@ cluster_is_healthy
# Reset configuration
ctdb_restart_when_done
echo "Getting list of public IPs..."
try_command_on_node 0 "$CTDB ip -n all | sed -e '1d'"
# When selecting test_node we just want a node that has public IPs.
# This will work and is economically semi-randomly. :-)
read x test_node <<<"$out"
ips=""
while read ip pnn ; do
if [ "$pnn" = "$test_node" ] ; then
ips="${ips}${ips:+ }${ip}"
fi
done <<<"$out" # bashism to avoid problem setting variable in pipeline.
echo "Selected node ${test_node} with IPs: $ips"
test_ip="${ips%% *}"
select_test_node_and_ips
echo "Removing ${test_ip} from the local ARP table..."
arp -d $test_ip >/dev/null 2>&1 || true
@ -69,17 +53,13 @@ arp -d $test_ip >/dev/null 2>&1 || true
echo "Pinging ${test_ip}..."
ping -q -n -c 1 $test_ip
filter="arp net ${test_ip}"
tcpdump_start "$filter"
gratarp_sniff_start
echo "Disabling node $test_node"
try_command_on_node 1 $CTDB disable -n $test_node
onnode 0 $CTDB_TEST_WRAPPER wait_until_node_has_status $test_node disabled
tcpdump_wait 2
echo "GOOD: this should be the gratuitous ARP and the reply:"
tcpdump_show
gratarp_sniff_wait_show
echo "Removing ${test_ip} from the local ARP table again..."
arp -d $test_ip >/dev/null 2>&1 || true

View File

@ -45,23 +45,7 @@ cluster_is_healthy
# Reset configuration
ctdb_restart_when_done
echo "Getting list of public IPs..."
try_command_on_node 0 "$CTDB ip -n all | sed -e '1d'"
# When selecting test_node we just want a node that has public IPs.
# This will work and is economically semi-randomly. :-)
read x test_node <<<"$out"
ips=""
while read ip pnn ; do
if [ "$pnn" = "$test_node" ] ; then
ips="${ips}${ips:+ }${ip}"
fi
done <<<"$out" # bashism to avoid problem setting variable in pipeline.
echo "Selected node ${test_node} with IPs: $ips"
test_ip="${ips%% *}"
select_test_node_and_ips
echo "Removing ${test_ip} from the local ARP table..."
arp -d $test_ip >/dev/null 2>&1 || true
@ -72,17 +56,13 @@ original_hostname=$(ssh $test_ip hostname)
echo "Hostname is: ${original_hostname}"
filter="arp net ${test_ip}"
tcpdump_start "$filter"
gratarp_sniff_start
echo "Disabling node $test_node"
try_command_on_node 1 $CTDB disable -n $test_node
onnode 0 $CTDB_TEST_WRAPPER wait_until_node_has_status $test_node disabled
tcpdump_wait 2
echo "GOOD: this should be the gratuitous ARP and the reply:"
tcpdump_show
gratarp_sniff_wait_show
echo "SSHing to ${test_ip} and running hostname (again)..."
new_hostname=$(ssh $test_ip hostname)

View File

@ -0,0 +1,86 @@
#!/bin/bash

test_info()
{
    cat <<EOF
Verify that a mounted NFS share is still operational after failover.

We mount an NFS share from a node, write a file via NFS and then
confirm that we can correctly read the file after a failover.

Prerequisites:

* An active CTDB cluster with at least 2 nodes with public addresses.

* Test must be run on a real or virtual cluster rather than against
  local daemons.

* Test must not be run from a cluster node.

Steps:

1. Verify that the cluster is healthy.
2. Select a public address and its corresponding node.
3. Select the 1st NFS share exported on the node.
4. Mount the selected NFS share.
5. Create a file in the NFS mount and calculate its checksum.
6. Disable the selected node.
7. Read the file and calculate its checksum.
8. Compare the checksums.

Expected results:

* When a node is disabled the public address fails over and it is
  possible to correctly read a file over NFS.  The checksums should be
  the same before and after.
EOF
}

. ctdb_test_functions.bash

set -e

ctdb_test_init "$@"

ctdb_test_check_real_cluster

cluster_is_healthy

# Reset configuration
ctdb_restart_when_done

# Sets $test_node, $test_node_ips, $test_ip.
select_test_node_and_ips

# Pick the first export advertised by the selected node's NFS server
# (line 2 of "showmount -e" output, everything before the first space).
first_export=$(showmount -e $test_ip | sed -n -e '2s/ .*//p')
mnt_d=$(mktemp -d)
test_file="${mnt_d}/$RANDOM"

# Ensure the scratch file and mount are cleaned up on any exit path.
ctdb_test_exit_hook_add rm -f "$test_file"
ctdb_test_exit_hook_add umount -f "$mnt_d"
ctdb_test_exit_hook_add rmdir "$mnt_d"

echo "Mounting ${test_ip}:${first_export} on ${mnt_d} ..."
mount -o timeo=1,hard,intr,vers=3 ${test_ip}:${first_export} ${mnt_d}

echo "Create file containing random data..."
dd if=/dev/urandom of=$test_file bs=1k count=1
original_sum=$(sum $test_file)
[ $? -eq 0 ]

gratarp_sniff_start

echo "Disabling node $test_node"
try_command_on_node 0 $CTDB disable -n $test_node
onnode 0 $CTDB_TEST_WRAPPER wait_until_node_has_status $test_node disabled

gratarp_sniff_wait_show

new_sum=$(sum $test_file)
[ $? -eq 0 ]

# BUG FIX: the comparison previously used the unset variables
# $original_md5/$new_md5 ("" = ""), so it always reported success.
# Compare the "sum" checksums that were actually computed above.
if [ "$original_sum" = "$new_sum" ] ; then
    echo "GOOD: file contents unchanged after failover"
else
    echo "BAD: file contents are different after failover"
    testfailures=1
fi

View File

@ -0,0 +1,106 @@
#!/bin/bash
test_info()
{
cat <<EOF
Verify that a file created on a node is readable via NFS after a failover.
We write a file into an exported directory on a node, mount the NFS
share from a node, verify that we can read the file via NFS and that
we can still read it after a failover.
Prerequisites:
* An active CTDB cluster with at least 2 nodes with public addresses.
* Test must be run on a real or virtual cluster rather than against
local daemons.
* Test must not be run from a cluster node.
Steps:
1. Verify that the cluster is healthy.
2. Select a public address and its corresponding node.
3. Select the 1st NFS share exported on the node.
4. Write a file into exported directory on the node and calculate its
checksum.
5. Mount the selected NFS share.
6. Read the file via the NFS mount and calculate its checksum.
7. Compare checksums.
8. Disable the selected node.
9. Read the file via NFS and calculate its checksum.
10. Compare the checksums.
Expected results:
* Checksums for the file on all 3 occasions should be the same.
EOF
}
. ctdb_test_functions.bash
set -e
ctdb_test_init "$@"
ctdb_test_check_real_cluster
cluster_is_healthy
# Reset configuration
ctdb_restart_when_done
# Sets $test_node, $test_node_ips, $test_ip.
select_test_node_and_ips
# First export advertised by the node's NFS server (line 2 of
# "showmount -e" output, everything before the first space).
first_export=$(showmount -e $test_ip | sed -n -e '2s/ .*//p')
local_f=$(mktemp)
mnt_d=$(mktemp -d)
nfs_f="${mnt_d}/$RANDOM"
# scp destination for the same file, addressed via the export path.
remote_f="${test_ip}:${first_export}/$(basename $nfs_f)"
# Clean up scratch files and the mount on any exit path.  Note that
# $nfs_f is removed through the NFS mount, so it must run before the
# umount hook (hooks run in reverse order of addition -- TODO confirm).
ctdb_test_exit_hook_add rm -f "$local_f"
ctdb_test_exit_hook_add rm -f "$nfs_f"
ctdb_test_exit_hook_add umount -f "$mnt_d"
ctdb_test_exit_hook_add rmdir "$mnt_d"
echo "Create file containing random data..."
dd if=/dev/urandom of=$local_f bs=1k count=1
local_sum=$(sum $local_f)
[ $? -eq 0 ]
# NOTE(review): assumes passwordless scp access to $test_ip -- verify
# against the test environment's prerequisites.
scp "$local_f" "$remote_f"
echo "Mounting ${test_ip}:${first_export} on ${mnt_d} ..."
mount -o timeo=1,hard,intr,vers=3 ${test_ip}:${first_export} ${mnt_d}
# Checksum as read back through the NFS mount; must match the local copy.
nfs_sum=$(sum $nfs_f)
if [ "$local_sum" = "$nfs_sum" ] ; then
echo "GOOD: file contents read correctly via NFS"
else
echo "BAD: file contents are different over NFS"
echo " original file: $local_sum"
echo " NFS file: $nfs_sum"
exit 1
fi
gratarp_sniff_start
echo "Disabling node $test_node"
try_command_on_node 0 $CTDB disable -n $test_node
onnode 0 $CTDB_TEST_WRAPPER wait_until_node_has_status $test_node disabled
gratarp_sniff_wait_show
# Re-read via NFS after failover; the hard mount should recover once
# the address moves and the gratuitous ARPs are seen.
new_sum=$(sum $nfs_f)
[ $? -eq 0 ]
if [ "$nfs_sum" = "$new_sum" ] ; then
echo "GOOD: file contents unchanged after failover"
else
echo "BAD: file contents are different after failover"
echo " original file: $nfs_sum"
echo " NFS file: $new_sum"
exit 1
fi

View File

@ -258,6 +258,27 @@ sanity_check_ips ()
return 1
}
select_test_node_and_ips ()
{
    try_command_on_node 0 "$CTDB ip -n all | sed -e '1d'"

    # When selecting test_node we just want a node that has public
    # IPs.  This will work and is economically semi-random.  :-)
    local junk
    read junk test_node <<<"$out"

    # Collect every public IP hosted by the selected node into a
    # space-separated list.  The here-string keeps the loop in the
    # current shell so the assignments survive.
    test_node_ips=""
    local addr owner
    while read addr owner ; do
	[ "$owner" = "$test_node" ] || continue
	test_node_ips="${test_node_ips}${test_node_ips:+ }${addr}"
    done <<<"$out"

    echo "Selected node ${test_node} with IPs: ${test_node_ips}."
    test_ip="${test_node_ips%% *}"
}
#######################################
# Wait until either timeout expires or command succeeds. The command
@ -544,6 +565,19 @@ tcptickle_sniff_wait_show ()
tcpdump_show
}
# Start capturing ARP traffic for $test_ip (set by
# select_test_node_and_ips) so that the gratuitous ARPs sent during a
# subsequent failover can be checked with gratarp_sniff_wait_show.
gratarp_sniff_start ()
{
tcpdump_start "arp host ${test_ip}"
}
# Wait until the sniff started by gratarp_sniff_start has captured at
# least 2 packets, then display them for the test log.
gratarp_sniff_wait_show ()
{
    tcpdump_wait 2

    # Message fix: was "this should be the some gratuitous ARPs".
    echo "GOOD: this should be some gratuitous ARPs:"
    tcpdump_show
}
#######################################

View File

@ -40,21 +40,7 @@ cluster_is_healthy
# Reset configuration
ctdb_restart_when_done
echo "Getting list of public IPs..."
try_command_on_node 0 "$CTDB ip -n all | sed -e '1d'"
# When selecting test_node we just want a node that has public IPs.
# This will work and is economically semi-randomly. :-)
read x test_node <<<"$out"
ips=""
while read ip pnn ; do
if [ "$pnn" = "$test_node" ] ; then
ips="${ips}${ips:+ }${ip}"
fi
done <<<"$out" # bashism to avoid problem setting variable in pipeline.
echo "Selected node ${test_node} with IPs: $ips"
select_test_node_and_ips
echo "Disabling node $test_node"
@ -63,7 +49,7 @@ try_command_on_node 1 $CTDB disable -n $test_node
# Avoid a potential race condition...
onnode 0 $CTDB_TEST_WRAPPER wait_until_node_has_status $test_node disabled
if wait_until_ips_are_on_nodeglob "[!${test_node}]" $ips ; then
if wait_until_ips_are_on_nodeglob "[!${test_node}]" $test_node_ips ; then
echo "All IPs moved."
else
echo "Some IPs didn't move."

View File

@ -44,28 +44,14 @@ set -e
cluster_is_healthy
echo "Getting list of public IPs..."
try_command_on_node 0 "$CTDB ip -n all | sed -e '1d'"
# When selecting test_node we just want a node that has public IPs.
# This will work and is economically semi-randomly. :-)
read x test_node <<<"$out"
ips=""
while read ip pnn ; do
if [ "$pnn" = "$test_node" ] ; then
ips="${ips}${ips:+ }${ip}"
fi
done <<<"$out" # bashism to avoid problem setting variable in pipeline.
echo "Selected node ${test_node} with IPs: $ips"
select_test_node_and_ips
echo "Disabling node $test_node"
try_command_on_node 1 $CTDB disable -n $test_node
onnode 0 $CTDB_TEST_WRAPPER wait_until_node_has_status $test_node disabled
if wait_until_ips_are_on_nodeglob "[!${test_node}]" $ips ; then
if wait_until_ips_are_on_nodeglob "[!${test_node}]" $test_node_ips ; then
echo "All IPs moved."
else
echo "Some IPs didn't move."
@ -79,7 +65,7 @@ onnode 0 $CTDB_TEST_WRAPPER wait_until_node_has_status $test_node enabled
# BUG: this is only guaranteed if DeterministicIPs is 1 and
# NoIPFailback is 0.
if wait_until_ips_are_on_nodeglob "$test_node" $ips ; then
if wait_until_ips_are_on_nodeglob "$test_node" $test_node_ips ; then
echo "All IPs moved."
else
echo "Some IPs didn't move."