glusterd: optimization of test cases

To reduce the overall time taken by every regression job for the glusterd
test cases, avoid duplicate tests by clubbing similar test cases into one.

The real time taken by all glusterd regression jobs without this patch is
1959 seconds; with this patch it is 1059 seconds.
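
As a rough sketch of the clubbing approach (illustrative only, not a file
from this patch): several small .t files that each start glusterd, create a
volume, run one check, and clean up are folded into a single script that does
the setup once and groups the individual checks under per-bug comments:

#!/bin/bash
# Illustrative consolidated test; bug-A/bug-B are placeholders and the
# commands are taken from the tests in this patch.
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}
TEST $CLI volume start $V0
#bug-A - volume top should work on a started volume
TEST $CLI volume top $V0 open
#bug-B - invalid option values must be rejected
TEST ! $CLI volume set $V0 cluster.post-op-delay-secs abc
cleanup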

For reference, see the document below:
https://docs.google.com/document/d/1u8o4-wocrsuPDI8BwuBU6yi_x4xA_pf2qSrFY6WEQpo/edit?usp=sharing

Change-Id: Ib14c61ace97e62c3abce47230dd40598640fe9cb
BUG: 1530905
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Sanju Rakonde 2018-01-04 10:35:29 +05:30 committed by Atin Mukherjee
parent 446ddbf1b1
commit 535fd517c6
93 changed files with 1576 additions and 2697 deletions


@ -0,0 +1,110 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
TEST $CLI volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
#bug-1102656 - validating volume top command
TEST $CLI volume top $V0 open
TEST ! $CLI volume top $V0 open brick $H0:/tmp/brick
TEST $CLI volume top $V0 read
TEST $CLI volume status
#bug- 1002556
EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}3
EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks';
TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}3 force
EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
TEST killall glusterd
TEST glusterd
EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
#bug-1406411- fail-add-brick-when-replica-count-changes
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
TEST kill_brick $V0 $H0 $B0/${V0}1
#add-brick should fail
TEST ! $CLI_NO_FORCE volume add-brick $V0 replica 3 $H0:$B0/${V0}3
TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}3
TEST $CLI volume create $V1 $H0:$B0/${V1}{1,2};
TEST $CLI volume start $V1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
TEST kill_brick $V1 $H0 $B0/${V1}1
#add-brick should fail
TEST ! $CLI_NO_FORCE volume add-brick $V1 replica 2 $H0:$B0/${V1}{3,4}
TEST $CLI volume start $V1 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
TEST $CLI volume add-brick $V1 replica 2 $H0:$B0/${V1}{3,4}
#bug-905307 - validate cluster.post-op-delay-secs option
#Strings should not be accepted.
TEST ! $CLI volume set $V0 cluster.post-op-delay-secs abc
#-ve ints should not be accepted.
TEST ! $CLI volume set $V0 cluster.post-op-delay-secs -1
#INT_MAX+1 should not be accepted.
TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 2147483648
#floats should not be accepted.
TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 1.25
#min val 0 should be accepted
TEST $CLI volume set $V0 cluster.post-op-delay-secs 0
EXPECT "0" volume_option $V0 cluster.post-op-delay-secs
#max val 2147483647 should be accepted
TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147483647
EXPECT "2147483647" volume_option $V0 cluster.post-op-delay-secs
#some middle val in range 2147 should be accepted
TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147
EXPECT "2147" volume_option $V0 cluster.post-op-delay-secs
#bug-1265479 - validate-replica-volume-options
#Setting data-self-heal option on for distribute-replicate volume
TEST $CLI volume set $V1 data-self-heal on
EXPECT 'on' volinfo_field $V1 'cluster.data-self-heal';
TEST $CLI volume set $V1 cluster.data-self-heal on
EXPECT 'on' volinfo_field $V1 'cluster.data-self-heal';
#Setting metadata-self-heal option on for distribute-replicate volume
TEST $CLI volume set $V1 metadata-self-heal on
EXPECT 'on' volinfo_field $V1 'cluster.metadata-self-heal';
TEST $CLI volume set $V1 cluster.metadata-self-heal on
#Setting entry-self-heal option on for distribute-replicate volume
TEST $CLI volume set $V1 entry-self-heal on
EXPECT 'on' volinfo_field $V1 'cluster.entry-self-heal';
TEST $CLI volume set $V1 cluster.entry-self-heal on
EXPECT 'on' volinfo_field $V1 'cluster.entry-self-heal';
cleanup


@ -4,30 +4,46 @@
. $(dirname $0)/../../traps.rc
. $(dirname $0)/../../volume.rc
function count_brick_processes {
pgrep glusterfsd | wc -l
}
function count_brick_pids {
$CLI --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
| grep -v "N/A" | sort | uniq | wc -l
}
cleanup;
function count_up_bricks {
$CLI --xml volume status | grep '<status>1' | wc -l
}
function count_brick_processes {
pgrep glusterfsd | wc -l
}
#bug-1451248 - validate brick mux after glusterd reboot
TEST glusterd
TEST $CLI volume set all cluster.brick-multiplex on
push_trapfunc "$CLI volume set all cluster.brick-multiplex off"
push_trapfunc "cleanup"
TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
TEST $CLI volume create $V1 $H0:$B0/${V1}{0,1}
TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3}
TEST $CLI volume start $V0
EXPECT 1 count_brick_processes
EXPECT 1 count_brick_pids
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count
pkill gluster
TEST glusterd
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count
TEST $CLI volume create $V1 $H0:$B0/${V1}{1..3}
TEST $CLI volume start $V1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_up_bricks
EXPECT 1 count_brick_processes
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
#bug-1446172 - reset brick with brick multiplexing enabled
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
# Create files
@ -38,7 +54,7 @@ done
TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1 start
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 3 count_up_bricks
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 5 online_brick_count
EXPECT 1 count_brick_processes
# Negative case with brick killed but volume-id xattr present
@ -47,9 +63,8 @@ TEST ! $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit
# reset-brick commit force should work and should bring up the brick
TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_up_bricks
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
EXPECT 1 count_brick_processes
TEST glusterfs --volfile-id=$V1 --volfile-server=$H0 $M1;
# Create files
for i in {1..5}
@ -58,8 +73,7 @@ do
done
TEST $CLI volume reset-brick $V1 $H0:$B0/${V1}1 start
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 3 count_up_bricks
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 5 online_brick_count
EXPECT 1 count_brick_processes
# Simulate reset disk
@ -75,5 +89,6 @@ setfattr -x trusted.gfid $B0/${V1}1
# the --wignore flag that essentially makes the command act like "commit force"
TEST $CLI_IGNORE_PARTITION volume reset-brick $V1 $H0:$B0/${V1}1 $H0:$B0/${V1}1 commit
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_up_bricks
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
EXPECT 1 count_brick_processes
cleanup


@ -0,0 +1,81 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
function count_brick_processes {
pgrep glusterfsd | wc -l
}
cleanup
#bug-1444596 - validating brick mux
TEST glusterd -LDEBUG
TEST $CLI volume create $V0 $H0:$B0/brick{0,1}
TEST $CLI volume create $V1 $H0:$B0/brick{2,3}
TEST $CLI volume set all cluster.brick-multiplex on
TEST $CLI volume start $V0
TEST $CLI volume start $V1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
EXPECT 1 count_brick_processes
#bug-1499509 - stop all the bricks when a brick process is killed
kill -9 $(pgrep glusterfsd)
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 online_brick_count
TEST $CLI volume start $V0 force
TEST $CLI volume start $V1 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
pkill glusterd
TEST glusterd
#Check brick status after restart glusterd
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
EXPECT 1 count_brick_processes
TEST $CLI volume set $V1 performance.cache-size 32MB
TEST $CLI volume stop $V1
TEST $CLI volume start $V1
#Check No. of brick processes after change option
EXPECT 2 count_brick_processes
pkill glusterd
TEST glusterd
#Check brick status after restart glusterd should not be NA
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
EXPECT 2 count_brick_processes
pkill glusterd
TEST glusterd
#Check brick status after restart glusterd should not be NA
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
EXPECT 2 count_brick_processes
#bug-1444596_brick_mux_posix_hlth_chk_status
TEST $CLI volume stop $V0
TEST $CLI volume delete $V0
TEST rm -rf $H0:$B0/brick{0,1}
#Check No. of brick processes after remove brick from back-end
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 online_brick_count
TEST glusterfs -s $H0 --volfile-id $V1 $M0
TEST touch $M0/file{1..10}
pkill glusterd
TEST glusterd -LDEBUG
sleep 5
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 online_brick_count
cleanup


@ -1,9 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
cleanup;
TEST glusterd
cleanup;


@ -1,25 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
TEST $CLI volume start $V0
EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}2
EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks';
TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}1 force
EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
TEST killall glusterd
TEST glusterd
EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
cleanup


@ -1,46 +0,0 @@
#!/bin/bash
#Test case: After a rebalance fix-layout, check if the rebalance status command
#displays the appropriate message at the CLI.
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
#Basic checks
TEST glusterd
TEST pidof glusterd
TEST $CLI volume info
#Create a 2x1 distributed volume
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
TEST $CLI volume start $V0
# Mount FUSE and create file/directory
TEST glusterfs -s $H0 --volfile-id $V0 $M0
for i in `seq 1 10`;
do
mkdir $M0/dir_$i
echo file>$M0/dir_$i/file_$i
for j in `seq 1 100`;
do
mkdir $M0/dir_$i/dir_$j
echo file>$M0/dir_$i/dir_$j/file_$j
done
done
#add 2 bricks
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{3,4};
#perform rebalance fix-layout
TEST $CLI volume rebalance $V0 fix-layout start
EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" fix-layout_status_field $V0;
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
TEST $CLI volume stop $V0
TEST $CLI volume delete $V0;
TEST ! $CLI volume info $V0;
cleanup;


@ -1,26 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
function check_peers {
$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
cleanup;
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0;
TEST $CLI_1 volume start $V0;
TEST $CLI_1 volume log rotate $V0;
TEST $CLI_1 volume status;
cleanup;


@ -1,53 +0,0 @@
#!/bin/bash
#Test case: Do not allow commit if the bricks are not decommissioned
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
#Basic checks
TEST glusterd
TEST pidof glusterd
TEST $CLI volume info
#Create a Distributed volume
TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2};
TEST $CLI volume start $V0
#Remove bricks and commit without starting
function remove_brick_commit_status {
$CLI volume remove-brick $V0 \
$H0:$B0/${V0}2 commit 2>&1 |grep -oE "success|decommissioned"
}
EXPECT "decommissioned" remove_brick_commit_status;
TEST $CLI volume stop $V0
TEST $CLI volume delete $V0
TEST ! $CLI volume info $V0
#Create a Distributed-Replicate volume
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..4};
TEST $CLI volume start $V0
#Try to reduce replica count with start option
function remove_brick_start_status {
$CLI volume remove-brick $V0 replica 1 \
$H0:$B0/${V0}1 $H0:$B0/${V0}3 start 2>&1 |grep -oE "success|failed"
}
EXPECT "failed" remove_brick_start_status;
#Remove bricks with commit option
function remove_brick_commit_status2 {
$CLI volume remove-brick $V0 replica 1 \
$H0:$B0/${V0}1 $H0:$B0/${V0}3 commit 2>&1 |
grep -oE "success|decommissioned"
}
EXPECT "decommissioned" remove_brick_commit_status2;
TEST $CLI volume stop $V0
TEST $CLI volume delete $V0
TEST ! $CLI volume info $V0
cleanup;


@ -1,31 +0,0 @@
#!/bin/bash
#Test case: Create a distributed replicate volume, and reduce
#replica count
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
#Basic checks
TEST glusterd
TEST pidof glusterd
TEST $CLI volume info
#Create a 2X3 distributed-replicate volume
TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..6};
TEST $CLI volume start $V0
# Reduce to 2x2 volume by specifying bricks in reverse order
function remove_brick_status {
$CLI volume remove-brick $V0 replica 2 \
$H0:$B0/${V0}6 $H0:$B0/${V0}3 force 2>&1 |grep -oE "success|failed"
}
EXPECT "success" remove_brick_status;
TEST $CLI volume stop $V0
TEST $CLI volume delete $V0;
TEST ! $CLI volume info $V0;
cleanup;


@ -1,19 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
cleanup;
volname="StartMigrationDuringRebalanceTest"
TEST glusterd
TEST pidof glusterd;
TEST $CLI volume info;
TEST $CLI volume create $volname $H0:$B0/${volname}{1,2};
TEST $CLI volume start $volname;
TEST $CLI volume rebalance $volname start;
cleanup;


@ -1,23 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
function check_peers {
$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
cleanup;
# Create a 2x2 dist-rep volume; peer probe a new node.
# Performing remove-brick from this new node must succeed
# without crashing it's glusterd
TEST launch_cluster 2;
TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/${V0}{1,2,3,4}
TEST $CLI_1 volume start $V0;
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
TEST $CLI_2 volume remove-brick $V0 $H1:$B1/${V0}{3,4} start;
TEST $CLI_2 volume info
cleanup;


@ -1,33 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \
$H0:$B0/${V0}2 $H0:$B0/${V0}3
TEST $CLI volume start $V0
## Mount FUSE
TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
TEST mkdir $M0/dir{1..10};
TEST touch $M0/dir{1..10}/files{1..10};
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4 $H0:/$B0/${V0}5
TEST $CLI volume rebalance $V0 start force
EXPECT_WITHIN 60 "completed" rebalance_status_field $V0
TEST pkill gluster
TEST glusterd
TEST pidof glusterd
# status should be "completed" immediate after glusterd has respawned.
EXPECT_WITHIN 5 "completed" rebalance_status_field $V0
cleanup;


@ -11,6 +11,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
#testcase: bug-1085330
# Construct volname string such that it's more than 256 characters
for i in {1..30}
@ -73,8 +74,20 @@ TEST ! $CLI volume create $volname $H0:$B0/$brick;
TEST $CLI volume info;
# Positive test case
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
TEST $CLI volume create $V0 $H0:$B0/${V0}1;
TEST $CLI volume info;
cleanup;
TEST $CLI volume start $V0;
#testcase: bug-916549
pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/);
brick_pid=$(cat $GLUSTERD_PIDFILEDIR/vols/$V0/$pid_file);
kill -SIGKILL $brick_pid;
TEST $CLI volume start $V0 force;
TEST process_leak_count $(pidof glusterd);
cleanup


@ -1,26 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../dht.rc
cleanup
#This script checks command "gluster volume rebalance <volname> status will not
#show any output when user have done only remove-brick start and command
#'gluster volume remove-brick <volname> <brick_name> status' will not show
#any output when user have triggered only rebalance start.
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
TEST $CLI volume start $V0
TEST $CLI volume rebalance $V0 start
TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}1 status
EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
TEST ! $CLI volume rebalance $V0 status
cleanup


@ -1,24 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd;
TEST pidof glusterd;
TEST $CLI volume info;
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
TEST $CLI volume start $V0;
TEST $CLI volume barrier $V0 enable;
TEST ! $CLI volume barrier $V0 enable;
TEST $CLI volume barrier $V0 disable;
TEST ! $CLI volume barrier $V0 disable;
cleanup


@ -1,30 +0,0 @@
#!/bin/bash
## Test case for BZ: 1094119 Remove replace-brick support from gluster
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
# Start glusterd
TEST glusterd
TEST pidof glusterd
## Lets create and start volume
TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2
TEST $CLI volume start $V0
## Now with this patch replace-brick only accept following commad
## volume replace-brick <VOLNAME> <SOURCE-BRICK> <NEW-BRICK> {commit force}
## Apart form this replace brick command will failed.
TEST ! $CLI volume replace-brick $V0 $H0:$B0/brick1 $H0:$B0/brick3 start
TEST ! $CLI volume replace-brick $V0 $H0:$B0/brick1 $H0:$B0/brick3 status
TEST ! $CLI volume replace-brick $V0 $H0:$B0/brick1 $H0:$B0/brick3 abort
## replace-brick commit force command should success
TEST $CLI volume replace-brick $V0 $H0:$B0/brick1 $H0:$B0/brick3 commit force
cleanup;


@ -1,19 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
cleanup;
TEST glusterd;
TEST pidof glusterd;
TEST $CLI volume info;
TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B1/brick1;
EXPECT 'Created' volinfo_field $V0 'Status';
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
TEST $CLI volume profile $V0 start
TEST $CLI volume profile $V0 info
cleanup;


@ -1,20 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
cleanup;
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1
TEST $CLI volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
TEST $CLI volume top $V0 open
TEST ! $CLI volume top $V0 open brick $H0:/tmp/brick
TEST $CLI volume top $V0 read
TEST $CLI volume status
TEST $CLI volume stop $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Stopped' volinfo_field $V0 'Status';
cleanup;


@ -1,47 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
function get_value()
{
local key=$1
local var="CLI_$2"
eval cli_index=\$$var
$cli_index volume info | grep "^$key"\
| sed 's/.*: //'
}
cleanup
TEST launch_cluster 2
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
EXPECT "$V0" get_value 'Volume Name' 1
EXPECT "Created" get_value 'Status' 1
TEST $CLI_1 volume start $V0
EXPECT "Started" get_value 'Status' 1
#Bring down 2nd glusterd
TEST kill_glusterd 2
#set the volume all options from the 1st glusterd
TEST $CLI_1 volume set all cluster.server-quorum-ratio 80
#Bring back the 2nd glusterd
TEST $glusterd_2
#Verify whether the value has been synced
EXPECT '80' get_value 'cluster.server-quorum-ratio' 1
EXPECT_WITHIN $PROBE_TIMEOUT '1' peer_count
EXPECT_WITHIN $PROBE_TIMEOUT '80' get_value 'cluster.server-quorum-ratio' 2
cleanup;


@ -1,50 +0,0 @@
#! /bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
# The test will attempt to verify that management handshake requests to
# GlusterD are authenticated before being allowed to change a GlusterD's
# op-version
#
# 1. Launch 3 glusterds
# 2. Probe 2 of them to form a cluster. This should succeed.
# 3. Probe either of the first two GlusterD's from the 3rd GlusterD. This should fail.
# 4. a. Reduce the op-version of 3rd GlusterD and restart it.
# b. Probe either of the first two GlusterD's from the 3rd GlusterD. This should fail.
# 5. Check current op-version of first two GlusterDs. It shouldn't have changed.
# 6. Probe third GlusterD from the cluster. This should succeed.
cleanup
TEST launch_cluster 3
TEST $CLI_1 peer probe $H2
TEST ! $CLI_3 peer probe $H1
GD1_WD=$($CLI_1 system getwd)
OP_VERS_ORIG=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
TEST $CLI_3 system uuid get # Needed for glusterd.info to be created
GD3_WD=$($CLI_3 system getwd)
TEST sed -rnie "'s/(operating-version=)\w+/\130600/gip'" ${GD3_WD}/glusterd.info
TEST kill_glusterd 3
TEST start_glusterd 3
TEST ! $CLI_3 peer probe $H1
OP_VERS_NEW=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
TEST [[ $OP_VERS_ORIG == $OP_VERS_NEW ]]
TEST $CLI_1 peer probe $H3
kill_node 1
kill_node 2
kill_node 3
cleanup;


@ -1,18 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{1..4}
TEST $CLI volume start $V0
TEST $CLI volume remove-brick $V0 $H0:$B0/brick{3..4} start
EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/brick3"
EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/brick4"
TEST $CLI volume remove-brick $V0 $H0:$B0/brick{3..4} commit
TEST $CLI volume remove-brick $V0 replica 1 $H0:$B0/brick2 force
cleanup;


@ -1,34 +0,0 @@
#!/bin/bash
## Test case for BZ-1121584. Execution of remove-brick status/stop command
## should give error for brick which is not part of volume.
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../dht.rc
cleanup;
## Start glusterd
TEST glusterd
TEST pidof glusterd
## Lets Create and start volume
TEST $CLI volume create $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
TEST $CLI volume start $V0
## Start remove-brick operation on the volume
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
## By giving non existing brick for remove-brick status/stop command should
## give error.
TEST ! $CLI volume remove-brick $V0 $H0:$B0/ABCD status
TEST ! $CLI volume remove-brick $V0 $H0:$B0/ABCD stop
## By giving brick which is part of volume for remove-brick status/stop command
## should print statistics of remove-brick operation or stop remove-brick
## operation.
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 status
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 stop
cleanup;


@ -1,37 +0,0 @@
#!/bin/bash
## Test case for cluster.min-free-disk option validation.
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
## Start glusterd
TEST glusterd
TEST pidof glusterd
## Lets create and start volume
TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2
TEST $CLI volume start $V0
## Setting invalid value for option cluster.min-free-disk should fail
TEST ! $CLI volume set $V0 min-free-disk ""
TEST ! $CLI volume set $V0 min-free-disk 143.!/12
TEST ! $CLI volume set $V0 min-free-disk 123%
TEST ! $CLI volume set $V0 min-free-disk 194.34%
## Setting fractional value as a size (unit is byte) for option
## cluster.min-free-disk should fail
TEST ! $CLI volume set $V0 min-free-disk 199.051
TEST ! $CLI volume set $V0 min-free-disk 111.999
## Setting valid value for option cluster.min-free-disk should pass
TEST $CLI volume set $V0 min-free-disk 12%
TEST $CLI volume set $V0 min-free-disk 56.7%
TEST $CLI volume set $V0 min-free-disk 120
TEST $CLI volume set $V0 min-free-disk 369.0000
cleanup;


@ -1,34 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
function check_peers {
$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
cleanup;
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
TEST $CLI_1 volume create $V0 $H1:$B1/$V0
TEST $CLI_1 volume create $V1 $H1:$B1/$V1
TEST $CLI_1 volume start $V0
TEST $CLI_1 volume start $V1
for i in {1..20}
do
$CLI_1 volume set $V0 diagnostics.client-log-level DEBUG &
$CLI_1 volume set $V1 barrier on
$CLI_2 volume set $V0 diagnostics.client-log-level DEBUG &
$CLI_2 volume set $V1 barrier on
done
EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
TEST $CLI_1 volume status
TEST $CLI_2 volume status
cleanup;


@ -1,82 +0,0 @@
#!/bin/bash
# Test case for quorum validation in glusterd for syncop framework
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
cleanup;
TEST launch_cluster 2
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
# Lets create the volume and set quorum type as a server
TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
# Start the volume
TEST $CLI_1 volume start $V0
# Set quorum ratio 52. means 52 % or more than 52% nodes of total available node
# should be available for performing volume operation.
# i.e. Server-side quorum is met if the number of nodes that are available is
# greater than or equal to 'quorum-ratio' times the number of nodes in the
# cluster
TEST $CLI_1 volume set all cluster.server-quorum-ratio 52
# Bring down 2nd glusterd
TEST kill_glusterd 2
# Now quorum is not meet. Add-brick, Remove-brick, volume-set command
#(Command based on syncop framework)should fail
TEST ! $CLI_1 volume add-brick $V0 $H1:$B1/${V0}1
TEST ! $CLI_1 volume remove-brick $V0 $H1:$B1/${V0}0 start
TEST ! $CLI_1 volume set $V0 barrier enable
# Now execute a command which goes through op state machine and it should fail
TEST ! $CLI_1 volume profile $V0 start
# Volume set all command and volume reset all command should be successful
TEST $CLI_1 volume set all cluster.server-quorum-ratio 80
TEST $CLI_1 volume reset all
# Bring back 2nd glusterd
TEST $glusterd_2
# After 2nd glusterd come back, there will be 2 nodes in a clusater
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
# Now quorum is meet.
# Add-brick, Remove-brick, volume-set command should success
TEST $CLI_1 volume add-brick $V0 $H2:$B2/${V0}2
TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 start
TEST $CLI_1 volume set $V0 barrier enable
TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 stop
## Stop the volume
TEST $CLI_1 volume stop $V0
## Bring down 2nd glusterd
TEST kill_glusterd 2
## Now quorum is not meet. Starting volume on 1st node should not success
TEST ! $CLI_1 volume start $V0
## Bring back 2nd glusterd
TEST $glusterd_2
# After 2nd glusterd come back, there will be 2 nodes in a clusater
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
## Now quorum is meet. Starting volume on 1st node should be success.
TEST $CLI_1 volume start $V0
# Now re-execute the same profile command and this time it should succeed
TEST $CLI_1 volume profile $V0 start
cleanup;


@ -1,37 +0,0 @@
#!/bin/bash
## Test case for option features.uss validation.
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
## Start glusterd
TEST glusterd;
TEST pidof glusterd;
## Lets create and start volume
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
TEST $CLI volume start $V0
## Set features.uss option with non-boolean value. These non-boolean value
## for features.uss option should fail.
TEST ! $CLI volume set $V0 features.uss abcd
TEST ! $CLI volume set $V0 features.uss #$#$
TEST ! $CLI volume set $V0 features.uss 2324
## Setting other options with valid value. These options should succeed.
TEST $CLI volume set $V0 barrier enable
TEST $CLI volume set $V0 ping-timeout 60
## Set features.uss option with valid boolean value. It should succeed.
TEST $CLI volume set $V0 features.uss enable
TEST $CLI volume set $V0 features.uss disable
## Setting other options with valid value. These options should succeed.
TEST $CLI volume set $V0 barrier enable
TEST $CLI volume set $V0 ping-timeout 60
cleanup;


@ -1,22 +0,0 @@
#!/bin/bash
## Test case for BZ-1199451 (gluster command should retrieve current op-version
## of the NODE)
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
## Start glusterd
TEST glusterd
TEST pidof glusterd
## Lets create and start volume
TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2
TEST $CLI volume start $V0
## glusterd command should retrieve current op-version of the node
TEST $CLI volume get $V0 cluster.op-version
cleanup;


@ -1,72 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../nfs.rc
cleanup;
## Start glusterd
TEST glusterd;
TEST pidof glusterd;
## Lets create volume
TEST $CLI volume create $V0 $H0:$B0/${V0};
## Verify volume is created
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
## Start volume and verify
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
##enable the bitrot and verify bitd is running or not
TEST $CLI volume bitrot $V0 enable
EXPECT 'on' volinfo_field $V0 'features.bitrot'
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
##Do reset force which set the bitrot options to default
TEST $CLI volume reset $V0 force;
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_bitd_count
##enable the uss option and verify snapd is running or not
TEST $CLI volume set $V0 features.uss on
EXPECT 'on' volinfo_field $V0 'features.uss'
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_snapd_count
##Do reset force which set the uss options to default
TEST $CLI volume reset $V0 force;
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_snapd_count
##verify initial nfs disabled by default
EXPECT "0" get_nfs_count
##enable nfs and verify
TEST $CLI volume set $V0 nfs.disable off
EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
EXPECT "1" get_nfs_count
##Do reset force which set the nfs.option to default
TEST $CLI volume reset $V0 force;
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_nfs_count
##enable the uss option and verify snapd is running or not
TEST $CLI volume set $V0 features.uss on
EXPECT 'on' volinfo_field $V0 'features.uss'
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_snapd_count
##Disable the uss option using set command and verify snapd
TEST $CLI volume set $V0 features.uss off
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_snapd_count
##enable nfs.disable and verify
TEST $CLI volume set $V0 nfs.disable on
EXPECT 'on' volinfo_field $V0 'nfs.disable'
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_nfs_count
## disable nfs.disable option using set command
TEST $CLI volume set $V0 nfs.disable off
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_nfs_count
cleanup;


@ -1,26 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
cleanup
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
TEST $CLI_1 volume start $V0
kill_glusterd 2
TEST start_glusterd 2
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
#volume stop should not crash
TEST $CLI_2 volume stop $V0
# check whether glusterd instance is running on H2 as this is the node which
# restored the volume configuration after a restart
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
cleanup


@ -1,32 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
cleanup;
TEST launch_cluster 2;
# Fool the cluster to operate with 3.5 version even though binary's op-version
# is > 3.5. This is to ensure 3.5 code path is hit to test that volume status
# works when a node is upgraded from 3.5 to 3.7 or higher as mgmt_v3 lock is
# been introduced in 3.6 version and onwards
GD1_WD=$($CLI_1 system getwd)
$CLI_1 system uuid get
TEST sed -rnie "'s/(operating-version=)\w+/\130500/gip'" ${GD1_WD}/glusterd.info
TEST kill_glusterd 1
TEST start_glusterd 1
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
TEST $CLI_1 volume start $V0
TEST $CLI_1 volume status $V0;
cleanup;


@ -1,35 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
TEST $CLI volume start $V0
#kill a brick process
kill_brick $V0 $H0 $B0/${V0}1
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status $V0 $H0 $B0/${V0}1
#remove-brick start should fail as the brick is down
TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
#remove-brick start should succeed as the brick is up
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}1"
#kill a brick process
kill_brick $V0 $H0 $B0/${V0}1
#remove-brick commit should pass even if the brick is down
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 commit
cleanup;


@ -1,31 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
$CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
$CLI_1 volume start $V0
EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
#Mount FUSE
TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
TEST mkdir $M0/dir{1..4};
TEST touch $M0/dir{1..4}/files{1..4};
TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
TEST $CLI_1 volume rebalance $V0 start
EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
cleanup;


@ -1,16 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
cleanup;
TEST glusterd;
TEST pidof glusterd;
GDWD=$($CLI system getwd)
# glusterd.info file will be created on either first peer probe or volume
# creation, hence we expect file to be not present in this case
TEST ! -e $GDWD/glusterd.info
cleanup;


@ -1,25 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
TEST $CLI volume start $V0
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
# Replace brick1 without killing the brick
TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1_new commit force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
TEST kill_brick $V0 $H0 $B0/${V0}1_new
# Replace brick1 after killing the brick
TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1_new $H0:$B0/${V0}1_newer commit force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
cleanup;


@ -1,56 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
cleanup
TEST launch_cluster 3;
TEST $CLI_1 peer probe $H2;
TEST $CLI_1 peer probe $H3;
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
TEST $CLI_1 volume start $V0
kill_glusterd 2
#remove-brick should fail as the peer hosting the brick is down
TEST ! $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
TEST start_glusterd 2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
#volume status should work
TEST $CLI_2 volume status
TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
kill_glusterd 2
#remove-brick commit should fail as the peer hosting the brick is down
TEST ! $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} commit
TEST start_glusterd 2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
#volume status should work
TEST $CLI_2 volume status
TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} stop
kill_glusterd 3
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
TEST start_glusterd 3
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
TEST $CLI_3 volume status
cleanup


@ -1,67 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
#Basic checks
TEST glusterd
TEST pidof glusterd
TEST $CLI volume info
#Create a distributed volume
TEST $CLI volume create $V0 $H0:$B0/${V00}{1..2};
TEST $CLI volume start $V0
#Setting data-self-heal option on for distribute volume
TEST ! $CLI volume set $V0 data-self-heal on
EXPECT '' volinfo_field $V0 'cluster.data-self-heal';
TEST ! $CLI volume set $V0 cluster.data-self-heal on
EXPECT '' volinfo_field $V0 'cluster.data-self-heal';
#Setting metadata-self-heal option on for distribute volume
TEST ! $CLI volume set $V0 metadata-self-heal on
EXPECT '' volinfo_field $V0 'cluster.metadata-self-heal';
TEST ! $CLI volume set $V0 cluster.metadata-self-heal on
EXPECT '' volinfo_field $V0 'cluster.metadata-self-heal';
#Setting entry-self-heal option on for distribute volume
TEST ! $CLI volume set $V0 entry-self-heal on
EXPECT '' volinfo_field $V0 'cluster.entrydata-self-heal';
TEST ! $CLI volume set $V0 cluster.entry-self-heal on
EXPECT '' volinfo_field $V0 'cluster.entrydata-self-heal';
#Delete the volume
TEST $CLI volume stop $V0
TEST $CLI volume delete $V0;
#Create a distribute-replicate volume
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
TEST $CLI volume start $V0
#Setting data-self-heal option on for distribute-replicate volume
TEST $CLI volume set $V0 data-self-heal on
EXPECT 'on' volinfo_field $V0 'cluster.data-self-heal';
TEST $CLI volume set $V0 cluster.data-self-heal on
EXPECT 'on' volinfo_field $V0 'cluster.data-self-heal';
#Setting metadata-self-heal option on for distribute-replicate volume
TEST $CLI volume set $V0 metadata-self-heal on
EXPECT 'on' volinfo_field $V0 'cluster.metadata-self-heal';
TEST $CLI volume set $V0 cluster.metadata-self-heal on
EXPECT 'on' volinfo_field $V0 'cluster.metadata-self-heal';
#Setting entry-self-heal option on for distribute-replicate volume
TEST $CLI volume set $V0 entry-self-heal on
EXPECT 'on' volinfo_field $V0 'cluster.entry-self-heal';
TEST $CLI volume set $V0 cluster.entry-self-heal on
EXPECT 'on' volinfo_field $V0 'cluster.entry-self-heal';
#Delete the volume
TEST $CLI volume stop $V0
TEST $CLI volume delete $V0;
cleanup;


@ -1,36 +0,0 @@
#!/bin/bash
## Test case for BZ 1266818;
## Disabling enable-shared-storage option should not delete user created
## volume with name glusterd_shared_storage
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
cleanup;
## Start a 2 node virtual cluster
TEST launch_cluster 2;
## Peer probe server 2 from server 1 cli
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
## Creating a volume with name glusterd_shared_storage
TEST $CLI_1 volume create glusterd_shared_storage $H1:$B1/${V0}0 $H2:$B1/${V0}1
## Disabling enable-shared-storage should not succeed and should not delete the
## user created volume with name "glusterd_shared_storage"
TEST ! $CLI_1 volume all enable-shared-storage disable
## Volume with name should exist
TEST $CLI_1 volume info glusterd_shared_storage
cleanup;


@ -1,31 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
cleanup;
TEST launch_cluster 4;
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
TEST $CLI_1 volume start $V0
TEST $CLI_1 peer probe $H3;
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
TEST $CLI_1 peer probe $H4;
EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count
# peers hosting bricks can't be detached
TEST ! $CLI_3 peer detach $H1
TEST ! $CLI_3 peer detach $H2
# peer not hosting bricks should be detachable
TEST $CLI_3 peer detach $H4
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
cleanup;


@ -1,14 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
TEST $CLI volume set $V0 group virt;
cleanup;


@ -1,22 +0,0 @@
#! /bin/bash
. $(dirname $0)/../../include.rc
# The test validates that lowering down the op-version should fail
cleanup
TEST glusterd
TEST pidof glusterd
#volume create is just to ensure glusterd.info file is created
TEST $CLI volume create $V0 $H0:$B0/b1
GDWD=$($CLI system getwd)
OP_VERS_ORIG=$(grep 'operating-version' ${GDWD}/glusterd.info | cut -d '=' -f 2)
OP_VERS_NEW=`expr $OP_VERS_ORIG-1`
TEST ! $CLI volume set all $V0 cluster.op-version $OP_VERS_NEW
cleanup;


@ -1,31 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../env.rc
. $(dirname $0)/../../snapshot.rc
cleanup;
TEST verify_lvm_version
TEST glusterd
TEST pidof glusterd
TEST setup_lvm 1
TEST $CLI volume create $V0 $H0:$L1
TEST $CLI volume start $V0
TEST $CLI volume status $V0;
TEST touch $GLUSTERD_WORKDIR/vols/file
TEST $CLI snapshot create snap1 $V0 no-timestamp
TEST touch $GLUSTERD_WORKDIR/snaps/snap1/file
TEST killall_gluster
TEST glusterd
cleanup;


@ -1,24 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
## Check that opRet field has correct value assigned for non existent volumes
## --------------------------------------------------------------------------
function get_opret_value () {
local VOL=$1
$CLI volume info $VOL --xml | sed -ne 's/.*<opRet>\([-0-9]*\)<\/opRet>/\1/p'
}
cleanup;
TEST glusterd;
TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/$V0;
EXPECT 0 get_opret_value $V0
EXPECT -1 get_opret_value "novol"
cleanup;


@ -1,39 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
. $(dirname $0)/../../volume.rc
function volume_get_field()
{
local vol=$1
local field=$2
$CLI_2 volume get $vol $field | tail -1 | awk '{print $2}'
}
cleanup;
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
TEST $CLI_1 volume start $V0
EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
#kill glusterd2 and do a volume set command to change the version
kill_glusterd 2
TEST $CLI_1 volume set $V0 performance.write-behind off
TEST start_glusterd 2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
#Check for handshake completion.
EXPECT_WITHIN $PROBE_TIMEOUT 'off' volume_get_field $V0 'write-behind'
#During handshake, if we failed to populate real_path,
#then volume create will fail.
TEST $CLI_1 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1


@ -1,19 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
cleanup;
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST $CLI_1 volume create $V0 $H1:$B1/$V0
TEST kill_glusterd 2
TEST ! $CLI_1 volume delete $V0
cleanup;


@ -1,63 +0,0 @@
#!/bin/bash
# Test case to check if bricks are down when quorum is not met
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
cleanup;
TEST launch_cluster 3
TEST $CLI_1 peer probe $H2;
TEST $CLI_1 peer probe $H3;
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
# Lets create the volume and set quorum type as a server
TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}2 $H3:$B3/${V0}3
TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
# Start the volume
TEST $CLI_1 volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H3 $B3/${V0}3
# Bring down 2nd and 3rd glusterd
TEST kill_glusterd 2
TEST kill_glusterd 3
EXPECT_WITHIN $PROBE_TIMEOUT 0 peer_count
# Server quorum is not met. Brick on 1st node must be down
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status_1 $V0 $H1 $B1/${V0}1
# Set quorum ratio 95. means 95 % or more than 95% nodes of total available node
# should be available for performing volume operation.
# i.e. Server-side quorum is met if the number of nodes that are available is
# greater than or equal to 'quorum-ratio' times the number of nodes in the
# cluster
TEST $CLI_1 volume set all cluster.server-quorum-ratio 95
# Bring back 2nd glusterd
TEST $glusterd_2
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
# Server quorum is still not met. Bricks should be down on 1st and 2nd nodes
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status_1 $V0 $H1 $B1/${V0}1
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status_1 $V0 $H2 $B2/${V0}2
# Bring back 3rd glusterd
TEST $glusterd_3
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
# Server quorum is met now. Bricks should be up on all nodes
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H3 $B3/${V0}3
cleanup;


@ -1,59 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
. $(dirname $0)/../../volume.rc
cleanup;
function get_rebalanced_info()
{
local rebal_info_key=$2
$CLI volume rebalance $1 status | awk '{print $'$rebal_info_key'}' |sed -n 3p| sed 's/ *$//g'
}
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3};
TEST $CLI volume start $V0;
#Mount volume and create data
TEST glusterfs -s $H0 --volfile-id $V0 $M0;
TEST mkdir $M0/dir{1..10}
TEST touch $M0/dir{1..10}/file{1..10}
# Add-brick and start rebalance
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4
TEST $CLI volume rebalance $V0 start
EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
#Rebalance info before glusterd restart
OLD_REBAL_FILES=$(get_rebalanced_info $V0 2)
OLD_SIZE=$(get_rebalanced_info $V0 3)
OLD_SCANNED=$(get_rebalanced_info $V0 4)
OLD_FAILURES=$(get_rebalanced_info $V0 5)
OLD_SKIPPED=$(get_rebalanced_info $V0 6)
pkill glusterd;
pkill glusterfsd;
TEST glusterd
#Rebalance info after glusterd restart
NEW_REBAL_FILES=$(get_rebalanced_info $V0 2)
NEW_SIZE=$(get_rebalanced_info $V0 3)
NEW_SCANNED=$(get_rebalanced_info $V0 4)
NEW_FAILURES=$(get_rebalanced_info $V0 5)
NEW_SKIPPED=$(get_rebalanced_info $V0 6)
#Check rebalance info before and after glusterd restart
TEST [ $OLD_REBAL_FILES == $NEW_REBAL_FILES ]
TEST [ $OLD_SIZE == $NEW_SIZE ]
TEST [ $OLD_SCANNED == $NEW_SCANNED ]
TEST [ $OLD_FAILURES == $NEW_FAILURES ]
TEST [ $OLD_SKIPPED == $NEW_SKIPPED ]
cleanup;


@ -1,37 +0,0 @@
#!/bin/bash
# Test case for checking whether the brick process(es) come up on a two node
# cluster if one of them is already down and other is going through a restart
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
cleanup;
TEST launch_cluster 2
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
# Lets create the volume
TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}2
# Start the volume
TEST $CLI_1 volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2
# Bring down all the gluster processes
TEST killall_gluster
#Bring back 1st glusterd and check whether the brick process comes back
TEST $glusterd_1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
#Enabling quorum should bring down the brick
TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" brick_up_status_1 $V0 $H1 $B1/${V0}1
cleanup;


@ -1,40 +0,0 @@
#!/bin/bash
# Test case to check for successful startup of volume bricks on glusterd restart
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
cleanup;
TEST launch_cluster 2
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
# Lets create the volume and set quorum type as a server
TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/${V0}1 $H2:$B2/${V0}2
TEST $CLI_1 volume create $V1 replica 2 $H1:$B1/${V1}1 $H2:$B2/${V1}2
# Start the volume
TEST $CLI_1 volume start $V0
TEST $CLI_1 volume start $V1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V1 $H1 $B1/${V1}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V1 $H2 $B2/${V1}2
# Restart 2nd glusterd
TEST kill_glusterd 2
TEST $glusterd_2
# Check if all bricks are up
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V1 $H1 $B1/${V1}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V1 $H2 $B2/${V1}2
cleanup;


@ -1,40 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd;
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
TEST $CLI volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
TEST kill_brick $V0 $H0 $B0/${V0}1
#add-brick should fail
TEST ! $CLI_NO_FORCE volume add-brick $V0 replica 3 $H0:$B0/${V0}3
TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}3
TEST $CLI volume create $V1 $H0:$B0/${V1}{1,2};
TEST $CLI volume start $V1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
TEST kill_brick $V1 $H0 $B0/${V1}1
#add-brick should fail
TEST ! $CLI_NO_FORCE volume add-brick $V1 replica 2 $H0:$B0/${V1}{3,4}
TEST $CLI volume start $V1 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
TEST $CLI volume add-brick $V1 replica 2 $H0:$B0/${V1}{3,4}
cleanup;


@ -1,40 +0,0 @@
#!/bin/bash
# Test case for checking when server-quorum-ratio value is changed on one
# glusterd where the other is down, the other changes done get synced back
# properly when the glusterd is brought up.
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
cleanup;
TEST launch_cluster 2
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
# Lets create & start the volume
TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
# Start the volume
TEST $CLI_1 volume start $V0
TEST $CLI_1 volume set $V0 performance.readdir-ahead on
# Bring down 2nd glusterd
TEST kill_glusterd 2
TEST $CLI_1 volume set all cluster.server-quorum-ratio 60
TEST $CLI_1 volume set $V0 performance.readdir-ahead off
# Bring back 2nd glusterd
TEST $glusterd_2
# After 2nd glusterd come back, there will be 2 nodes in a clusater
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
EXPECT_WITHIN $PROBE_TIMEOUT "60" volinfo_field_2 all cluster.server-quorum-ratio
EXPECT_WITHIN $PROBE_TIMEOUT "off" volinfo_field_2 $V0 performance.readdir-ahead
cleanup;


@ -1,14 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
cleanup;
## Start glusterd
TEST glusterd;
TEST pidof glusterd;
TEST ! $CLI peer probe invalid-peer
TEST pidof glusterd;
cleanup;


@ -1,68 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
function count_up_bricks {
$CLI --xml volume status $1 | grep '<status>1' | wc -l
}
function count_brick_processes {
pgrep glusterfsd | wc -l
}
cleanup
TEST glusterd
TEST $CLI volume create $V0 $H0:$B0/brick{0,1}
TEST $CLI volume create $V1 $H0:$B0/brick{2,3}
TEST $CLI volume set all cluster.brick-multiplex on
TEST $CLI volume start $V0
TEST $CLI volume start $V1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks $V1
EXPECT 1 count_brick_processes
pkill glusterd
TEST glusterd
#Check brick status after restart glusterd
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks $V1
TEST $CLI volume stop $V0
TEST $CLI volume stop $V1
cleanup
TEST glusterd
TEST $CLI volume create $V0 $H0:$B0/brick{0,1}
TEST $CLI volume create $V1 $H0:$B0/brick{2,3}
TEST $CLI volume set all cluster.brick-multiplex on
TEST $CLI volume start $V0
TEST $CLI volume start $V1
EXPECT 1 count_brick_processes
TEST $CLI volume set $V0 performance.cache-size 32MB
TEST $CLI volume stop $V0
TEST $CLI volume start $V0
#Check No. of brick processes after change option
EXPECT 2 count_brick_processes
pkill glusterd
TEST glusterd
#Check brick status after restart glusterd should not be NA
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks $V1
EXPECT 2 count_brick_processes
cleanup


@ -1,47 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
function count_up_bricks {
$CLI --xml volume status $1 | grep '<status>1' | wc -l
}
function count_brick_processes {
pgrep glusterfsd | wc -l
}
cleanup
TEST glusterd -LDEBUG
TEST $CLI volume create $V0 $H0:$B0/brick{0,1}
TEST $CLI volume create $V1 $H0:$B0/brick{2,3}
TEST $CLI volume set all cluster.brick-multiplex on
TEST $CLI volume start $V0
TEST $CLI volume start $V1
EXPECT 1 count_brick_processes
TEST $CLI volume stop $V0
TEST $CLI volume delete $V0
TEST rm -rf $H0:$B0/brick{0,1}
#Check No. of brick processes after remove brick from back-end
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks $V1
EXPECT 1 count_brick_processes
TEST glusterfs -s $H0 --volfile-id $V1 $M0
TEST touch $M0/file{1..10}
pkill glusterd
TEST glusterd -LDEBUG
sleep 5
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks $V1
cleanup


@ -1,54 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../traps.rc
. $(dirname $0)/../../volume.rc
function count_up_bricks {
$CLI --xml volume status all | grep '<status>1' | wc -l
}
function count_brick_processes {
pgrep glusterfsd | wc -l
}
function count_brick_pids {
$CLI --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
| grep -v "N/A" | sort | uniq | wc -l
}
cleanup;
TEST glusterd
TEST $CLI volume set all cluster.brick-multiplex on
push_trapfunc "$CLI volume set all cluster.brick-multiplex off"
push_trapfunc "cleanup"
TEST $CLI volume create $V0 $H0:$B0/brick{0..2}
TEST $CLI volume start $V0
EXPECT 1 count_brick_processes
EXPECT 1 count_brick_pids
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_up_bricks
pkill gluster
TEST glusterd
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_up_bricks
pkill glusterd
TEST glusterd
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_up_bricks
TEST $CLI volume create $V1 $H0:$B0/brick{3..5}
TEST $CLI volume start $V1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 count_up_bricks


@ -1,25 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
cleanup;
## Setting Port number in specific range
sysctl net.ipv4.ip_local_reserved_ports="24007-24008,32765-32768,49152-49156"
## Start a 2 node virtual cluster
TEST launch_cluster 2;
## Peer probe server 2 from server 1 cli
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
sysctl net.ipv4.ip_local_reserved_ports="
"
cleanup;


@ -1,25 +0,0 @@
#!/bin/bash
#Test case: glusterd should disallow a volume level option to be set cluster
#wide and glusterd should not crash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
#Basic checks
TEST glusterd
TEST pidof glusterd
#Create a 2x1 distributed volume
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
TEST $CLI volume start $V0
TEST ! $CLI volume set all transport.listen-backlog 128
# Check the volume info output; if glusterd had crashed, this command
# would fail
TEST $CLI volume info $V0;
cleanup;

View File

@ -1,58 +0,0 @@
#!/bin/bash
# Test case for quorum validation in glusterd for syncop framework
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
cleanup;
TEST launch_cluster 3
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST $CLI_1 peer probe $H3;
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
# Let's create the volume and set the quorum type to server
TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/${V0}0 $H2:$B2/${V0}1 $H3:$B3/${V0}2
TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
# Start the volume
TEST $CLI_1 volume start $V0
# Set the quorum ratio to 95, meaning 95% or more of the total nodes must be
# available for a volume operation to be performed.
# i.e. server-side quorum is met if the number of nodes that are available is
# greater than or equal to 'quorum-ratio' times the number of nodes in the
# cluster
TEST $CLI_1 volume set all cluster.server-quorum-ratio 95
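# Worked example of the ratio above (illustrative; it assumes glusterd rounds
# the required node count up): with 3 peers and a 95% ratio, 3 * 95 / 100 is
# 2.85, so all 3 nodes must stay up and losing any single glusterd breaks
# server quorum.
nodes=3; ratio=95
echo "nodes required: $(( (nodes * ratio + 99) / 100 ))"    # prints 3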
# Bring down 2nd glusterd
TEST kill_glusterd 2
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
# Now quorum is not met. Execute the replace-brick command;
# this command should fail as the cluster is not in quorum
TEST ! $CLI_1 volume replace-brick $V0 $H2:$B2/${V0}1 $H1:$B1/${V0}1_new commit force
# Bring 2nd glusterd up
TEST start_glusterd 2
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
# Checking peer_count alone is not enough to conclude that quorum is regained,
# as peer_count is based on peerinfo->connected whereas quorum is calculated
# based on peerinfo->quorum_contrib. To avoid the spurious race where
# replace-brick commit force executes and fails before quorum is regained, run
# the command inside EXPECT_WITHIN so that it is retried until it goes through
# once quorum is back.
# Now quorum is met and replace-brick will execute successfully.
EXPECT_WITHIN $PEER_SYNC_TIMEOUT 0 attempt_replace_brick 1 $V0 $H2:$B2/${V0}1 $H1:$B1/${V0}1_new
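# A minimal sketch of what a retry-friendly wrapper like attempt_replace_brick
# presumably looks like (the real helper comes from the shared test includes;
# this hypothetical version is only for illustration): it runs replace-brick
# through the given node's CLI and echoes the exit status so that
# EXPECT_WITHIN can poll for 0.
attempt_replace_brick_sketch () {
        local node=$1 vol=$2 src=$3 dst=$4
        eval \$CLI_$node volume replace-brick $vol $src $dst commit force
        echo $?
}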
#cleanup;

View File

@ -1,27 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
cleanup
TEST glusterd
TEST pidof glusterd
## Enable brick multiplexing
TEST $CLI volume set all cluster.brick-multiplex on
## creating two 1x3 replicated volumes
TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}_{1..3}
TEST $CLI volume create $V1 replica 3 $H0:$B1/${V1}_{1..3}
## Start the volume
TEST $CLI volume start $V0
TEST $CLI volume start $V1
kill -9 $(pgrep glusterfsd)
EXPECT 0 online_brick_count
cleanup

View File

@ -1,24 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
cleanup;
function check_peers {
$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
TEST launch_cluster 3
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
TEST $CLI_1 volume create $V0 replica 2 $H1:$B0/${V0} $H2:$B0/${V0}
TEST $CLI_1 volume start $V0
# Negative case with brick not killed && volume-id xattrs present
TEST ! $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} $H1:$B0/${V0} commit force
TEST $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} start
# Now test if reset-brick commit force works
TEST $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} $H1:$B0/${V0} commit force
cleanup;

View File

@ -1,62 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
## Start and create a volume
TEST glusterd;
TEST pidof glusterd;
TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
## Verify volume is created
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
## Start volume and verify
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
## Setting soft-timeout as 20
TEST $CLI volume set $V0 features.soft-timeout 20
EXPECT '20' volinfo_field $V0 'features.soft-timeout';
## Enabling features.quota-deem-statfs should fail while quota is disabled
TEST ! $CLI volume set $V0 features.quota-deem-statfs on
EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
## Enabling quota
TEST $CLI volume quota $V0 enable
EXPECT 'on' volinfo_field $V0 'features.quota'
## Setting soft-timeout as 20
TEST $CLI volume set $V0 features.soft-timeout 20
EXPECT '20' volinfo_field $V0 'features.soft-timeout';
## Enabling features.quota-deem-statfs
TEST $CLI volume set $V0 features.quota-deem-statfs on
EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
## Disabling quota
TEST $CLI volume quota $V0 disable
EXPECT 'off' volinfo_field $V0 'features.quota'
EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
EXPECT '' volinfo_field $V0 'features.soft-timeout'
## Setting soft-timeout as 30
TEST $CLI volume set $V0 features.soft-timeout 30
EXPECT '30' volinfo_field $V0 'features.soft-timeout';
## Disabling features.quota-deem-statfs should fail once quota is disabled
TEST ! $CLI volume set $V0 features.quota-deem-statfs off
EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
## Finish up
TEST $CLI volume stop $V0
EXPECT 'Stopped' volinfo_field $V0 'Status';
TEST $CLI volume delete $V0;
TEST ! $CLI volume info $V0;
cleanup;

View File

@ -1,48 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
## Start and create a volume
TEST glusterd;
TEST pidof glusterd;
TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
## Verify volume is created
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
## Start volume and verify
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
## Setting performance cache min size as 2MB
TEST $CLI volume set $V0 performance.cache-min-file-size 2MB
EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
## Setting performance cache max size as 20MB
TEST $CLI volume set $V0 performance.cache-max-file-size 20MB
EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
## Trying to set performance cache min size to 25MB should fail as it exceeds the 20MB max
TEST ! $CLI volume set $V0 performance.cache-min-file-size 25MB
EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
## The cache min size can be set as long as it is less than the max size
TEST $CLI volume set $V0 performance.cache-min-file-size 15MB
EXPECT '15MB' volinfo_field $V0 'performance.cache-min-file-size';
## Setting cache-max-file-size to 10MB should fail as it is below the current min size (15MB)
TEST ! $CLI volume set $V0 cache-max-file-size 10MB
EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
## Finish up
TEST $CLI volume stop $V0;
EXPECT 'Stopped' volinfo_field $V0 'Status';
TEST $CLI volume delete $V0;
TEST ! $CLI volume info $V0;
cleanup;

View File

@ -1,31 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/${V0}1
TEST $CLI volume set $V0 cluster.server-quorum-type server
EXPECT "server" volume_option $V0 cluster.server-quorum-type
TEST $CLI volume set $V0 cluster.server-quorum-type none
EXPECT "none" volume_option $V0 cluster.server-quorum-type
TEST $CLI volume reset $V0 cluster.server-quorum-type
TEST ! $CLI volume set $V0 cluster.server-quorum-type abc
TEST ! $CLI volume set all cluster.server-quorum-type none
TEST ! $CLI volume set $V0 cluster.server-quorum-ratio 100
TEST ! $CLI volume set all cluster.server-quorum-ratio abc
TEST ! $CLI volume set all cluster.server-quorum-ratio -1
TEST ! $CLI volume set all cluster.server-quorum-ratio 100.0000005
TEST $CLI volume set all cluster.server-quorum-ratio 0
EXPECT "0" volume_option $V0 cluster.server-quorum-ratio
TEST $CLI volume set all cluster.server-quorum-ratio 100
EXPECT "100" volume_option $V0 cluster.server-quorum-ratio
TEST $CLI volume set all cluster.server-quorum-ratio 0.0000005
EXPECT "0.0000005" volume_option $V0 cluster.server-quorum-ratio
TEST $CLI volume set all cluster.server-quorum-ratio 100%
EXPECT "100%" volume_option $V0 cluster.server-quorum-ratio
cleanup;

View File

@ -1,70 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
glusterd;
TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
TEST ! $CLI volume set $V0 statedump-path ""
TEST ! $CLI volume set $V0 statedump-path " "
TEST $CLI volume set $V0 statedump-path "/home/"
EXPECT "/home/" volume_option $V0 server.statedump-path
TEST ! $CLI volume set $V0 background-self-heal-count ""
TEST ! $CLI volume set $V0 background-self-heal-count " "
TEST $CLI volume set $V0 background-self-heal-count 10
EXPECT "10" volume_option $V0 cluster.background-self-heal-count
TEST ! $CLI volume set $V0 cache-size ""
TEST ! $CLI volume set $V0 cache-size " "
TEST $CLI volume set $V0 cache-size 512MB
EXPECT "512MB" volume_option $V0 performance.cache-size
TEST ! $CLI volume set $V0 self-heal-daemon ""
TEST ! $CLI volume set $V0 self-heal-daemon " "
TEST $CLI volume set $V0 self-heal-daemon on
EXPECT "on" volume_option $V0 cluster.self-heal-daemon
TEST ! $CLI volume set $V0 read-subvolume ""
TEST ! $CLI volume set $V0 read-subvolume " "
TEST $CLI volume set $V0 read-subvolume $V0-client-0
EXPECT "$V0-client-0" volume_option $V0 cluster.read-subvolume
TEST ! $CLI volume set $V0 data-self-heal-algorithm ""
TEST ! $CLI volume set $V0 data-self-heal-algorithm " "
TEST ! $CLI volume set $V0 data-self-heal-algorithm on
TEST $CLI volume set $V0 data-self-heal-algorithm full
EXPECT "full" volume_option $V0 cluster.data-self-heal-algorithm
TEST ! $CLI volume set $V0 min-free-inodes ""
TEST ! $CLI volume set $V0 min-free-inodes " "
TEST $CLI volume set $V0 min-free-inodes 60%
EXPECT "60%" volume_option $V0 cluster.min-free-inodes
TEST ! $CLI volume set $V0 min-free-disk ""
TEST ! $CLI volume set $V0 min-free-disk " "
TEST $CLI volume set $V0 min-free-disk 60%
EXPECT "60%" volume_option $V0 cluster.min-free-disk
TEST $CLI volume set $V0 min-free-disk 120
EXPECT "120" volume_option $V0 cluster.min-free-disk
TEST ! $CLI volume set $V0 frame-timeout ""
TEST ! $CLI volume set $V0 frame-timeout " "
TEST $CLI volume set $V0 frame-timeout 0
EXPECT "0" volume_option $V0 network.frame-timeout
TEST ! $CLI volume set $V0 auth.allow ""
TEST ! $CLI volume set $V0 auth.allow " "
TEST $CLI volume set $V0 auth.allow 192.168.122.1
EXPECT "192.168.122.1" volume_option $V0 auth.allow
TEST ! $CLI volume set $V0 stripe-block-size ""
TEST ! $CLI volume set $V0 stripe-block-size " "
TEST $CLI volume set $V0 stripe-block-size 512MB
EXPECT "512MB" volume_option $V0 cluster.stripe-block-size
cleanup;

View File

@ -1,46 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
cleanup;
V1="patchy2"
TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
function check_brick()
{
vol=$1;
num=$2
$CLI volume info $V0 | grep "Brick$num" | awk '{print $2}';
}
function volinfo_field()
{
local vol=$1;
local field=$2;
$CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
}
function brick_count()
{
local vol=$1;
$CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
}
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
EXPECT '2' brick_count $V0
EXPECT "$H0:$B0/${V0}1" check_brick $V0 '1';
EXPECT "$H0:$B0/${V0}2" check_brick $V0 '2';
TEST ! $CLI volume create $V1 $H0:$B0/${V1}0 $H0:$B0/${V0}1;
cleanup;

View File

@ -1,29 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
cleanup;
TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
TEST $CLI volume create $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3;
function brick_count()
{
local vol=$1;
$CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
}
TEST $CLI volume start $V0
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force;
EXPECT '2' brick_count $V0
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 force;
EXPECT '1' brick_count $V0
cleanup;

View File

@ -1,24 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
# Check that XML output is generated correctly for volume status of a single
# brick that resides on another peer while no async tasks are running.
function get_peer_count {
$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
cleanup
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 get_peer_count
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
TEST $CLI_1 volume start $V0
TEST $CLI_1 volume status $V0 $H2:$B2/$V0 --xml
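# Illustrative extra check (not part of the original test; it assumes xmllint
# is available on the node): the XML emitted for a single remote brick can be
# verified for well-formedness before any parsing is attempted.
command -v xmllint >/dev/null && $CLI_1 volume status $V0 $H2:$B2/$V0 --xml | xmllint --noout -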
TEST $CLI_1 volume stop $V0
cleanup

View File

@ -1,56 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
function check_peers {
$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
function volume_count {
local cli=$1;
if [ $cli -eq '1' ] ; then
$CLI_1 volume info | grep 'Volume Name' | wc -l;
else
$CLI_2 volume info | grep 'Volume Name' | wc -l;
fi
}
cleanup;
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
TEST $CLI_1 volume start $V0
b="B1";
#Create an extra file in the originator's volume store
touch ${!b}/glusterd/vols/$V0/run/file
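#Note on ${!b} above (illustrative): this is bash indirect expansion, i.e. it
#expands to the value of the variable whose name is stored in $b, so with
#b="B1" the path is rooted at $B1 (node 1's backend directory). A standalone
#example of the same mechanism:
x="HOME"; echo "${!x}"    # prints the value of $HOME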
TEST $CLI_1 volume stop $V0
#Test for self-commit failure
TEST $CLI_1 volume delete $V0
#Check whether delete succeeded on both the nodes
EXPECT "0" volume_count '1'
EXPECT "0" volume_count '2'
#Check whether the volume name can be reused after deletion
TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
TEST $CLI_1 volume start $V0
#Create an extra file in the peer's volume store
touch ${!b}/glusterd/vols/$V0/run/file
TEST $CLI_1 volume stop $V0
#Test for commit failure on the other node
TEST $CLI_2 volume delete $V0
EXPECT "0" volume_count '1';
EXPECT "0" volume_count '2';
cleanup;

View File

@ -1,36 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
TEST pidof glusterd
#test functionality of post-op-delay-secs
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
#Strings should not be accepted.
TEST ! $CLI volume set $V0 cluster.post-op-delay-secs abc
#-ve ints should not be accepted.
TEST ! $CLI volume set $V0 cluster.post-op-delay-secs -1
#INT_MAX+1 should not be accepted.
TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 2147483648
#floats should not be accepted.
TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 1.25
#min val 0 should be accepted
TEST $CLI volume set $V0 cluster.post-op-delay-secs 0
EXPECT "0" volume_option $V0 cluster.post-op-delay-secs
#max val 2147483647 should be accepted
TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147483647
EXPECT "2147483647" volume_option $V0 cluster.post-op-delay-secs
#some middle val in range 2147 should be accepted
TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147
EXPECT "2147" volume_option $V0 cluster.post-op-delay-secs
cleanup;

View File

@ -1,14 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
cleanup;
TEST glusterd;
TEST pidof glusterd;
TEST ! $CLI volume set $V0 performance.open-behind off;
TEST pidof glusterd;
cleanup;

View File

@ -1,57 +0,0 @@
#!/bin/bash
# Test that a volume becomes unwritable when the cluster loses quorum.
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
function check_fs {
df $1 &> /dev/null
echo $?
}
function check_peers {
$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
function online_brick_count {
$CLI_1 --xml volume status | grep '<status>1' | wc -l
}
cleanup;
TEST launch_cluster 3; # start 3-node virtual cluster
TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
TEST $CLI_1 volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count;
TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
# Kill one pseudo-node, make sure the others survive and volume stays up.
TEST kill_node 3;
EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 online_brick_count;
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
# Kill another pseudo-node, make sure the last one dies and volume goes down.
TEST kill_node 2;
EXPECT_WITHIN $PROBE_TIMEOUT 0 check_peers
#two glusterfsds of the other two glusterds must be dead
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 online_brick_count;
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 check_fs $M0;
TEST $glusterd_2;
TEST $glusterd_3;
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count; # restore quorum, all ok
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
cleanup

View File

@ -1,19 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
cleanup;
TEST glusterd;
TEST $CLI volume create $V0 $H0:$B0/${V0}1;
TEST $CLI volume start $V0;
pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/);
brick_pid=$(cat $GLUSTERD_PIDFILEDIR/vols/$V0/$pid_file);
kill -SIGKILL $brick_pid;
TEST $CLI volume start $V0 force;
TEST process_leak_count $(pidof glusterd);
cleanup;

View File

@ -1,46 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
function check_peers {
$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
cleanup;
#setup cluster and test volume
TEST launch_cluster 3; # start 3-node virtual cluster
TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0
TEST $CLI_1 volume start $V0
TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
#kill a node
TEST kill_node 3
#modify volume config to see change in volume-sync
TEST $CLI_1 volume set $V0 write-behind off
#add some files to the volume to see effect of volume-heal cmd
TEST touch $M0/{1..100};
TEST $CLI_1 volume stop $V0;
TEST $glusterd_3;
EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
TEST $CLI_3 volume start $V0;
TEST $CLI_2 volume stop $V0;
TEST $CLI_2 volume delete $V0;
cleanup;
TEST glusterd;
TEST $CLI volume create $V0 $H0:$B0/$V0
TEST $CLI volume start $V0
pkill glusterd;
pkill glusterfsd;
TEST glusterd
TEST $CLI volume status $V0
cleanup;

View File

@ -1,27 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
TEST pidof glusterd
function get_brick_host_uuid()
{
local vol=$1;
local uuid_regex='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
local host_uuid_list=$($CLI volume info $vol --xml | grep "brick.uuid" | grep -o -E "$uuid_regex");
echo $host_uuid_list | awk '{print $1}'
}
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=`
EXPECT $uuid get_brick_host_uuid $V0
TEST $CLI volume delete $V0;
TEST ! $CLI volume info $V0;
cleanup;

View File

@ -1,21 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd;
TEST pidof glusterd;
TEST $CLI volume info;
touch $GLUSTERD_WORKDIR/groups/test
echo "read-ahead=off" > $GLUSTERD_WORKDIR/groups/test
echo "open-behind=off" >> $GLUSTERD_WORKDIR/groups/test
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
TEST $CLI volume set $V0 group test
EXPECT "off" volume_option $V0 performance.read-ahead
EXPECT "off" volume_option $V0 performance.open-behind
cleanup;

View File

@ -1,48 +0,0 @@
#!/bin/bash
#Test case: Fail remove-brick 'start' variant when reducing the replica count of a volume.
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
#Basic checks
TEST glusterd
TEST pidof glusterd
TEST $CLI volume info
#Create a 3x3 dist-rep volume
TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5,6,7,8};
TEST $CLI volume start $V0
# Mount FUSE and create file/directory
TEST glusterfs -s $H0 --volfile-id $V0 $M0
TEST touch $M0/zerobytefile.txt
TEST mkdir $M0/test_dir
TEST dd if=/dev/zero of=$M0/file bs=1024 count=1024
function remove_brick_start {
$CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{1,4,7} start 2>&1|grep -oE 'success|failed'
}
function remove_brick {
$CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{1,4,7} force 2>&1|grep -oE 'success|failed'
}
#remove-brick start variant
#Actual message displayed at cli is:
#"volume remove-brick start: failed: Rebalancing not needed when reducing replica count. Try without the 'start' option"
EXPECT "failed" remove_brick_start;
#remove-brick commit-force
#Actual message displayed at cli is:
#"volume remove-brick commit force: success"
EXPECT "success" remove_brick
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
TEST $CLI volume stop $V0
TEST $CLI volume delete $V0;
TEST ! $CLI volume info $V0;
cleanup;

View File

@ -1,36 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3};
TEST $CLI volume start $V0;
# Start a remove-brick and try to start a rebalance/remove-brick without committing
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
TEST ! $CLI volume rebalance $V0 start
TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field \
"$V0" "$H0:$B0/${V0}1"
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 commit
gluster volume status
TEST $CLI volume rebalance $V0 start
EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
TEST $CLI volume rebalance $V0 stop
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop
TEST $CLI volume stop $V0
cleanup;

View File

@ -1,30 +0,0 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
function check_peers {
$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
function volume_count {
local cli=$1;
if [ $cli -eq '1' ] ; then
$CLI_1 volume info | grep 'Volume Name' | wc -l;
else
$CLI_2 volume info | grep 'Volume Name' | wc -l;
fi
}
cleanup;
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
TEST $CLI_1 volume start $V0
TEST $CLI_1 volume remove-brick $V0 $H2:$B2/$V0 start
TEST $CLI_1 volume status
cleanup;

View File

@ -0,0 +1,82 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
function peer_count {
eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
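## Illustrative note: the eval above expands \$CLI_$1 into the per-node CLI
## variable ($CLI_1, $CLI_2, ...) selected by the numeric argument. A minimal
## sketch of the same pattern (hypothetical helper, not part of the test):
run_on_node () {
        local n=$1; shift
        eval \$CLI_$n "$@"
}
## e.g. run_on_node 2 volume status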
cleanup;
## start a 3 node virtual cluster
TEST launch_cluster 3;
## peer probe server 2 from server 1 cli
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
#test case for bug-1266818 - disabling the enable-shared-storage option
##should not delete a user-created volume named glusterd_shared_storage
## creating a volume named glusterd_shared_storage
TEST $CLI_1 volume create glusterd_shared_storage $H1:$B1/${V0}0 $H2:$B2/${V0}1
TEST $CLI_1 volume start glusterd_shared_storage
## disabling enable-shared-storage should not succeed and should not delete the
## user created volume with name "glusterd_shared_storage"
TEST ! $CLI_1 volume all enable-shared-storage disable
## the volume with that name should still exist
TEST $CLI_1 volume info glusterd_shared_storage
#testcase: bug-1245045-remove-brick-validation
TEST $CLI_1 peer probe $H3;
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
TEST $CLI_1 volume start $V0
kill_glusterd 2
#remove-brick should fail as the peer hosting the brick is down
TEST ! $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
TEST $glusterd_2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
#volume status should work
TEST $CLI_2 volume status
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 3
TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
kill_glusterd 2
#remove-brick commit should fail as the peer hosting the brick is down
TEST ! $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} commit
TEST $glusterd_2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
#volume status should work
TEST $CLI_2 volume status
TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} stop
kill_glusterd 3
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
TEST start_glusterd 3
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
TEST $CLI_3 volume status
cleanup

View File

@ -0,0 +1,59 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
function check_peers {
$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
cleanup
TEST launch_cluster 3
TEST $CLI_1 peer probe $H2
#bug-1109741 - validate mgmt handshake
TEST ! $CLI_3 peer probe $H1
GD1_WD=$($CLI_1 system getwd)
OP_VERS_ORIG=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
TEST $CLI_3 system uuid get # Needed for glusterd.info to be created
GD3_WD=$($CLI_3 system getwd)
TEST sed -rnie "'s/(operating-version=)\w+/\130600/gip'" ${GD3_WD}/glusterd.info
TEST kill_glusterd 3
TEST start_glusterd 3
TEST ! $CLI_3 peer probe $H1
OP_VERS_NEW=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
TEST [[ $OP_VERS_ORIG == $OP_VERS_NEW ]]
#bug-948686 - volume sync after bringing up the killed node
TEST $CLI_1 peer probe $H3
EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0
TEST $CLI_1 volume start $V0
TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
#kill a node
TEST kill_node 3
#modify volume config to see change in volume-sync
TEST $CLI_1 volume set $V0 write-behind off
#add some files to the volume to see effect of volume-heal cmd
TEST touch $M0/{1..100};
TEST $CLI_1 volume stop $V0;
TEST $glusterd_3;
EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
TEST $CLI_3 volume start $V0;
TEST $CLI_2 volume stop $V0;
TEST $CLI_2 volume delete $V0;
cleanup

View File

@ -0,0 +1,97 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
. $(dirname $0)/../../volume.rc
function peer_count {
eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
cleanup;
#bug-1454418 - setting port numbers in a specific range
sysctl net.ipv4.ip_local_reserved_ports="24007-24008,32765-32768,49152-49156"
TEST launch_cluster 3;
#bug-1223213
# Fool the cluster into operating at the 3.5 op-version even though the
# binary's op-version is higher. This ensures the 3.5 code path is hit, to
# test that volume status works when a node is upgraded from 3.5 to 3.7 or
# higher, as the mgmt_v3 lock was introduced in 3.6 and onwards
GD1_WD=$($CLI_1 system getwd)
$CLI_1 system uuid get
Old_op_version=$(cat ${GD1_WD}/glusterd.info | grep operating-version | cut -d '=' -f 2)
TEST sed -rnie "'s/(operating-version=)\w+/\130500/gip'" ${GD1_WD}/glusterd.info
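# What the sed above does (an illustrative sketch, assuming GNU sed as the
# tests already do): glusterd.info carries a line of the form
# "operating-version=<number>", and the expression rewrites that number to
# 30500 so this node pretends to run the 3.5 op-version. A standalone example
# on a throw-away file with a made-up version number:
tmpinfo=$(mktemp)
echo "operating-version=31200" > $tmpinfo
sed -r -i 's/(operating-version=)[0-9]+/\130500/' $tmpinfo
cat $tmpinfo    # prints operating-version=30500
rm -f $tmpinfo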
TEST kill_glusterd 1
TEST start_glusterd 1
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
TEST `sed -i "s/"30500"/${Old_op_version}/g" ${GD1_WD}/glusterd.info`
TEST kill_glusterd 1
TEST start_glusterd 1
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 2
#bug-1454418
sysctl net.ipv4.ip_local_reserved_ports="
"
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
TEST $CLI_1 volume start $V0
#bug-888752 - volume status --xml from peer in the cluster
TEST $CLI_1 volume status $V0 $H2:$B2/$V0 --xml
TEST $CLI_1 volume stop $V0
TEST $CLI_1 volume delete $V0
TEST $CLI_1 volume create $V0 $H1:$B1/$V0
TEST $CLI_1 volume create $V1 $H1:$B1/$V1
TEST $CLI_1 peer probe $H3;
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
TEST $CLI_1 volume start $V0
TEST $CLI_1 volume start $V1
#bug-1173414 - validate mgmt-v3-remote-lock-failure
for i in {1..20}
do
$CLI_1 volume set $V0 diagnostics.client-log-level DEBUG &
$CLI_1 volume set $V1 barrier on
$CLI_2 volume set $V0 diagnostics.client-log-level DEBUG &
$CLI_2 volume set $V1 barrier on
done
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
TEST $CLI_1 volume status
TEST $CLI_2 volume status
#bug-1293414 - validate peer detach
# peers hosting bricks cannot be detached
TEST ! $CLI_2 peer detach $H1
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
# peer not hosting bricks should be detachable
TEST $CLI_2 peer detach $H3
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
#bug-1344407 - deleting a volume when peer is down should fail
TEST kill_glusterd 2
TEST ! $CLI_1 volume delete $V0
cleanup

View File

@ -0,0 +1,273 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../nfs.rc
function get_opret_value () {
local VOL=$1
$CLI volume info $VOL --xml | sed -ne 's/.*<opRet>\([-0-9]*\)<\/opRet>/\1/p'
}
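# Illustrative check of the sed expression above (not part of the original
# test): <opRet> is 0 on success and -1 for a non-existent volume, which is
# exactly what is asserted further down.
echo "<opRet>-1</opRet>" | sed -ne 's/.*<opRet>\([-0-9]*\)<\/opRet>/\1/p'    # prints -1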
function check_brick()
{
vol=$1;
num=$2
$CLI volume info $V0 | grep "Brick$num" | awk '{print $2}';
}
function brick_count()
{
local vol=$1;
$CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
}
function get_brick_host_uuid()
{
local vol=$1;
local uuid_regex='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
local host_uuid_list=$($CLI volume info $vol --xml | grep "brick.uuid" | grep -o -E "$uuid_regex");
echo $host_uuid_list | awk '{print $1}'
}
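# Illustrative check of the UUID regex above (not part of the original test;
# the sample UUID is made up): each brick in "volume info --xml" carries a
# uuid, and the first UUID extracted is later compared against this node's
# UUID from glusterd.info.
echo 'brick uuid="3a5d7f2c-9d01-4b6e-8f3a-0c2b1d4e5f6a"' | grep -o -E '[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'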
cleanup;
TEST glusterd;
TEST pidof glusterd;
#bug-1238135-lazy-daemon-initialization-on-demand
GDWD=$($CLI system getwd)
# The glusterd.info file is created on either the first peer probe or volume
# creation, hence we expect the file to be absent at this point
TEST ! -e $GDWD/glusterd.info
#bug-913487 - setting volume options before creation of volume should fail
TEST ! $CLI volume set $V0 performance.open-behind off;
TEST pidof glusterd;
#bug-1433578 - glusterd should not crash after probing an invalid peer
TEST ! $CLI peer probe invalid-peer
TEST pidof glusterd;
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
EXPECT 'Created' volinfo_field $V0 'Status';
#bug-955588 - uuid validation
uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=`
EXPECT $uuid get_brick_host_uuid $V0
TEST $CLI volume delete $V0;
TEST ! $CLI volume info $V0;
#bug-958790 - set options from file
touch $GLUSTERD_WORKDIR/groups/test
echo "read-ahead=off" > $GLUSTERD_WORKDIR/groups/test
echo "open-behind=off" >> $GLUSTERD_WORKDIR/groups/test
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
TEST $CLI volume set $V0 group test
EXPECT "off" volume_option $V0 performance.read-ahead
EXPECT "off" volume_option $V0 performance.open-behind
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
#bug-1321836 - validate opret value for non existing volume
EXPECT 0 get_opret_value $V0
EXPECT -1 get_opret_value "novol"
EXPECT '2' brick_count $V0
#bug-862834 - validate brick status
EXPECT "$H0:$B0/${V0}1" check_brick $V0 '1';
EXPECT "$H0:$B0/${V0}2" check_brick $V0 '2';
TEST ! $CLI volume create $V1 $H0:$B0/${V1}0 $H0:$B0/${V0}1;
#bug-1482344 - setting volume-option-at-cluster-level should not result in glusterd crash
TEST ! $CLI volume set all transport.listen-backlog 128
# Check the volume info output; if glusterd had crashed, this command
# would fail
TEST $CLI volume info $V0;
#bug-1002556 and bug-1199451 - command should retrieve current op-version of the node
TEST $CLI volume get all cluster.op-version
#bug-1315186 - reject-lowering-down-op-version
OP_VERS_ORIG=$(grep 'operating-version' ${GDWD}/glusterd.info | cut -d '=' -f 2)
OP_VERS_NEW=`expr $OP_VERS_ORIG - 1`
TEST ! $CLI volume set all $V0 cluster.op-version $OP_VERS_NEW
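# Note on the op-version arithmetic above (illustrative): expr needs its
# operands and operator as separate words, which is why the spaces matter.
# The same value can be computed with plain shell arithmetic, shown here with
# a hypothetical variable name:
lower_op_version=$(( OP_VERS_ORIG - 1 ))    # same value as $OP_VERS_NEW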
#bug-1022055 - validate log rotate command
TEST $CLI volume log rotate $V0;
#bug-1092841 - validating barrier enable/disable
TEST $CLI volume barrier $V0 enable;
TEST ! $CLI volume barrier $V0 enable;
TEST $CLI volume barrier $V0 disable;
TEST ! $CLI volume barrier $V0 disable;
#bug-1095097 - validate volume profile command
TEST $CLI volume profile $V0 start
TEST $CLI volume profile $V0 info
#bug-839595 - validate server-quorum options
TEST $CLI volume set $V0 cluster.server-quorum-type server
EXPECT "server" volume_option $V0 cluster.server-quorum-type
TEST $CLI volume set $V0 cluster.server-quorum-type none
EXPECT "none" volume_option $V0 cluster.server-quorum-type
TEST $CLI volume reset $V0 cluster.server-quorum-type
TEST ! $CLI volume set $V0 cluster.server-quorum-type abc
TEST ! $CLI volume set all cluster.server-quorum-type none
TEST ! $CLI volume set $V0 cluster.server-quorum-ratio 100
TEST ! $CLI volume set all cluster.server-quorum-ratio abc
TEST ! $CLI volume set all cluster.server-quorum-ratio -1
TEST ! $CLI volume set all cluster.server-quorum-ratio 100.0000005
TEST $CLI volume set all cluster.server-quorum-ratio 0
EXPECT "0" volume_option $V0 cluster.server-quorum-ratio
TEST $CLI volume set all cluster.server-quorum-ratio 100
EXPECT "100" volume_option $V0 cluster.server-quorum-ratio
TEST $CLI volume set all cluster.server-quorum-ratio 0.0000005
EXPECT "0.0000005" volume_option $V0 cluster.server-quorum-ratio
TEST $CLI volume set all cluster.server-quorum-ratio 100%
EXPECT "100%" volume_option $V0 cluster.server-quorum-ratio
#bug-1265479 - validate-distributed-volume-options
#Setting data-self-heal option on for distribute volume
TEST ! $CLI volume set $V0 data-self-heal on
EXPECT '' volinfo_field $V0 'cluster.data-self-heal';
TEST ! $CLI volume set $V0 cluster.data-self-heal on
EXPECT '' volinfo_field $V0 'cluster.data-self-heal';
#Setting metadata-self-heal option on for distribute volume
TEST ! $CLI volume set $V0 metadata-self-heal on
EXPECT '' volinfo_field $V0 'cluster.metadata-self-heal';
TEST ! $CLI volume set $V0 cluster.metadata-self-heal on
EXPECT '' volinfo_field $V0 'cluster.metadata-self-heal';
#Setting entry-self-heal option on for distribute volume
TEST ! $CLI volume set $V0 entry-self-heal on
EXPECT '' volinfo_field $V0 'cluster.entrydata-self-heal';
TEST ! $CLI volume set $V0 cluster.entry-self-heal on
EXPECT '' volinfo_field $V0 'cluster.entrydata-self-heal';
#bug-1163108 - validate min-free-disk-option
## Setting invalid value for option cluster.min-free-disk should fail
TEST ! $CLI volume set $V0 min-free-disk ""
TEST ! $CLI volume set $V0 min-free-disk 143.!/12
TEST ! $CLI volume set $V0 min-free-disk 123%
TEST ! $CLI volume set $V0 min-free-disk 194.34%
## Setting fractional value as a size (unit is byte) for option
## cluster.min-free-disk should fail
TEST ! $CLI volume set $V0 min-free-disk 199.051
TEST ! $CLI volume set $V0 min-free-disk 111.999
## Setting valid value for option cluster.min-free-disk should pass
TEST $CLI volume set $V0 min-free-disk 12%
TEST $CLI volume set $V0 min-free-disk 56.7%
TEST $CLI volume set $V0 min-free-disk 120
TEST $CLI volume set $V0 min-free-disk 369.0000
#bug-1179175-uss-option-validation
## Set the features.uss option with non-boolean values. These non-boolean
## values for the features.uss option should be rejected.
TEST ! $CLI volume set $V0 features.uss abcd
TEST ! $CLI volume set $V0 features.uss '#$#$'
TEST ! $CLI volume set $V0 features.uss 2324
## Setting other options with valid value. These options should succeed.
TEST $CLI volume set $V0 barrier enable
TEST $CLI volume set $V0 ping-timeout 60
## Set features.uss option with valid boolean value. It should succeed.
TEST $CLI volume set $V0 features.uss enable
TEST $CLI volume set $V0 features.uss disable
## Setting other options with valid value. These options should succeed.
TEST $CLI volume set $V0 barrier enable
TEST $CLI volume set $V0 ping-timeout 60
#bug-1209329 - daemon-svcs-on-reset-volume
##enable bitrot and verify whether bitd is running
TEST $CLI volume bitrot $V0 enable
EXPECT 'on' volinfo_field $V0 'features.bitrot'
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
##Do reset force, which sets the bitrot options back to their defaults
TEST $CLI volume reset $V0 force;
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_bitd_count
##enable the uss option and verify whether snapd is running
TEST $CLI volume set $V0 features.uss on
EXPECT 'on' volinfo_field $V0 'features.uss'
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_snapd_count
##Do reset force, which sets the uss options back to their defaults
TEST $CLI volume reset $V0 force;
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_snapd_count
##verify nfs is disabled by default
EXPECT "0" get_nfs_count
##enable nfs and verify
TEST $CLI volume set $V0 nfs.disable off
EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
EXPECT "1" get_nfs_count
##Do reset force, which sets the nfs options back to their defaults
TEST $CLI volume reset $V0 force;
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_nfs_count
##enable the uss option again and verify whether snapd is running
TEST $CLI volume set $V0 features.uss on
EXPECT 'on' volinfo_field $V0 'features.uss'
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_snapd_count
##Disable the uss option using set command and verify snapd
TEST $CLI volume set $V0 features.uss off
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_snapd_count
##enable nfs.disable and verify
TEST $CLI volume set $V0 nfs.disable on
EXPECT 'on' volinfo_field $V0 'nfs.disable'
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_nfs_count
## disable nfs.disable option using set command
TEST $CLI volume set $V0 nfs.disable off
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_nfs_count
TEST $CLI volume info;
TEST $CLI volume create $V1 $H0:$B0/${V1}1
TEST $CLI volume start $V1
pkill glusterd;
pkill glusterfsd;
TEST glusterd
TEST $CLI volume status $V1
cleanup

View File

@ -0,0 +1,115 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
cleanup;
TEST launch_cluster 2
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
TEST $CLI_1 volume start $V0
#bug-1177132 - sync server quorum options when a node is brought up
TEST $CLI_1 volume set all cluster.server-quorum-ratio 52
#Bring down 2nd glusterd
TEST kill_glusterd 2
#bug-1104642 - sync server quorum options when a node is brought up
#set the cluster-wide (volume set all) option from the 1st glusterd
TEST $CLI_1 volume set all cluster.server-quorum-ratio 80
# Now quorum is not met. Add-brick, remove-brick and volume-set commands
# (commands based on the syncop framework) should fail
TEST ! $CLI_1 volume add-brick $V0 $H1:$B1/${V0}2
TEST ! $CLI_1 volume remove-brick $V0 $H1:$B1/${V0}0 start
TEST ! $CLI_1 volume set $V0 barrier enable
# Now execute a command which goes through the op state machine; it should fail
TEST ! $CLI_1 volume profile $V0 start
#Bring back the 2nd glusterd
TEST $glusterd_2
#verify whether the value has been synced
EXPECT_WITHIN $PROBE_TIMEOUT "80" volinfo_field_1 all cluster.server-quorum-ratio
EXPECT_WITHIN $PROBE_TIMEOUT '1' peer_count
EXPECT_WITHIN $PROBE_TIMEOUT "80" volinfo_field_2 all cluster.server-quorum-ratio
# Now quorum is met.
# Add-brick, remove-brick and volume-set commands should succeed
TEST $CLI_1 volume add-brick $V0 $H2:$B2/${V0}2
TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 start
TEST $CLI_1 volume set $V0 barrier enable
TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 stop
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}1
## Stop the volume
TEST $CLI_1 volume stop $V0
## Bring down 2nd glusterd
TEST kill_glusterd 2
## Now quorum is not met. Starting the volume on the 1st node should not succeed
TEST ! $CLI_1 volume start $V0
## Bring back 2nd glusterd
TEST $glusterd_2
# After the 2nd glusterd comes back, there will be 2 nodes in the cluster
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
## Now quorum is met. Starting the volume on the 1st node should succeed.
TEST $CLI_1 volume start $V0
# Now re-execute the same profile command and this time it should succeed
TEST $CLI_1 volume profile $V0 start
#bug-1352277
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}1
TEST $CLI_1 volume set $V0 cluster.server-quorum-type none
# Bring down all the gluster processes
TEST killall_gluster
#bring back 1st glusterd and check whether the brick process comes back
TEST $glusterd_1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}0
#enabling quorum should bring down the brick
TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" brick_up_status_1 $V0 $H1 $B1/${V0}0
TEST $glusterd_2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}1
#bug-1367478 - brick processes should not be up when quorum is not met
TEST $CLI_1 volume create $V1 $H1:$B1/${V1}1 $H2:$B2/${V1}2
TEST $CLI_1 volume start $V1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V1 $H1 $B1/${V1}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V1 $H2 $B2/${V1}2
# Restart 2nd glusterd
TEST kill_glusterd 2
TEST $glusterd_2
# Check if all bricks are up
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V1 $H1 $B1/${V1}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V1 $H2 $B2/${V1}2
cleanup

View File

@ -4,7 +4,6 @@
. $(dirname $0)/../../cluster.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
@ -17,6 +16,21 @@ EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
$CLI_1 volume start $V0
EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
#bug-1231437
#Mount FUSE
TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
TEST mkdir $M0/dir{1..4};
TEST touch $M0/dir{1..4}/files{1..4};
TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
TEST $CLI_1 volume rebalance $V0 start
EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
#bug-1245142
$CLI_1 volume rebalance $V0 start &
#kill glusterd2 after the request is sent, so that the callback is invoked
#with rpc->status set to fail; a delay of roughly 1 sec is introduced to hit this scenario.
@ -26,3 +40,4 @@ kill_glusterd 2
EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
cleanup;

View File

@ -0,0 +1,131 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
. $(dirname $0)/../../volume.rc
cleanup;
function get_rebalanced_info()
{
local rebal_info_key=$2
$CLI volume rebalance $1 status | awk '{print $'$rebal_info_key'}' |sed -n 3p| sed 's/ *$//g'
}
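# Illustrative wrappers (an assumption based on how the columns are used
# further down: 2=files rebalanced, 3=size, 4=scanned, 5=failures, 6=skipped;
# these helpers are not part of the original test):
rebalanced_files ()    { get_rebalanced_info "$1" 2; }
rebalanced_failures () { get_rebalanced_info "$1" 5; }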
volname="StartMigrationDuringRebalanceTest"
TEST glusterd
TEST pidof glusterd;
TEST $CLI volume info;
TEST $CLI volume create $volname $H0:$B0/${volname}{1..4};
TEST $CLI volume start $volname;
#bug-1046308 - validate rebalance on a specified volume name
TEST $CLI volume rebalance $volname start;
#bug-1089668 - validation of rebalance status and remove brick status
#bug-963541 - after a remove-brick start, starting rebalance/remove-brick without committing should fail
TEST ! $CLI volume remove-brick $volname $H0:$B0/${volname}1 status
EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $volname
TEST $CLI volume remove-brick $volname $H0:$B0/${volname}1 start
TEST ! $CLI volume rebalance $volname start
TEST ! $CLI volume rebalance $volname status
TEST ! $CLI volume remove-brick $volname $H0:$B0/${volname}2 start
EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field \
"$volname" "$H0:$B0/${volname}1"
TEST $CLI volume remove-brick $volname $H0:$B0/${volname}1 commit
TEST $CLI volume rebalance $volname start
EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $volname
TEST $CLI volume rebalance $volname stop
TEST $CLI volume remove-brick $volname $H0:$B0/${volname}2 start
TEST $CLI volume remove-brick $volname $H0:$B0/${volname}2 stop
#bug-1351021-rebalance-info-post-glusterd-restart
TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3};
TEST $CLI volume start $V0;
#Mount volume and create data
TEST glusterfs -s $H0 --volfile-id $V0 $M0;
TEST mkdir $M0/dir{1..10}
TEST touch $M0/dir{1..10}/file{1..10}
# Add-brick and start rebalance
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4
TEST $CLI volume rebalance $V0 start
EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
#Rebalance info before glusterd restart
OLD_REBAL_FILES=$(get_rebalanced_info $V0 2)
OLD_SIZE=$(get_rebalanced_info $V0 3)
OLD_SCANNED=$(get_rebalanced_info $V0 4)
OLD_FAILURES=$(get_rebalanced_info $V0 5)
OLD_SKIPPED=$(get_rebalanced_info $V0 6)
pkill glusterd;
pkill glusterfsd;
TEST glusterd
#Rebalance info after glusterd restart
NEW_REBAL_FILES=$(get_rebalanced_info $V0 2)
NEW_SIZE=$(get_rebalanced_info $V0 3)
NEW_SCANNED=$(get_rebalanced_info $V0 4)
NEW_FAILURES=$(get_rebalanced_info $V0 5)
NEW_SKIPPED=$(get_rebalanced_info $V0 6)
#Check rebalance info before and after glusterd restart
TEST [ $OLD_REBAL_FILES == $NEW_REBAL_FILES ]
TEST [ $OLD_SIZE == $NEW_SIZE ]
TEST [ $OLD_SCANNED == $NEW_SCANNED ]
TEST [ $OLD_FAILURES == $NEW_FAILURES ]
TEST [ $OLD_SKIPPED == $NEW_SKIPPED ]
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
#bug-1004744 - validation of rebalance fix layout
TEST $CLI volume start $V0 force
TEST glusterfs -s $H0 --volfile-id $V0 $M0;
for i in `seq 11 20`;
do
mkdir $M0/dir_$i
echo file>$M0/dir_$i/file_$i
for j in `seq 1 100`;
do
mkdir $M0/dir_$i/dir_$j
echo file>$M0/dir_$i/dir_$j/file_$j
done
done
#add 2 bricks
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{5,6};
#perform rebalance fix-layout
TEST $CLI volume rebalance $V0 fix-layout start
EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" fix-layout_status_field $V0;
#bug-1075087 - rebalance post add brick
TEST mkdir $M0/dir{21..30};
TEST touch $M0/dir{21..30}/files{1..10};
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{7,8}
TEST $CLI volume rebalance $V0 start force
EXPECT_WITHIN 60 "completed" rebalance_status_field $V0
TEST pkill gluster
TEST glusterd
TEST pidof glusterd
# status should be "completed" immediately after glusterd has respawned.
EXPECT_WITHIN 5 "completed" rebalance_status_field $V0
cleanup

View File

@ -1,23 +1,32 @@
#!/bin/bash
## Test case for BZ:1230121 glusterd crashed while trying to remove a bricks
## one selected from each replica set - after shrinking nX3 to nX2 to nX1
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
cleanup;
## Start a 2 node virtual cluster
TEST launch_cluster 2;
TEST pidof glusterd
## Peer probe server 2 from server 1 cli
#bug-1047955 - remove brick from new peer in cluster
TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/${V0}{1,2,3,4}
TEST $CLI_1 volume start $V0;
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST $CLI_2 volume remove-brick $V0 $H1:$B1/${V0}{3,4} start;
TEST $CLI_2 volume info
#bug-964059 - volume status post remove brick start
TEST $CLI_1 volume create $V1 $H1:$B1/${V1}0 $H2:$B2/${V1}1
TEST $CLI_1 volume start $V1
TEST $CLI_1 volume remove-brick $V1 $H2:$B2/${V1}1 start
TEST $CLI_1 volume status
TEST $CLI_1 volume stop $V0
TEST $CLI_1 volume delete $V0
#bug-1230121 - decrease replica count by remove-brick and increase it by add-brick
## Creating a 2x3 replicate volume
TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/brick1 $H2:$B2/brick2 \
$H1:$B1/brick3 $H2:$B2/brick4 \
@ -26,7 +35,6 @@ TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/brick1 $H2:$B2/brick2 \
## Start the volume
TEST $CLI_1 volume start $V0
## Shrinking volume replica 2x3 to 2x2 by performing remove-brick operation.
TEST $CLI_1 volume remove-brick $V0 replica 2 $H1:$B1/brick1 $H2:$B2/brick6 force
@ -37,7 +45,6 @@ TEST $CLI_1 volume remove-brick $V0 replica 2 $H1:$B1/brick3 $H2:$B2/brick2 forc
TEST $CLI_1 volume remove-brick $V0 replica 1 $H1:$B1/brick5 force
### Expanding volume replica by performing add-brick operation.
## Expand volume replica from 1x1 to 1x2 by performing add-brick operation
@ -49,4 +56,5 @@ TEST $CLI_1 volume add-brick $V0 replica 2 $H1:$B1/brick3 $H2:$B2/brick2 force
## Expand volume replica from 2x2 to 2x3 by performing add-brick operation
TEST $CLI_1 volume add-brick $V0 replica 3 $H1:$B1/brick1 $H2:$B2/brick6 force
cleanup;
cleanup

View File

@ -0,0 +1,119 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
function check_peers {
$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
function brick_count()
{
local vol=$1;
$CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
}
cleanup
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/${V0}{1..5}
TEST $CLI volume start $V0
#bug-1225716 - remove-brick on a brick which is down should fail
#kill a brick process
kill_brick $V0 $H0 $B0/${V0}1
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status $V0 $H0 $B0/${V0}1
#remove-brick start should fail as the brick is down
TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
#remove-brick start should succeed as the brick is up
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}1"
#kill a brick process
kill_brick $V0 $H0 $B0/${V0}1
#remove-brick commit should pass even if the brick is down
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 commit
#bug-1121584 - brick-existing-validation-for-remove-brick-status-stop
## Start remove-brick operation on the volume
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
## Giving a non-existent brick to the remove-brick status/stop command should
## return an error.
TEST ! $CLI volume remove-brick $V0 $H0:$B0/ABCD status
TEST ! $CLI volume remove-brick $V0 $H0:$B0/ABCD stop
## Giving a brick which is part of the volume to the remove-brick status/stop
## command should print the statistics of the remove-brick operation or stop
## the operation, respectively.
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 status
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop
#bug-878004 - validate remove brick force
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force;
EXPECT '3' brick_count $V0
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 force;
EXPECT '2' brick_count $V0
#bug-1027171 - Do not allow commit if the bricks are not decommissioned
#Remove bricks and commit without starting
function remove_brick_commit_status {
$CLI volume remove-brick $V0 \
$H0:$B0/${V0}4 commit 2>&1 |grep -oE "success|decommissioned"
}
EXPECT "decommissioned" remove_brick_commit_status;
TEST $CLI volume stop $V0
TEST $CLI volume delete $V0;
#Create a 2X3 distributed-replicate volume
TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..6};
TEST $CLI volume start $V0
#Try to reduce replica count with start option
function remove_brick_start_status {
$CLI volume remove-brick $V0 replica 2 \
$H0:$B0/${V0}3 $H0:$B0/${V0}6 start 2>&1 |grep -oE "success|failed"
}
EXPECT "failed" remove_brick_start_status;
#Remove bricks with commit option
function remove_brick_commit_status2 {
$CLI volume remove-brick $V0 replica 2 \
$H0:$B0/${V0}3 $H0:$B0/${V0}6 commit 2>&1 |
grep -oE "success|decommissioned"
}
EXPECT "decommissioned" remove_brick_commit_status2;
TEST $CLI volume info $V0
#bug-1040408 - reduce replica count of distributed replicate volume
# Reduce to 2x2 volume by specifying bricks in reverse order
function remove_brick_status {
$CLI volume remove-brick $V0 replica 2 \
$H0:$B0/${V0}6 $H0:$B0/${V0}3 force 2>&1 |grep -oE "success|failed"
}
EXPECT "success" remove_brick_status;
TEST $CLI volume info $V0
#bug-1120647 - remove brick validation
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}{4..5} start
EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}5"
EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}4"
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}{4..5} commit
TEST $CLI volume remove-brick $V0 replica 1 $H0:$B0/${V0}2 force
cleanup

View File

@ -1,8 +1,5 @@
#!/bin/bash
#Test case: Create a distributed replicate volume, and remove multiple
#replica pairs in a single remove-brick command.
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
@ -17,6 +14,7 @@ TEST $CLI volume info
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..6};
TEST $CLI volume start $V0
#bug-974007 - remove multiple replica pairs in a single remove-brick command
# Mount FUSE and create files
TEST glusterfs -s $H0 --volfile-id $V0 $M0
TEST touch $M0/file{1..10}
@ -41,12 +39,41 @@ function remove_brick_commit_status {
}
EXPECT "success" remove_brick_commit_status;
# Check the volume type
EXPECT "Replicate" echo `$CLI volume info |grep Type |awk '{print $2}'`
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
#bug-961669 - remove brick start should fail when reducing the replica count
#Create a 3x3 dist-rep volume
TEST $CLI volume create $V1 replica 3 $H0:$B0/${V1}{0,1,2,3,4,5,6,7,8};
TEST $CLI volume start $V1
# Mount FUSE and create file/directory
TEST glusterfs -s $H0 --volfile-id $V1 $M0
TEST touch $M0/zerobytefile.txt
TEST mkdir $M0/test_dir
TEST dd if=/dev/zero of=$M0/file bs=1024 count=1024
function remove_brick_start {
$CLI volume remove-brick $V1 replica 2 $H0:$B0/${V1}{1,4,7} start 2>&1|grep -oE 'success|failed'
}
function remove_brick {
$CLI volume remove-brick $V1 replica 2 $H0:$B0/${V1}{1,4,7} force 2>&1|grep -oE 'success|failed'
}
#remove-brick start variant
#Actual message displayed at cli is:
#"volume remove-brick start: failed: Rebalancing not needed when reducing replica count. Try without the 'start' option"
EXPECT "failed" remove_brick_start;
#remove-brick commit-force
#Actual message displayed at cli is:
#"volume remove-brick commit force: success"
EXPECT "success" remove_brick
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
TEST $CLI volume stop $V0
TEST $CLI volume delete $V0;
TEST ! $CLI volume info $V0;
cleanup;

View File

@ -0,0 +1,48 @@
#!/bin/bash
## Test case for BZ: 1094119 Remove replace-brick support from gluster
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
# Start glusterd
TEST glusterd
TEST pidof glusterd
## Let's create and start the volume
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}
TEST $CLI volume start $V0
#bug-1094119-remove-replace-brick-support-from-glusterd
## With this patch, replace-brick only accepts the following command:
## volume replace-brick <VOLNAME> <SOURCE-BRICK> <NEW-BRICK> {commit force}
## Any other form of the replace-brick command should fail.
TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 start
TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 status
TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 abort
## the replace-brick commit force command should succeed
TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 commit force
#bug-1242543-replace-brick validation
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
# Replace brick1 without killing
TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1_new commit force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
TEST kill_brick $V0 $H0 $B0/${V0}1_new
# Replace brick1 after killing the brick
TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1_new $H0:$B0/${V0}1_newer commit force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
cleanup;

View File

@ -1,9 +1,4 @@
#!/bin/bash
# This test checks that shd or any other daemon brought down (apart from
# brick processes) is not brought up automatically when glusterd on the other
# node is (re)started
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
@ -19,24 +14,35 @@ function shd_up_status_2 {
function get_shd_pid_2 {
$CLI_2 volume status | grep "localhost" | grep "Self-heal Daemon" | awk '{print $8}'
}
cleanup;
TEST launch_cluster 3
function check_peers {
$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
TEST launch_cluster 3
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
TEST $CLI_1 volume create $V0 replica 2 $H1:$B0/${V0} $H2:$B0/${V0}
TEST $CLI_1 volume start $V0
#testcase: bug-1507466 - validate reset-brick commit force
# Negative case with brick not killed && volume-id xattrs present
TEST ! $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} $H1:$B0/${V0} commit force
TEST $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} start
# Now test if reset-brick commit force works
TEST $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} $H1:$B0/${V0} commit force
#testcase: bug-1383893 - shd should not come up after restarting the peer glusterd
TEST $CLI_1 peer probe $H3;
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
# Let's create the volume
TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/${V0}1 $H2:$B2/${V0}2
# Start the volume
TEST $CLI_1 volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B0/${V0}
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B0/${V0}
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_2


@ -7,6 +7,7 @@
cleanup;
TEST verify_lvm_version
TEST launch_cluster 3;
TEST setup_lvm 3;
@ -20,8 +21,17 @@ EXPECT 'Created' volinfo_field $V0 'Status'
TEST $CLI_1 volume start $V0
EXPECT 'Started' volinfo_field $V0 'Status'
#bug-1318591 - skip-non-directories-inside-vols
b="B1"
TEST touch ${!b}/glusterd/vols/file
TEST $CLI_1 snapshot create snap1 $V0 no-timestamp;
TEST touch ${!b}/glusterd/snaps/snap1/file
#bug-1322145 - peer hosting snapshotted bricks should not be detachable
kill_glusterd 2
TEST $CLI_1 peer probe $H3;
@ -29,8 +39,12 @@ EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST $CLI_1 volume replace-brick $V0 $H2:$L2 $H3:$L3 commit force
# peer hosting snapshotted bricks should not be detachable
TEST ! $CLI_1 peer detach $H2
TEST killall_gluster
TEST $glusterd_1
TEST $glusterd_2
cleanup;


@ -0,0 +1,54 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../cluster.rc
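# volume_get_field <vol> <field>: print the value of a volume option as seen
# by node 2's CLI via 'volume get'.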
function volume_get_field()
{
local vol=$1
local field=$2
$CLI_2 volume get $vol $field | tail -1 | awk '{print $2}'
}
cleanup
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
TEST $CLI_1 volume start $V0
TEST $CLI_1 volume set $V0 performance.readdir-ahead on
# Bring down 2nd glusterd
TEST kill_glusterd 2
##bug-1420637 and bug-1323287 - options set while a peer is down must sync to it after glusterd restart
TEST $CLI_1 volume set all cluster.server-quorum-ratio 60
TEST $CLI_1 volume set $V0 performance.readdir-ahead off
TEST $CLI_1 volume set $V0 performance.write-behind off
# Bring back 2nd glusterd
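# $glusterd_2 is assumed to hold the command recorded by launch_cluster for
# starting glusterd on node 2 again.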
TEST $glusterd_2
# After the 2nd glusterd comes back, there will be 2 nodes in the cluster
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
#bug-1420637-volume sync post glusterd restart
EXPECT_WITHIN $PROBE_TIMEOUT "60" volinfo_field_2 all cluster.server-quorum-ratio
EXPECT_WITHIN $PROBE_TIMEOUT "off" volinfo_field_2 $V0 performance.readdir-ahead
#bug-1323287
EXPECT_WITHIN $PROBE_TIMEOUT 'off' volume_get_field $V0 'write-behind'
#bug-1213295 - volume stop should not crash glusterd post glusterd restart
TEST $CLI_2 volume stop $V0
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST $CLI_1 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1
cleanup


@ -0,0 +1,144 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
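# 8 bricks with replica 2 and stripe 2 => a distributed striped-replicated
# volume, 2 x 2 x 2 = 8.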
## start volume and verify
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
#bug-1314649 - validate group virt
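# 'group virt' applies the predefined 'virt' option group shipped with
# glusterd in a single volume-set operation.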
TEST $CLI volume set $V0 group virt;
#bug-765230 - remove-quota-related-option-after-disabling-quota
## setting soft-timeout as 20
TEST $CLI volume set $V0 features.soft-timeout 20
EXPECT '20' volinfo_field $V0 'features.soft-timeout';
## enabling features.quota-deem-statfs should fail while quota is disabled
TEST ! $CLI volume set $V0 features.quota-deem-statfs on
EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
## enabling quota
TEST $CLI volume quota $V0 enable
EXPECT 'on' volinfo_field $V0 'features.quota'
## setting soft-timeout as 20
TEST $CLI volume set $V0 features.soft-timeout 20
EXPECT '20' volinfo_field $V0 'features.soft-timeout';
## enabling features.quota-deem-statfs
TEST $CLI volume set $V0 features.quota-deem-statfs on
EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
## disabling quota (this also resets the quota-dependent options below)
TEST $CLI volume quota $V0 disable
EXPECT 'off' volinfo_field $V0 'features.quota'
EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
EXPECT '' volinfo_field $V0 'features.soft-timeout'
## setting soft-timeout as 30
TEST $CLI volume set $V0 features.soft-timeout 30
EXPECT '30' volinfo_field $V0 'features.soft-timeout';
## disabling features.quota-deem-statfs should fail now that quota is off
TEST ! $CLI volume set $V0 features.quota-deem-statfs off
EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
#bug-859927 - validate different options for striped replicated volume
TEST ! $CLI volume set $V0 statedump-path ""
TEST ! $CLI volume set $V0 statedump-path " "
TEST $CLI volume set $V0 statedump-path "/home/"
EXPECT "/home/" volume_option $V0 server.statedump-path
TEST ! $CLI volume set $V0 background-self-heal-count ""
TEST ! $CLI volume set $V0 background-self-heal-count " "
TEST $CLI volume set $V0 background-self-heal-count 10
EXPECT "10" volume_option $V0 cluster.background-self-heal-count
TEST ! $CLI volume set $V0 cache-size ""
TEST ! $CLI volume set $V0 cache-size " "
TEST $CLI volume set $V0 cache-size 512MB
EXPECT "512MB" volume_option $V0 performance.cache-size
TEST ! $CLI volume set $V0 self-heal-daemon ""
TEST ! $CLI volume set $V0 self-heal-daemon " "
TEST $CLI volume set $V0 self-heal-daemon on
EXPECT "on" volume_option $V0 cluster.self-heal-daemon
TEST ! $CLI volume set $V0 read-subvolume ""
TEST ! $CLI volume set $V0 read-subvolume " "
TEST $CLI volume set $V0 read-subvolume $V0-client-0
EXPECT "$V0-client-0" volume_option $V0 cluster.read-subvolume
TEST ! $CLI volume set $V0 data-self-heal-algorithm ""
TEST ! $CLI volume set $V0 data-self-heal-algorithm " "
TEST ! $CLI volume set $V0 data-self-heal-algorithm on
TEST $CLI volume set $V0 data-self-heal-algorithm full
EXPECT "full" volume_option $V0 cluster.data-self-heal-algorithm
TEST ! $CLI volume set $V0 min-free-inodes ""
TEST ! $CLI volume set $V0 min-free-inodes " "
TEST $CLI volume set $V0 min-free-inodes 60%
EXPECT "60%" volume_option $V0 cluster.min-free-inodes
TEST ! $CLI volume set $V0 min-free-disk ""
TEST ! $CLI volume set $V0 min-free-disk " "
TEST $CLI volume set $V0 min-free-disk 60%
EXPECT "60%" volume_option $V0 cluster.min-free-disk
TEST $CLI volume set $V0 min-free-disk 120
EXPECT "120" volume_option $V0 cluster.min-free-disk
TEST ! $CLI volume set $V0 frame-timeout ""
TEST ! $CLI volume set $V0 frame-timeout " "
TEST $CLI volume set $V0 frame-timeout 0
EXPECT "0" volume_option $V0 network.frame-timeout
TEST ! $CLI volume set $V0 auth.allow ""
TEST ! $CLI volume set $V0 auth.allow " "
TEST $CLI volume set $V0 auth.allow 192.168.122.1
EXPECT "192.168.122.1" volume_option $V0 auth.allow
TEST ! $CLI volume set $V0 stripe-block-size ""
TEST ! $CLI volume set $V0 stripe-block-size " "
TEST $CLI volume set $V0 stripe-block-size 512MB
EXPECT "512MB" volume_option $V0 cluster.stripe-block-size
#bug-782095 - validate performance cache min/max size value
## setting performance cache min size as 2MB
TEST $CLI volume set $V0 performance.cache-min-file-size 2MB
EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
## setting performance cache max size as 20MB
TEST $CLI volume set $V0 performance.cache-max-file-size 20MB
EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
## trying to set performance cache min size to 25MB (greater than the max, so it should fail)
TEST ! $CLI volume set $V0 performance.cache-min-file-size 25MB
EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
## the min size can be set as long as it is less than the max size
TEST $CLI volume set $V0 performance.cache-min-file-size 15MB
EXPECT '15MB' volinfo_field $V0 'performance.cache-min-file-size';
## setting cache-max-file-size to 10MB (below the current min of 15MB) should fail
TEST ! $CLI volume set $V0 cache-max-file-size 10MB
EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
## finish up
TEST $CLI volume stop $V0;
EXPECT 'Stopped' volinfo_field $V0 'Status';
TEST $CLI volume delete $V0;
TEST ! $CLI volume info $V0;
cleanup


@ -0,0 +1,110 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
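# check_fs <mountpoint>: echo 0 when df can reach the mount (volume
# accessible), non-zero otherwise.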
function check_fs {
df $1 &> /dev/null
echo $?
}
function check_peers {
$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
cleanup;
TEST launch_cluster 3
TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST $CLI_1 peer probe $H3;
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
# Let's create the volume
TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/${V0}1 $H2:$B2/${V0}2 $H3:$B3/${V0}3
TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
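# With server-side quorum enforced, glusterd takes local bricks down whenever
# the cluster loses quorum and brings them back once quorum is restored.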
# Start the volume
TEST $CLI_1 volume start $V0
#bug-1345727 - bricks should be down when quorum is not met
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H3 $B3/${V0}3
# Bring down glusterd on 2nd node
TEST kill_glusterd 2
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST kill_glusterd 3
EXPECT_WITHIN $PROBE_TIMEOUT 0 peer_count
# Server quorum is not met. Brick on 1st node must be down
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status_1 $V0 $H1 $B1/${V0}1
# Set the quorum ratio to 95, i.e. at least 95% of the nodes in the cluster
# must be available for volume operations to be allowed.
# In other words, server-side quorum is met if the number of available nodes
# is greater than or equal to 'quorum-ratio' percent of the number of nodes
# in the cluster.
TEST $CLI_1 volume set all cluster.server-quorum-ratio 95
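# Illustrative only (not used by this test): a minimal sketch of the quorum
# rule described above, assuming an operation is allowed when the number of
# active nodes is at least quorum-ratio percent of the cluster size.
function quorum_met_sketch {
    local active=$1
    local total=$2
    local ratio=$3
    # integer comparison equivalent to active/total >= ratio/100
    if [ $((active * 100)) -ge $((total * ratio)) ]; then
        echo "met"
    else
        echo "not met"
    fi
}
# With a 3-node cluster and ratio 95, quorum needs all 3 nodes up:
# quorum_met_sketch 1 3 95 -> "not met", which is why the replace-brick below
# must fail while only node 1's glusterd is running.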
#bug-1483058 - replace-brick should fail when quorum is not met
TEST ! $CLI_1 volume replace-brick $V0 $H2:$B2/${V0}2 $H1:$B1/${V0}2_new commit force
#Bring back 2nd glusterd
TEST $glusterd_2
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
# Server quorum is still not met. Bricks should be down on 1st and 2nd nodes
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status_1 $V0 $H1 $B1/${V0}1
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status_1 $V0 $H2 $B2/${V0}2
# Bring back 3rd glusterd
TEST $glusterd_3
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
# Server quorum is met now. Bricks should be up on all nodes
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H3 $B3/${V0}3
# quorum is met. replace-brick will execute successfully
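# attempt_replace_brick is assumed to be a cluster.rc helper that runs
# 'replace-brick ... commit force' from the given node and echoes its exit
# status (0 on success).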
EXPECT_WITHIN $PEER_SYNC_TIMEOUT 0 attempt_replace_brick 1 $V0 $H2:$B2/${V0}2 $H2:$B2/${V0}2_new
TEST $CLI_1 volume reset all
TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2_new
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H3 $B3/${V0}3
#bug-913555 - volume should become unwritable when quorum is not met
TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
# Kill one pseudo-node, make sure the others survive and volume stays up.
TEST kill_node 3;
EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H1 $B1/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status_1 $V0 $H2 $B2/${V0}2_new
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
# Kill another pseudo-node, make sure the last one dies and volume goes down.
TEST kill_node 2;
EXPECT_WITHIN $PROBE_TIMEOUT 0 check_peers
#with quorum lost, the brick process on the surviving node must be dead as well
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" brick_up_status_1 $V0 $H1 $B1/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 check_fs $M0;
TEST $glusterd_2;
TEST $glusterd_3;
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
cleanup