PROBLEM: During 'volume delete', when glusterd fails to erase all
information about a volume from the backend store (for instance because
rmdir() failed on non-empty directories), not only does volume delete
fail on that node, but subsequent attempts to restart glusterd also
fail, because the volume store is left in an inconsistent state.

FIX: Rename the volume directory path to a new location,
<working-dir>/trash/<volume-id>.deleted, and then go on to clean up its
contents. The volume is considered deleted once rename() succeeds,
irrespective of whether the subsequent cleanup succeeds.

Change-Id: Iaf18e1684f0b101808bd5e1cd53a5d55790541a8
BUG: 889630
Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
Reviewed-on: http://review.gluster.org/4639
Reviewed-by: Amar Tumballi <amarts@redhat.com>
Reviewed-by: Kaushal M <kaushal@redhat.com>
Reviewed-by: Jeff Darcy <jdarcy@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
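
The FIX above is a rename-then-cleanup pattern: deletion is committed by
a single rename(), and emptying the renamed directory is best-effort. A
minimal shell sketch of the idea (an illustration only; the actual
implementation is C code inside glusterd, and the working directory and
variable names below are assumptions, not glusterd's own):

    #!/bin/bash
    # Hypothetical sketch of the delete path described in FIX.
    wd=/var/lib/glusterd    # assumed glusterd working directory
    volname=$1              # name of the volume being deleted
    volid=$2                # the volume's UUID
    mkdir -p "$wd/trash"
    # The volume counts as deleted the moment this rename succeeds...
    mv "$wd/vols/$volname" "$wd/trash/$volid.deleted" || exit 1
    # ...so failing to empty the renamed directory is deliberately
    # non-fatal: the old rmdir() failure can no longer leave the live
    # store in an inconsistent state.
    rm -rf "$wd/trash/$volid.deleted" || true
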
#!/bin/bash

. $(dirname $0)/../include.rc
. $(dirname $0)/../cluster.rc

#Count the peers in 'Peer in Cluster (Connected)' state, as seen by
#node 1
function check_peers {
    $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}

#Print the number of volumes visible to node 1 or node 2, depending on
#the first argument
function volume_count {
    local cli=$1;
    if [ $cli -eq '1' ] ; then
        $CLI_1 volume info | grep 'Volume Name' | wc -l;
    else
        $CLI_2 volume info | grep 'Volume Name' | wc -l;
    fi
}

cleanup;

TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;

EXPECT_WITHIN 20 1 check_peers

TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
TEST $CLI_1 volume start $V0

b="B1";

#Create an extra file in the originator's volume store, so that erasing
#the store hits a non-empty directory (${!b} expands to $B1, node 1's
#backend directory)
touch ${!b}/glusterd/vols/$V0/run/file

TEST $CLI_1 volume stop $V0
#Test for self-commit failure
TEST $CLI_1 volume delete $V0

#Check whether delete succeeded on both the nodes
EXPECT "0" volume_count '1'
EXPECT "0" volume_count '2'

#Check whether the volume name can be reused after deletion
TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
TEST $CLI_1 volume start $V0

#Create an extra file in the peer's volume store (still node 1's, but
#this time the delete is issued from node 2, making node 1 the peer)
touch ${!b}/glusterd/vols/$V0/run/file

TEST $CLI_1 volume stop $V0
#Test for commit failure on the other node
TEST $CLI_2 volume delete $V0

EXPECT "0" volume_count '1';
EXPECT "0" volume_count '2';

cleanup;
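
#Usage note (an assumption based on the TAP-style .t tests in the
#glusterfs test suite): the test can be run standalone with prove, e.g.
#   prove -v tests/bugs/bug-889630.t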