tests: Fix entry-self-heal.t

Because both bricks in the replica pair of patchy reside on the same node,
the two full-healer threads within the same shd contend for non-blocking
locks; when each of them wins the lock on one of the bricks, both fail,
causing heal to fail occasionally.

Now heals are triggered from the mount as part of inode refresh.
And because AFR on the mount graph
a. does not treat the presence of dirty xattrs as something that needs
   a heal (this is the case for the dirs fool_heal and fool_me), and
b. does not recursively heal the entire hierarchy of subdirs and their
   entries in one shot (this is the case with source_creations_heal/dir1),

index heal is used to heal fool_heal, fool_me and
source_creations_heal/dir1, wherein only one brick (brick-1, the brick
that contains the good copy of source_creations_heal/dir1) has all the
gfids to be healed copied into its indices/xattrop directory, as sketched
below.
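
For reference, a rough sketch of what that index ends up looking like on
the good brick (the names below are placeholders, not real uuids):

  # Index heal crawls this directory and heals every gfid it finds here;
  # each gfid entry is a hardlink to the xattrop-<uuid> base entry.
  ls <good-brick>/.glusterfs/indices/xattrop/
  xattrop-<uuid>  <gfid-of-fool_heal>  <gfid-of-fool_me>  <gfid-of-dir1>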

Change-Id: I46df4188f16d1623f20cc0d7266b3afaeca6c31f
BUG: 1163543
Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
Reviewed-on: http://review.gluster.org/10916
Tested-by: NetBSD Build System
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
Authored by Krutika Dhananjay on 2015-05-25 16:42:50 +05:30; committed by Pranith Kumar Karampuri
parent f59a1431e3
commit 158b047f79
3 changed files with 66 additions and 4 deletions


@@ -196,7 +196,6 @@ function is_bad_test ()
local name=$1
for bt in ./tests/basic/volume-snapshot-clone.t \
./tests/basic/uss.t \
./tests/basic/afr/entry-self-heal.t \
./tests/bugs/replicate/bug-1015990.t \
./tests/basic/ec/quota.t \
./tests/basic/tier/tier.t \


@@ -1,5 +1,19 @@
#!/bin/bash
# Hardlink the gfids of the given entries (paths relative to the brick root
# passed as the first argument) to the base entry already present in that
# brick's indices/xattrop directory, so that index heal will pick them up.
function create_brick_xattrop_entry {
        local xattrop_dir=$(afr_get_index_path $1)
        local base_entry=`ls $xattrop_dir`
        local gfid_str
        # Everything after the brick path is the list of entries to index.
        local params=`echo "$@" | cut -d' ' -f2-`
        echo $params
        for file in $params
        do
                gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $1/$file))
                ln $xattrop_dir/$base_entry $xattrop_dir/$gfid_str
        done
}
# Diff the sorted entry listings of two directories.
function diff_dirs {
        diff <(ls $1 | sort) <(ls $2 | sort)
}
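
These helpers presumably land in the newly sourced afr.rc (the test below
adds the ". $(dirname $0)/../../afr.rc" include). The hunk further down
uses create_brick_xattrop_entry like this, queueing the three entries for
index heal on brick ${V0}0 by hardlinking their gfids to that brick's base
xattrop entry:

  create_brick_xattrop_entry $B0/${V0}0 fool_heal fool_me source_creations_heal/dir1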


@@ -4,6 +4,7 @@
#as expected.
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../afr.rc
cleanup;
@@ -71,9 +72,16 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
TEST $CLI volume set $V0 self-heal-daemon off
TEST $CLI volume set $V0 performance.write-behind off
TEST $CLI volume set $V0 performance.read-ahead off
TEST $CLI volume set $V0 performance.readdir-ahead off
TEST $CLI volume set $V0 performance.open-behind off
TEST $CLI volume set $V0 performance.stat-prefetch off
TEST $CLI volume set $V0 performance.io-cache off
TEST $CLI volume set $V0 performance.quick-read off
TEST $CLI volume start $V0
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp=no
cd $M0
#_me_ is dir on which missing entry self-heal happens, _heal is where dir self-heal happens
#spb is split-brain, fool is all fool
@@ -250,11 +258,52 @@ EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/dir1
TEST [ -d source_creations_me/dir1 ]
#Trigger heal and check _heal dirs are healed properly
TEST $CLI volume set $V0 self-heal-daemon on
#Trigger change in event generation number. That way inodes would get refreshed during lookup
TEST kill_brick $V0 $H0 $B0/${V0}1
$CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
TEST stat spb_heal
TEST stat spb_me_heal
TEST stat fool_heal
TEST stat fool_me
TEST stat v1_fool_heal
TEST stat v1_fool_me
TEST stat source_deletions_heal
TEST stat source_deletions_me
TEST stat source_creations_heal
TEST stat source_creations_me
TEST stat v1_dirty_heal
TEST stat v1_dirty_me
TEST $CLI volume stop $V0
TEST rm -rf $B0/${V0}{0,1}/.glusterfs/indices/xattrop/*
$CLI volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
#Create base entry in indices/xattrop
echo "Data" > $M0/FILE
rm -f $M0/FILE
EXPECT "1" count_index_entries $B0/${V0}0
EXPECT "1" count_index_entries $B0/${V0}1
TEST $CLI volume stop $V0;
#Create entries for fool_heal and fool_me to ensure they are fully healed and dirty xattrs erased, before triggering index heal
create_brick_xattrop_entry $B0/${V0}0 fool_heal fool_me source_creations_heal/dir1
$CLI volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
$CLI volume set $V0 self-heal-daemon on
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0 full
TEST $CLI volume heal $V0;
EXPECT_WITHIN $HEAL_TIMEOUT "~" print_pending_heals spb_heal spb_me_heal fool_heal fool_me v1_fool_heal v1_fool_me source_deletions_heal source_deletions_me source_creations_heal source_creations_me v1_dirty_heal v1_dirty_me
EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 spb_heal