326a47939d
When one of the subvolumes is down, the lock request is not attempted
on that subvolume and we move on to the next subvolume:

    /* skip over children that are down */
    while ((child_index < priv->child_count)
           && !local->child_up[child_index])
            child_index++;

In the above case, if there are 2 subvolumes and the 2nd subvolume is
down (subvolume 1 from afr's view), then after attempting the lock on
the 1st child (i.e. subvolume 0), child_index is calculated to be 1.
But since the 2nd child is down, child_index is incremented to 2 as per
the above logic, and the lock request is STACK_WINDed to the child with
child_index 2. Since afr has only 2 children, the child (i.e. the
xlator_t pointer) for child_index 2 will be NULL. The process crashes
when it dereferences the NULL xlator object.

Change-Id: Icd9b5ad28bac1b805e6e80d53c12d296526bedf5
BUG: 765564
Signed-off-by: Raghavendra Bhat <raghavendra@redhat.com>
Reviewed-on: http://review.gluster.org/4438
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Anand Avati <avati@redhat.com>
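For illustration, here is a minimal sketch of the shape of the fix: a
bounds check after the skip loop so that afr unwinds instead of winding
to a non-existent child. The struct and function names below are
simplified stand-ins for afr's private/local structures, not the actual
afr-lk-common.c code:

    /* illustrative stand-ins, not the real afr types */
    struct afr_private { int child_count; void **children; };
    struct afr_local   { unsigned char *child_up; };

    static int
    next_up_child (struct afr_private *priv, struct afr_local *local,
                   int child_index)
    {
            /* skip over children that are down */
            while ((child_index < priv->child_count) &&
                   !local->child_up[child_index])
                    child_index++;

            /* Without this guard, child_index can reach child_count
             * when all remaining children are down, and the caller
             * would STACK_WIND to priv->children[child_count], a NULL
             * xlator_t pointer -- the crash described above. */
            if (child_index >= priv->child_count)
                    return -1; /* no up children left; caller unwinds */

            return child_index;
    }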
#!/bin/bash
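
# Regression test for BUG 765564: rename and concurrent rmdir/mv
# behaviour on a replica-2 volume when one brick is down. The crash
# described in the commit message was triggered by winding a lock
# request to a child index past child_count.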

. $(dirname $0)/../include.rc
. $(dirname $0)/../volume.rc

TEST glusterd
TEST pidof glusterd

## Start and create a volume
mkdir -p ${B0}/${V0}-0
mkdir -p ${B0}/${V0}-1
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}

TEST $CLI volume set $V0 performance.io-cache off;
TEST $CLI volume set $V0 performance.write-behind off;
TEST $CLI volume set $V0 performance.stat-prefetch off

TEST $CLI volume start $V0;

## Mount native
TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0

# returns success (0) if 'olddir' is absent
# 'olddir' must be absent in both replicas
function rm_succeeded () {
    local dir1=$1

    # the bricks live on the local backend, so check the brick paths directly
    [[ -d $B0/${V0}-0/$dir1 || -d $B0/${V0}-1/$dir1 ]] && return 1
    return 0
}

# returns success (0) if 'newdir' is present
# 'newdir' must be present in both replicas
function mv_succeeded () {
    local dir1=$1

    [[ -d $B0/${V0}-0/$dir1 && -d $B0/${V0}-1/$dir1 ]] && return 0
    return 1
}

# returns zero on success
# Only one of rm and mv can succeed. This is captured by the XOR below.
function chk_backend_consistency () {
    local dir1=$1
    local dir2=$2

    rm_succeeded $dir1
    local rm_status=$?
    mv_succeeded $dir2
    local mv_status=$?

    # XOR: exactly one of the two operations must have taken effect
    if [[ ( $rm_status -eq 0 && $mv_status -ne 0 ) ||
          ( $rm_status -ne 0 && $mv_status -eq 0 ) ]]; then
        return 0
    fi
    return 1
}

# concurrent removal/rename of dirs
function rm_mv_correctness () {
    local ret=0
    for i in {1..100}; do
        mkdir $M0/"dir"$i
        rmdir $M0/"dir"$i &
        mv $M0/"dir"$i $M0/"adir"$i &
        wait
        # chk_backend_consistency reports via its exit status, not stdout
        chk_backend_consistency "dir"$i "adir"$i
        (( ret += $? ))
        rm -rf $M0/"dir"$i
        rm -rf $M0/"adir"$i
    done
    return $ret
}

TEST touch $M0/a;
TEST mv $M0/a $M0/b;

# test rename fop when one of the bricks is down
kill_brick ${V0} ${H0} ${B0}/${V0}-1;
TEST touch $M0/h;
TEST mv $M0/h $M0/1;

TEST $CLI volume start $V0 force;

EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1;
find $M0 | xargs stat 2>/dev/null 1>/dev/null;

TEST rm_mv_correctness;

TEST umount $M0;
cleanup;