afr: do not resolve split-brains when copies are of the same size
Automatic split-brain resolution with "size" as the favorite-child policy must not resolve split-brains when the copies are of the same size. Determine whether the sizes of the copies are equal and return -1 in that case, so the file is left in split-brain.

updates: bz#1655052
Change-Id: I3d8e8b4d7962b070ed16c3ee02a1e5a926fd5eab
Signed-off-by: Iraj Jamali <ijamali@redhat.com>
commit 67bc377568 (parent cd57145546)
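In short, the selection rule in afr_sh_fav_by_size() becomes: a copy is chosen as the heal source only if it is strictly the largest, and any tie at the current maximum resets the choice. A minimal standalone sketch of that rule (illustrative only; pick_biggest_replica and the plain size array are hypothetical, not part of the patch):

    #include <stdint.h>

    /* Sketch of the rule this patch implements: return the index of the
     * strictly largest copy, or -1 when the maximum size is shared by
     * more than one copy, leaving the tie (and the split-brain) unresolved. */
    static int
    pick_biggest_replica(const uint64_t *sizes, int count)
    {
        int fav = -1;
        uint64_t max = 0;
        int i;

        for (i = 0; i < count; i++) {
            if (sizes[i] > max) {
                max = sizes[i];
                fav = i; /* new strict maximum */
            } else if (sizes[i] == max) {
                fav = -1; /* tie at the current maximum */
            }
        }
        return fav; /* -1 means "no bigger file": do not heal */
    }

With two copies of equal size, which is the scenario the new test constructs by writing 1 MiB to each brick while the other is down, this returns -1 and the file is left in split-brain.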
tests/bugs/replicate/bug-1655052-sbrain-policy-same-size.t (new executable file, 55 lines)
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+#Test the split-brain resolution CLI commands.
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+#Create replica 2 volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST touch $M0/file
+
+############ Healing using favorite-child-policy = size and size of bricks is same #################
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+#file still in split-brain
+EXPECT_WITHIN $HEAL_TIMEOUT "2" get_pending_heal_count $V0
+cat $M0/file > /dev/null
+EXPECT_NOT "^0$" echo $?
+
+#We know that both bricks have same size file
+TEST $CLI volume set $V0 cluster.favorite-child-policy size
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "2" get_pending_heal_count $V0
+cat $M0/file > /dev/null
+EXPECT_NOT "^0$" echo $?
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup
xlators/cluster/afr/src/afr-self-heal-common.c

@@ -1163,7 +1163,8 @@ afr_sh_fav_by_ctime(xlator_t *this, struct afr_reply *replies, inode_t *inode)
 }
 
 /*
- * afr_sh_fav_by_size: Choose favorite child by size.
+ * afr_sh_fav_by_size: Choose favorite child by size
+ * when not all files are of zero size.
  */
 int
 afr_sh_fav_by_size(xlator_t *this, struct afr_reply *replies, inode_t *inode)
@@ -1175,24 +1176,30 @@ afr_sh_fav_by_size(xlator_t *this, struct afr_reply *replies, inode_t *inode)
 
     priv = this->private;
     for (i = 0; i < priv->child_count; i++) {
-        if (replies[i].valid == 1) {
-            gf_msg_debug(this->name, 0,
-                         "Child:%s file size = %" PRIu64 " for gfid %s",
-                         priv->children[i]->name, replies[i].poststat.ia_size,
-                         uuid_utoa(inode->gfid));
-            if (replies[i].poststat.ia_type == IA_IFDIR) {
-                gf_msg(this->name, GF_LOG_ERROR, 0,
-                       AFR_MSG_SBRAIN_FAV_CHILD_POLICY,
-                       "Cannot perform selfheal on %s. "
-                       "Size policy is not applicable to directories.",
-                       uuid_utoa(inode->gfid));
-                break;
-            }
-            if (replies[i].poststat.ia_size > cmp_sz) {
-                cmp_sz = replies[i].poststat.ia_size;
-                fav_child = i;
-            }
-        }
+        if (!replies[i].valid) {
+            continue;
+        }
+        gf_msg_debug(this->name, 0,
+                     "Child:%s file size = %" PRIu64 " for gfid %s",
+                     priv->children[i]->name, replies[i].poststat.ia_size,
+                     uuid_utoa(inode->gfid));
+        if (replies[i].poststat.ia_type == IA_IFDIR) {
+            gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SBRAIN_FAV_CHILD_POLICY,
+                   "Cannot perform selfheal on %s. "
+                   "Size policy is not applicable to directories.",
+                   uuid_utoa(inode->gfid));
+            break;
+        }
+        if (replies[i].poststat.ia_size > cmp_sz) {
+            cmp_sz = replies[i].poststat.ia_size;
+            fav_child = i;
+        } else if (replies[i].poststat.ia_size == cmp_sz) {
+            fav_child = -1;
+        }
+    }
+    if (fav_child == -1) {
+        gf_msg(this->name, GF_LOG_ERROR, 0, AFR_MSG_SPLIT_BRAIN,
+               "No bigger file");
     }
     return fav_child;
 }