cluster/ec: Make background healing optional behavior
Provide options to control the number of active background heals and the
heal-wait queue length (qlen).

Change-Id: Idc2419219d881f47e7d2e9bbc1dcdd999b372033
BUG: 1237381
Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
Reviewed-on: http://review.gluster.org/11473
Reviewed-by: Xavier Hernandez <xhernandez@datalab.es>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
This commit is contained in: parent 214101394b, commit c6742adc98
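The commit adds two disperse options; the test introduced below (tests/basic/ec/ec-background-heals.t) exercises them end to end. A minimal CLI sketch, using $V0 as a placeholder volume name (every command form here appears in that test):

    # Disable background healing entirely; this also forces the wait queue to 0:
    gluster volume set $V0 disperse.background-heals 0
    # The declared defaults: up to 8 parallel background heals, 128 queued waiters.
    gluster volume set $V0 disperse.background-heals 8
    gluster volume set $V0 disperse.heal-wait-qlength 128
    # Return background-heals to its default:
    gluster volume reset $V0 disperse.background-heals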
@@ -37,9 +37,11 @@
 */
 #define GD_OP_VERSION_MIN  1 /* MIN is the fresh start op-version, mostly
                                 should not change */
-#define GD_OP_VERSION_MAX  30702 /* MAX VERSION is the maximum count in VME
-                                    table, should keep changing with
-                                    introduction of newer versions */
+#define GD_OP_VERSION_MAX  GD_OP_VERSION_3_7_3 /* MAX VERSION is the maximum
+                                                  count in VME table, should
+                                                  keep changing with
+                                                  introduction of newer
+                                                  versions */
 
 #define GD_OP_VERSION_3_6_0    30600 /* Op-Version for GlusterFS 3.6.0 */
 
@@ -49,6 +51,8 @@
 
 #define GD_OP_VERSION_3_7_2    30702 /* Op-version for GlusterFS 3.7.2 */
 
+#define GD_OP_VERSION_3_7_3    30703 /* Op-version for GlusterFS 3.7.3 */
+
 #define GD_OP_VER_PERSISTENT_AFR_XATTRS GD_OP_VERSION_3_6_0
 
 #include "xlator.h"
@@ -60,7 +60,7 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
 TEST $CLI volume heal $V0
-EXPECT 0 afr_get_pending_heal_count $V0
+EXPECT 0 get_pending_heal_count $V0
 
 # I/O can resume again.
 TEST cat $M0/file
@@ -33,7 +33,7 @@ TEST chmod +x $M0/mdatafile
 #pending entry heal. Also causes pending metadata/data heals on file{1..5}
 TEST touch $M0/dir/file{1..5}
 
-EXPECT 8 afr_get_pending_heal_count $V0
+EXPECT 8 get_pending_heal_count $V0
 
 #After brick comes back up, access from client should not trigger heals
 TEST $CLI volume start $V0 force
@@ -54,7 +54,7 @@ TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
 TEST ls $M0/dir
 
 #No heal must have happened
-EXPECT 8 afr_get_pending_heal_count $V0
+EXPECT 8 get_pending_heal_count $V0
 
 #Enable heal client side heal options and trigger heals
 TEST $CLI volume set $V0 cluster.data-self-heal on
@@ -63,7 +63,7 @@ TEST $CLI volume set $V0 cluster.entry-self-heal on
 
 #Metadata heal is triggered by lookup without need for inode refresh.
 TEST ls $M0/mdatafile
-EXPECT 7 afr_get_pending_heal_count $V0
+EXPECT 7 get_pending_heal_count $V0
 
 #Inode refresh must trigger data and entry heals.
 #To trigger inode refresh for sure, the volume is unmounted and mounted each time.
@@ -74,7 +74,7 @@ TEST cat $M0/datafile
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
 TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
 TEST ls $M0/dir
-EXPECT 5 afr_get_pending_heal_count $V0
+EXPECT 5 get_pending_heal_count $V0
 
 TEST cat $M0/dir/file1
 TEST cat $M0/dir/file2
@@ -82,5 +82,5 @@ TEST cat $M0/dir/file3
 TEST cat $M0/dir/file4
 TEST cat $M0/dir/file5
 
-EXPECT 0 afr_get_pending_heal_count $V0
+EXPECT 0 get_pending_heal_count $V0
 cleanup;
@@ -43,7 +43,7 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
 
 # Wait for heal to complete
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 # Check if entry-heal has happened
 TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1_new | sort)
@@ -20,6 +20,6 @@ echo abc > $M0/a
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
 find $M0 | xargs stat > /dev/null
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 cleanup
@@ -53,7 +53,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #check all files created/deleted on brick1 are also replicated on brick 0
 #(i.e. no reverse heal has happened)
@@ -82,7 +82,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #check heal has happened in the correct direction
 TEST test -d $B0/brick0/file
@@ -105,7 +105,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #check heal has happened in the correct direction
 EXPECT "777" stat -c %a $B0/brick0/file
@@ -129,7 +129,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #check heal has happened in the correct direction
 EXPECT "$NEW_UID$NEW_GID" stat -c %u%g $B0/brick0/file
@@ -160,7 +160,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #check heal has happened in the correct direction
 EXPECT 0 stat -c %s $B0/brick1/file
@@ -183,7 +183,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #check heal has happened in the correct direction
 EXPECT "$GFID" gf_get_gfid_xattr $B0/brick0/file
@@ -207,7 +207,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #check heal has happened in the correct direction
 TEST test -f $B0/brick0/hard_link_to_file
@@ -233,7 +233,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 TEST diff <(echo "user.myattr_1=\"My_attribute_1_modified\"") <(getfattr -n user.myattr_1 $B0/brick1/file|grep user.myattr_1)
 TEST diff <(echo "user.myattr_3=\"My_attribute_3\"") <(getfattr -n user.myattr_3 $B0/brick1/file|grep user.myattr_3)
@@ -68,7 +68,7 @@ done
 HEAL_FILES=$(($HEAL_FILES + 3))
 
 cd ~
-EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0
+EXPECT "$HEAL_FILES" get_pending_heal_count $V0
 
 #When bricks are down, it says Transport End point Not connected for them
 EXPECT "3" disconnected_brick_count $V0
@@ -78,12 +78,12 @@ EXPECT "3" disconnected_brick_count $V0
 #replica pair.
 for i in {11..20}; do echo abc > $M0/$i; done
 HEAL_FILES=$(($HEAL_FILES + 10)) #count extra 10 files
-EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0
+EXPECT "$HEAL_FILES" get_pending_heal_count $V0
 #delete the files now, so that stale indices will remain.
 for i in {11..20}; do rm -f $M0/$i; done
 #After deleting files they should not appear in heal info
 HEAL_FILES=$(($HEAL_FILES - 10))
-EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0
+EXPECT "$HEAL_FILES" get_pending_heal_count $V0
 
 
 TEST ! $CLI volume heal $V0
@@ -99,10 +99,10 @@ check_bricks_up $V0
 TEST $CLI volume heal $V0
 sleep 5 #Until the heal-statistics command implementation
 #check that this heals the contents partially
-TEST [ $HEAL_FILES -gt $(afr_get_pending_heal_count $V0) ]
+TEST [ $HEAL_FILES -gt $(get_pending_heal_count $V0) ]
 
 TEST $CLI volume heal $V0 full
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
 
 #Test that ongoing IO is not considered as Pending heal
 (dd if=/dev/zero of=$M0/file1 bs=1k 2>/dev/null 1>/dev/null)&
@@ -115,7 +115,7 @@ back_pid3=$!;
 back_pid4=$!;
 (dd if=/dev/zero of=$M0/file5 bs=1k 2>/dev/null 1>/dev/null)&
 back_pid5=$!;
-EXPECT 0 afr_get_pending_heal_count $V0
+EXPECT 0 get_pending_heal_count $V0
 kill -SIGTERM $back_pid1;
 kill -SIGTERM $back_pid2;
 kill -SIGTERM $back_pid3;
@@ -132,13 +132,13 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
 EXPECT "off" volume_option $V0 cluster.data-self-heal
 kill_multiple_bricks $V0 $H0 $B0
 echo abc > $M0/f
-EXPECT 1 afr_get_pending_heal_count $V0
+EXPECT 1 get_pending_heal_count $V0
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN 20 "Y" glustershd_up_status
 check_bricks_up $V0
 
 TEST $CLI volume heal $V0
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
 TEST $CLI volume set $V0 cluster.data-self-heal on
 
 #METADATA
@@ -147,13 +147,13 @@ EXPECT "off" volume_option $V0 cluster.metadata-self-heal
 kill_multiple_bricks $V0 $H0 $B0
 
 TEST chmod 777 $M0/f
-EXPECT 1 afr_get_pending_heal_count $V0
+EXPECT 1 get_pending_heal_count $V0
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN 20 "Y" glustershd_up_status
 check_bricks_up $V0
 
 TEST $CLI volume heal $V0
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
 TEST $CLI volume set $V0 cluster.metadata-self-heal on
 
 #ENTRY
@@ -163,13 +163,13 @@ kill_multiple_bricks $V0 $H0 $B0
 TEST touch $M0/d/a
 # 4 if mtime/ctime is modified for d in bricks without a
 # 2 otherwise
-PENDING=$( afr_get_pending_heal_count $V0 )
+PENDING=$( get_pending_heal_count $V0 )
 TEST test $PENDING -eq 2 -o $PENDING -eq 4
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN 20 "Y" glustershd_up_status
 check_bricks_up $V0
 TEST $CLI volume heal $V0
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
 TEST $CLI volume set $V0 cluster.entry-self-heal on
 
 #Negative test cases
@@ -49,7 +49,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST gluster volume heal $V0 full
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #If the file system of bricks is XFS and speculative preallocation is on,
 #dropping cache should be done to free speculatively pre-allocated blocks
@@ -119,7 +119,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST gluster volume heal $V0 full
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #If the file system of bricks is XFS and speculative preallocation is on,
 #dropping cache should be done to free speculatively pre-allocated blocks
@@ -38,7 +38,7 @@ TEST $CLI volume start $V0 force
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
 
-EXPECT 4 afr_get_pending_heal_count $V0
+EXPECT 4 get_pending_heal_count $V0
 
 TEST ! cat $M0/data-split-brain.txt
 TEST ! getfattr -n user.test $M0/metadata-split-brain.txt
@@ -82,6 +82,6 @@ TEST setfattr -n replica.split-brain-heal-finalize -v $V0-client-1 $M0/data-spli
 EXPECT "brick0" get_text_xattr user.test $M0/metadata-split-brain.txt
 EXPECT "brick1_alive" cat $M0/data-split-brain.txt
 
-EXPECT 0 afr_get_pending_heal_count $V0
+EXPECT 0 get_pending_heal_count $V0
 
 cleanup;
tests/basic/ec/ec-background-heals.t (new file, 100 lines)
@@ -0,0 +1,100 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# This test checks background heals option
+
+cleanup
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{0..2}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 disperse.background-heals 0
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "0" mount_get_option_value $M0 $V0-disperse-0 background-heals
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "0" mount_get_option_value $M0 $V0-disperse-0 heal-wait-qlength
+TEST touch $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}2
+echo abc > $M0/a
+EXPECT 2 get_pending_heal_count $V0 #One for each active brick
+$CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+#Accessing file shouldn't heal the file
+EXPECT "abc" cat $M0/a
+sleep 3
+EXPECT 2 get_pending_heal_count $V0 #One for each active brick
+TEST $CLI volume set $V0 disperse.background-heals 1
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "1" mount_get_option_value $M0 $V0-disperse-0 background-heals
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "128" mount_get_option_value $M0 $V0-disperse-0 heal-wait-qlength
+#Accessing file should heal the file now
+EXPECT "abc" cat $M0/a
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
+
+#Test above test cases with reset instead of setting background-heals to 1
+TEST $CLI volume set $V0 disperse.heal-wait-qlength 1024
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "1" mount_get_option_value $M0 $V0-disperse-0 background-heals
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "1024" mount_get_option_value $M0 $V0-disperse-0 heal-wait-qlength
+TEST $CLI volume set $V0 disperse.background-heals 0
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "0" mount_get_option_value $M0 $V0-disperse-0 background-heals
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "0" mount_get_option_value $M0 $V0-disperse-0 heal-wait-qlength
+TEST $CLI volume set $V0 disperse.heal-wait-qlength 200 #Changing qlength shouldn't affect anything now
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "0" mount_get_option_value $M0 $V0-disperse-0 background-heals
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "0" mount_get_option_value $M0 $V0-disperse-0 heal-wait-qlength
+TEST kill_brick $V0 $H0 $B0/${V0}2
+echo abc > $M0/a
+EXPECT 2 get_pending_heal_count $V0 #One for each active brick
+$CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+#Accessing file shouldn't heal the file
+EXPECT "abc" cat $M0/a
+sleep 3
+EXPECT 2 get_pending_heal_count $V0 #One for each active brick
+TEST $CLI volume reset $V0 disperse.background-heals
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "8" mount_get_option_value $M0 $V0-disperse-0 background-heals
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "200" mount_get_option_value $M0 $V0-disperse-0 heal-wait-qlength
+#Accessing file should heal the file now
+EXPECT "abc" cat $M0/a
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
+
+#Test that disabling background-heals still drains the queue
+TEST $CLI volume set $V0 disperse.background-heals 1
+TEST touch $M0/{a,b,c,d}
+TEST kill_brick $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "1" mount_get_option_value $M0 $V0-disperse-0 background-heals
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "200" mount_get_option_value $M0 $V0-disperse-0 heal-wait-qlength
+TEST truncate -s 1GB $M0/a
+echo abc > $M0/b
+echo abc > $M0/c
+echo abc > $M0/d
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+TEST chown root:root $M0/{a,b,c,d}
+TEST $CLI volume set $V0 disperse.background-heals 0
+EXPECT_NOT "0" mount_get_option_value $M0 $V0-disperse-0 heal-waiters
+TEST truncate -s 0 $M0/a # This completes the heal fast ;-)
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
+
+#Test that background heals get rejected on meeting background-qlen limit
+TEST $CLI volume set $V0 disperse.background-heals 1
+TEST $CLI volume set $V0 disperse.heal-wait-qlength 0
+TEST kill_brick $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "1" mount_get_option_value $M0 $V0-disperse-0 background-heals
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "0" mount_get_option_value $M0 $V0-disperse-0 heal-wait-qlength
+TEST truncate -s 1GB $M0/a
+echo abc > $M0/b
+echo abc > $M0/c
+echo abc > $M0/d
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+TEST chown root:root $M0/{a,b,c,d}
+EXPECT "0" mount_get_option_value $M0 $V0-disperse-0 heal-waiters
+cleanup
@@ -25,7 +25,7 @@ done
 HEAL_FILES=$(($HEAL_FILES+3)) #count brick root distribute-subvol num of times
 
 cd ~
-EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0
+EXPECT "$HEAL_FILES" get_pending_heal_count $V0
 TEST rm -f $M0/*
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
 TEST $CLI volume heal $V0 info
@@ -55,7 +55,7 @@ TEST $CLI volume set $V0 cluster.self-heal-daemon on
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 
 #Check external xattrs match
 EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo)
@@ -43,6 +43,6 @@ TEST $CLI volume heal $V0
 EXPECT_WITHIN $HEAL_TIMEOUT '2' count_sh_entries $B0/brick0
 EXPECT_WITHIN $HEAL_TIMEOUT '2' count_sh_entries $B0/brick1
 #Two entries for DIR and two for FILE
-EXPECT_WITHIN $HEAL_TIMEOUT "4" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "4" get_pending_heal_count $V0
 TEST diff <(ls $B0/brick0/DIR) <(ls $B0/brick1/DIR)
 cleanup
@@ -50,7 +50,7 @@ TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0
 TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}1/mdatafile
 
 TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
 EXPECT_WITHIN $HEAL_TIMEOUT '0' echo $(count_sh_entries $B0/$V0"0")
 EXPECT_WITHIN $HEAL_TIMEOUT '0' echo $(count_sh_entries $B0/$V0"1")
 
@@ -54,7 +54,7 @@ TEST stat $M0/b
 TEST gluster volume heal $V0 full
 EXPECT_WITHIN $HEAL_TIMEOUT "Y" file_exists $B0/gfs0/brick01/a
 EXPECT_WITHIN $HEAL_TIMEOUT "Y" file_exists $B0/gfs0/brick02/b
-EXPECT_WITHIN $HEAL_TIMEOUT 0 afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
 
 size=`stat -c '%s' /etc/passwd`
 EXPECT $size stat -c '%s' $B0/gfs0/brick01/a
@@ -216,7 +216,7 @@ function afr_get_changelog_xattr {
         echo $xval
 }
 
-function afr_get_pending_heal_count {
+function get_pending_heal_count {
         local vol=$1
         gluster volume heal $vol info | grep "Number of entries" | awk '{ sum+=$4} END {print sum}'
 }
@@ -26,9 +26,6 @@
 #include "syncop-utils.h"
 #include "cluster-syncop.h"
 
-#define EC_MAX_BACKGROUND_HEALS 8
-#define EC_MAX_HEAL_WAITERS 128
-
 #define alloca0(size) ({void *__ptr; __ptr = alloca(size); memset(__ptr, 0, size); __ptr; })
 #define EC_COUNT(array, max) ({int __i; int __res = 0; for (__i = 0; __i < max; __i++) if (array[__i]) __res++; __res; })
 #define EC_INTERSECT(dst, src1, src2, max) ({int __i; for (__i = 0; __i < max; __i++) dst[__i] = src1[__i] && src2[__i]; })
@@ -2329,10 +2326,9 @@ __ec_dequeue_heals (ec_t *ec)
         if (list_empty (&ec->heal_waiting))
                 goto none;
 
-        if (ec->healers == EC_MAX_BACKGROUND_HEALS)
+        if ((ec->background_heals > 0) && (ec->healers >= ec->background_heals))
                 goto none;
 
-        GF_ASSERT (ec->healers < EC_MAX_BACKGROUND_HEALS);
         fop = list_entry(ec->heal_waiting.next, ec_fop_data_t, healer);
         ec->heal_waiters--;
         list_del_init(&fop->healer);
@@ -2400,12 +2396,14 @@ ec_heal_throttle (xlator_t *this, ec_fop_data_t *fop)
 
         LOCK (&ec->lock);
         {
-                if (ec->heal_waiters >= EC_MAX_HEAL_WAITERS) {
-                        can_heal = _gf_false;
-                } else {
+                if ((ec->background_heals > 0) &&
+                    (ec->heal_wait_qlen + ec->background_heals) >
+                                    (ec->heal_waiters + ec->healers)) {
                         list_add_tail(&fop->healer, &ec->heal_waiting);
                         ec->heal_waiters++;
                         fop = __ec_dequeue_heals (ec);
+                } else {
+                        can_heal = _gf_false;
                 }
         }
         UNLOCK (&ec->lock);
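The admission check above queues a heal for background processing only while combined capacity remains; otherwise the heal is rejected, which the new test's "background-qlen limit" case relies on. A bash rendering of the condition (an illustrative sketch, not part of the commit; the function name can_queue_heal is hypothetical):

    #!/bin/bash
    # Mirrors the C condition in ec_heal_throttle(): queue when
    # background_heals > 0 and
    # heal_wait_qlen + background_heals > heal_waiters + healers.
    can_queue_heal () {
        local background_heals=$1 heal_wait_qlen=$2 healers=$3 heal_waiters=$4
        [ "$background_heals" -gt 0 ] &&
        [ $((heal_wait_qlen + background_heals)) -gt $((heal_waiters + healers)) ]
    }
    can_queue_heal 8 128 8 127 && echo queued || echo rejected  # queued: 136 > 135
    can_queue_heal 8 128 8 128 && echo queued || echo rejected  # rejected: queue full
    can_queue_heal 0 128 0 0   && echo queued || echo rejected  # rejected: healing disabled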
@@ -219,15 +219,35 @@ int32_t mem_acct_init(xlator_t * this)
         return 0;
 }
 
+void
+ec_configure_background_heal_opts (ec_t *ec, int background_heals,
+                                   int heal_wait_qlen)
+{
+        if (background_heals == 0) {
+                ec->heal_wait_qlen = 0;
+        } else {
+                ec->heal_wait_qlen = heal_wait_qlen;
+        }
+        ec->background_heals = background_heals;
+}
+
 int32_t
 reconfigure (xlator_t *this, dict_t *options)
 {
-        ec_t *ec = this->private;
+        ec_t     *ec = this->private;
+        uint32_t  heal_wait_qlen = 0;
+        uint32_t  background_heals = 0;
 
-        GF_OPTION_RECONF ("self-heal-daemon", ec->shd.enabled, options, bool, failed);
+        GF_OPTION_RECONF ("self-heal-daemon", ec->shd.enabled, options, bool,
+                          failed);
         GF_OPTION_RECONF ("iam-self-heal-daemon", ec->shd.iamshd, options,
                           bool, failed);
 
+        GF_OPTION_RECONF ("background-heals", background_heals, options,
+                          uint32, failed);
+        GF_OPTION_RECONF ("heal-wait-qlength", heal_wait_qlen, options,
+                          uint32, failed);
+        ec_configure_background_heal_opts (ec, background_heals,
+                                           heal_wait_qlen);
         return 0;
 failed:
         return -1;
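ec_configure_background_heal_opts() couples the two options: disabling background-heals zeroes the effective wait-queue length, while any non-zero value leaves the configured qlength in force. The new test verifies exactly this observable behavior:

    TEST $CLI volume set $V0 disperse.heal-wait-qlength 1024
    EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "1024" mount_get_option_value $M0 $V0-disperse-0 heal-wait-qlength
    TEST $CLI volume set $V0 disperse.background-heals 0
    # qlength collapses to 0 because background healing is now disabled
    EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "0" mount_get_option_value $M0 $V0-disperse-0 heal-wait-qlength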
@@ -577,6 +597,10 @@ init (xlator_t *this)
         ec_method_initialize();
         GF_OPTION_INIT ("self-heal-daemon", ec->shd.enabled, bool, failed);
         GF_OPTION_INIT ("iam-self-heal-daemon", ec->shd.iamshd, bool, failed);
+        GF_OPTION_INIT ("background-heals", ec->background_heals, uint32, failed);
+        GF_OPTION_INIT ("heal-wait-qlength", ec->heal_wait_qlen, uint32, failed);
+        ec_configure_background_heal_opts (ec, ec->background_heals,
+                                           ec->heal_wait_qlen);
 
         if (ec->shd.iamshd)
                 ec_selfheal_daemon_init (this);
@@ -1188,6 +1212,10 @@ int32_t ec_dump_private(xlator_t *this)
         gf_proc_dump_write("childs_up", "%u", ec->xl_up_count);
         gf_proc_dump_write("childs_up_mask", "%s",
                            ec_bin(tmp, sizeof(tmp), ec->xl_up, ec->nodes));
+        gf_proc_dump_write("background-heals", "%d", ec->background_heals);
+        gf_proc_dump_write("heal-wait-qlength", "%d", ec->heal_wait_qlen);
+        gf_proc_dump_write("healers", "%d", ec->healers);
+        gf_proc_dump_write("heal-waiters", "%d", ec->heal_waiters);
 
         return 0;
 }
@@ -1271,5 +1299,21 @@ struct volume_options options[] =
                        "translator is running as part of self-heal-daemon "
                        "or not."
     },
+    { .key = {"background-heals"},
+      .type = GF_OPTION_TYPE_INT,
+      .min = 0, /*Disabling background heals*/
+      .max = 256,
+      .default_value = "8",
+      .description = "This option can be used to control number of parallel"
+                     " heals",
+    },
+    { .key = {"heal-wait-qlength"},
+      .type = GF_OPTION_TYPE_INT,
+      .min = 0,
+      .max = 65536, /*Around 100MB as of now with sizeof(ec_fop_data_t) at 1800*/
+      .default_value = "128",
+      .description = "This option can be used to control number of heals"
+                     " that can wait",
+    },
     { }
 };
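The defaults declared here (8 and 128) are what volume reset returns to; note that resetting background-heals alone leaves a previously set qlength in place, as the new test checks at the point where heal-wait-qlength had been set to 200:

    TEST $CLI volume reset $V0 disperse.background-heals
    EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "8" mount_get_option_value $M0 $V0-disperse-0 background-heals
    EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "200" mount_get_option_value $M0 $V0-disperse-0 heal-wait-qlength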
@@ -47,6 +47,8 @@ struct _ec
     gf_lock_t         lock;
     gf_timer_t       *timer;
     gf_boolean_t      shutdown;
+    uint32_t          background_heals;
+    uint32_t          heal_wait_qlen;
     struct list_head  pending_fops;
     struct list_head  heal_waiting;
     struct list_head  healing;
@@ -2010,6 +2010,14 @@ struct volopt_map_entry glusterd_volopt_map[] = {
           .voltype = "features/upcall",
           .op_version = GD_OP_VERSION_3_7_0,
         },
+        { .key = "disperse.background-heals",
+          .voltype = "cluster/disperse",
+          .op_version = GD_OP_VERSION_3_7_3,
+        },
+        { .key = "disperse.heal-wait-qlength",
+          .voltype = "cluster/disperse",
+          .op_version = GD_OP_VERSION_3_7_3,
+        },
         { .key = NULL
         }
 };
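Both keys are registered with op-version GD_OP_VERSION_3_7_3 (30703), so glusterd should refuse to set them until the whole cluster runs at least that op-version. A hedged sketch of the expected CLI behavior (myvol is a hypothetical volume name):

    gluster volume set myvol disperse.background-heals 16   # expected to fail below op-version 30703
    gluster volume set all cluster.op-version 30703         # raise the cluster op-version
    gluster volume set myvol disperse.background-heals 16   # expected to succeed now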