function volinfo_field()
{
local vol=$1;
local field=$2;
$CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
}
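# Usage sketch (illustrative; assumes $CLI and $V0 are provided by the test
# harness, as they are for every helper in this file):
#   volinfo_field $V0 'Status'    # prints e.g. "Started"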
function volume_get_field()
{
local vol=$1
local field=$2
$CLI volume get $vol $field | tail -1 | awk '{print $2}'
}
function brick_count()
{
local vol=$1;
$CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
}
function online_brick_count ()
{
pgrep glusterfsd | wc -l
}
function brick_up_status {
local vol=$1
local host=$2
local brick=$3
$CLI volume status $vol $host:$brick --xml | sed -ne 's/.*<status>\([01]\)<\/status>/\1/p'
}
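# brick_up_status prints "1" when the brick is online and "0" otherwise, so it
# composes with the harness retry macros. Illustrative call, assuming the usual
# $V0/$H0/$B0 variables from the test environment:
#   EXPECT_WITHIN 20 "1" brick_up_status $V0 $H0 $B0/brick0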
function volume_option()
{
local vol=$1
local key=$2
$CLI volume info $vol | egrep "^$key: " | cut -f2 -d' ';
}
function rebalance_status_field {
$CLI volume rebalance $1 status | awk '{print $7}' | sed -n 3p
}
function fix-layout_status_field {
#The fix-layout status can be up to 3 words (e.g. 'fix-layout in progress'), hence the awk print of $2 through $4.
#But if the status has fewer than 3 words, awk also prints the next field, i.e. the run_time_in_secs (e.g. 'completed 3.00').
#So we trim the digits and dots out with `tr`, then remove the trailing whitespace with sed. What remains is one of the
#strings in the 'cli_vol_task_status_str' char array of cli-rpc-ops.c.
$CLI volume rebalance $1 status | awk '{print $2,$3,$4}' | sed -n 3p | tr -d '0-9.' | sed 's/ *$//g'
}
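# Illustrative result: once fix-layout finishes, the pipeline above reduces a
# status row such as 'completed 3.00' to just "completed":
#   EXPECT_WITHIN 60 "completed" fix-layout_status_field $V0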
function detach_tier_status_field {
$CLI volume tier $1 detach status | awk '{print $7,$8,$9}' | sed -n 3p | tr -d '0-9.' | sed 's/ *$//g'
}
function remove_brick_status_completed_field {
local vol=$1
local brick_list=$2
$CLI volume remove-brick $vol $brick_list status | awk '{print $7}' | sed -n 3p
}
function get_mount_process_pid {
local vol=$1
ps auxww | grep glusterfs | grep -E "volfile-id[ =]/?$vol " | awk '{print $2}' | head -1
}
function get_nfs_pid ()
{
ps auxww | grep "volfile-id\ gluster\/nfs" | awk '{print $2}' | head -1
}
function read_nfs_pidfile ()
{
cat $GLUSTERD_WORKDIR/nfs/run/nfs.pid
}
function cleanup_statedump {
pid=$1
rm -f $statedumpdir/*$pid.dump.*
#.vimrc friendly comment */
}
function generate_statedump {
local fpath=""
pid=$1
#remove old stale statedumps
cleanup_statedump $pid
kill -USR1 $pid
#Wait till the statedump is generated
sleep 1
fname=$(ls $statedumpdir | grep -E "\.$pid\.dump\.")
echo $statedumpdir/$fname
}
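# A minimal sketch of taking and inspecting a statedump (names illustrative;
# assumes a fuse mount of $V0 managed by the harness):
#   fpath=$(generate_statedump $(get_mount_process_pid $V0))
#   grep -a "xlator" $fpath      # dumps are plain text, organised in sections
#   cleanup_statedump $(get_mount_process_pid $V0)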
function generate_mount_statedump {
local vol=$1
generate_statedump $(get_mount_process_pid $vol)
}
function cleanup_mount_statedump {
local vol=$1
cleanup_statedump $(get_mount_process_pid $vol)
}
function snap_client_connected_status {
local vol=$1
local fpath=$(generate_mount_statedump $vol)
up=$(grep -a -A2 xlator.protocol.client.$vol-snapd-client.priv $fpath | tail -1 | cut -f 2 -d'=')
rm -f $fpath
echo "$up"
}
function _jbrc_child_up_status {
local vol=$1
#brick_id is (brick-num in volume info - 1)
local brick_id=$2
local gen_state_dump=$3
local fpath=$($gen_state_dump $vol)
up=$(grep -a -B1 child_$brick_id=$vol-client-$brick_id $fpath | head -1 | cut -f2 -d'=')
rm -f $fpath
echo "$up"
}
function jbrc_child_up_status {
local vol=$1
#brick_id is (brick-num in volume info - 1)
local brick_id=$2
_jbrc_child_up_status $vol $brick_id generate_mount_statedump
}
function _afr_child_up_status {
local vol=$1
#brick_id is (brick-num in volume info - 1)
local brick_id=$2
local gen_state_dump=$3
local fpath=$($gen_state_dump $vol)
up=$(grep -a -B1 trusted.afr.$vol-client-$brick_id $fpath | head -1 | cut -f2 -d'=')
rm -f $fpath
echo "$up"
}
function afr_child_up_status_meta {
local mnt=$1
local repl=$2
local child=$3
grep "child_up\[$child\]" $mnt/.meta/graphs/active/$repl/private | awk '{print $3}'
}
function afr_child_up_status {
local vol=$1
#brick_id is (brick-num in volume info - 1)
local brick_id=$2
_afr_child_up_status $vol $brick_id generate_mount_statedump
}
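# Note the off-by-one convention: the brick listed as "Brick2" in volume info
# is brick_id 1 here. Illustrative wait for its AFR child to come up:
#   EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1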
function ec_get_info {
local vol=$1
local dist_id=$2
local key=$3
local fpath=$4
local value=$(sed -n "/^\[cluster\/disperse\.$vol-disperse-$dist_id\]/,/^\[/{s/^$key=\(.*\)/\1/p;}" $fpath | head -1)
rm -f $fpath
echo "$value"
}
function ec_child_up_status {
local vol=$1
local dist_id=$2
local brick_id=$(($3 + 1))
local mask=$(ec_get_info $vol $dist_id "childs_up_mask" $(generate_mount_statedump $vol))
echo "${mask: -$brick_id:1}"
}
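# ec_child_up_status indexes childs_up_mask from the right, so brick_id 0 maps
# to the last character of the mask: a mask of "101" (illustrative) means
# bricks 0 and 2 are up while brick 1 is down.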
function ec_child_up_count {
local vol=$1
local dist_id=$2
ec_get_info $vol $dist_id "childs_up" $(generate_mount_statedump $vol)
}
function ec_child_up_status_shd {
local vol=$1
local dist_id=$2
local brick_id=$(($3 + 1))
local mask=$(ec_get_info $vol $dist_id "childs_up_mask" $(generate_shd_statedump $vol))
echo "${mask: -$brick_id:1}"
}
function ec_child_up_count_shd {
local vol=$1
local dist_id=$2
ec_get_info $vol $dist_id "childs_up" $(generate_shd_statedump $vol)
}
function get_shd_process_pid {
ps auxww | grep glusterfs | grep -E "glustershd/run/glustershd.pid" | awk '{print $2}' | head -1
}
function generate_shd_statedump {
generate_statedump $(get_shd_process_pid)
}
function generate_nfs_statedump {
generate_statedump $(get_nfs_pid)
}
function generate_brick_statedump {
local vol=$1
local host=$2
local brick=$3
generate_statedump $(get_brick_pid $vol $host $brick)
}
function afr_child_up_status_in_shd {
local vol=$1
#brick_id is (brick-num in volume info - 1)
local brick_id=$2
_afr_child_up_status $vol $brick_id generate_shd_statedump
}
function afr_child_up_status_in_nfs {
local vol=$1
#brick_id is (brick-num in volume info - 1)
local brick_id=$2
_afr_child_up_status $vol $brick_id generate_nfs_statedump
}
function nfs_up_status {
gluster volume status | grep "NFS Server" | awk '{print $7}'
}
function glustershd_up_status {
gluster volume status | grep "Self-heal Daemon" | awk '{print $7}'
}
function quotad_up_status {
gluster volume status | grep "Quota Daemon" | awk '{print $7}'
}
function get_brick_pid {
local vol=$1
local host=$2
local brick=$3
local brick_hyphenated=$(echo $brick | tr '/' '-')
cat $GLUSTERD_WORKDIR/vols/$vol/run/${host}${brick_hyphenated}.pid
}
function kill_brick {
local vol=$1
local host=$2
local brick=$3
kill -9 $(get_brick_pid $vol $host $brick)
}
function check_option_help_presence {
local option=$1
$CLI volume set help | grep "^Option:" | grep -w $option
}
function afr_get_changelog_xattr {
local file=$1
local xkey=$2
local xval=$(getfattr -n $xkey -e hex $file 2>/dev/null | grep "$xkey" | cut -f2 -d'=')
if [ -z "$xval" ]; then
xval="0x000000000000000000000000"
fi
echo $xval
}
function get_pending_heal_count {
2013-01-21 18:06:25 +05:30
local vol=$1
gluster volume heal $vol info | grep "Number of entries" | awk '{ sum+=$4} END {print sum}'
}
function afr_get_split_brain_count {
local vol=$1
gluster volume heal $vol info split-brain | grep "Number of entries in split-brain" | awk '{ sum+=$6} END {print sum}'
}
function afr_get_index_path {
local brick_path=$1
echo "$brick_path/.glusterfs/indices/xattrop"
}
function afr_get_num_indices_in_brick {
local brick_path=$1
ls $(afr_get_index_path $brick_path) | grep -v xattrop | wc -l
}
function gf_get_gfid_xattr {
file=$1
getfattr -n trusted.gfid -e hex $file 2>/dev/null | grep "trusted.gfid" | cut -f2 -d'='
}
function gf_gfid_xattr_to_str {
xval=$1
echo "${xval:2:8}-${xval:10:4}-${xval:14:4}-${xval:18:4}-${xval:22:12}"
}
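# Illustrative conversion (made-up gfid): the hex xattr value
#   0xabcdef00123456789abcdef012345678
# is sliced into the canonical UUID form
#   abcdef00-1234-5678-9abc-def012345678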
function get_text_xattr {
local key=$1
local path=$2
getfattr -d -m. -e text $path 2>/dev/null | grep -a $key | cut -f2 -d'='
}
function gf_check_file_opened_in_brick {
vol=$1
host=$2
brick=$3
realpath=$4
ls -l /proc/$(get_brick_pid $vol $host $brick)/fd | grep "${realpath}$" > /dev/null 2>&1
if [ $? -eq 0 ]; then
echo "Y"
else
echo "N"
fi
}
function gf_get_gfid_backend_file_path {
brickpath=$1
filepath_in_brick=$2
gfid=$(gf_get_gfid_xattr "$brickpath/$filepath_in_brick")
gfidstr=$(gf_gfid_xattr_to_str $gfid)
echo "$brickpath/.glusterfs/${gfidstr:0:2}/${gfidstr:2:2}/$gfidstr"
}
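# Sketch of the resulting backend layout (made-up gfid): a file whose gfid is
# abcdef00-1234-... resolves to <brickpath>/.glusterfs/ab/cd/abcdef00-1234-...,
# i.e. the first two and next two hex digits form the two directory levels.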
function gf_rm_file_and_gfid_link {
brickpath=$1
filepath_in_brick=$2
rm -f $(gf_get_gfid_backend_file_path $brickpath $filepath_in_brick)
rm -f "$brickpath/$filepath_in_brick"
}
function gd_is_replace_brick_completed {
local host=$1
local vol=$2
local src_brick=$3
local dst_brick=$4
$CLI volume replace-brick $vol $src_brick $dst_brick status | grep -i "Migration complete" > /dev/null
if [ $? -eq 0 ]; then
echo "Y"
else
echo "N"
fi
}
function dht_get_layout {
local my_xa=trusted.glusterfs.dht
getfattr -d -e hex -n $my_xa $1 2> /dev/null | grep "$my_xa=" | cut -d= -f2
}
function afr_get_specific_changelog_xattr ()
{
local path=$1
local key=$2
local type=$3
local specific_changelog=""
changelog_xattr=$(afr_get_changelog_xattr "$path" "$key")
if [ "$type" == "data" ]; then
specific_changelog=${changelog_xattr:2:8}
elif [ "$type" == "metadata" ]; then
specific_changelog=${changelog_xattr:10:8}
elif [ "$type" == "entry" ]; then
specific_changelog=${changelog_xattr:18:8}
else
specific_changelog="error"
fi
echo $specific_changelog
}
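# The AFR changelog xattr is 12 bytes: 4-byte data, metadata and entry pending
# counters, hence the 8-hex-digit slices above. Illustrative call (made-up
# path and xattr key):
#   afr_get_specific_changelog_xattr $B0/brick0/a.txt trusted.afr.$V0-client-1 data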
##
# query pathinfo xattr and extract POSIX pathname(s)
##
function get_backend_paths {
local path=$1
getfattr -m . -n trusted.glusterfs.pathinfo $path | tr ' ' '\n' | sed -n 's/<POSIX.*:.*:\(.*\)>.*/\1/p'
}
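# Illustrative output (made-up mount and bricks): for a 2-way replica,
# get_backend_paths $M0/a.txt prints one backend path per brick, e.g.
# /d/backends/brick0/a.txt and /d/backends/brick1/a.txt.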
#Gets the xattr value in hex, and removes the leading 0x from the value
function get_hex_xattr {
local key=$1
local path=$2
getfattr -d -m. -e hex $path 2>/dev/null | grep $key | cut -f2 -d'=' | cut -f2 -d'x'
}
function cumulative_stat_count {
echo "$1" | grep "Cumulative Stats:" | wc -l
}
function incremental_stat_count {
echo "$1" | grep "Interval$2Stats:" | wc -l
}
function cleared_stat_count {
echo "$1" | grep "Cleared stats." | wc -l
}
function data_read_count {
echo "$1" | grep "Data Read:$2bytes" | wc -l
}
function data_written_count {
echo "$1" | grep "Data Written:$2bytes" | wc -l
}
function has_holes {
if [ $((`stat -c '%b*%B-%s' $1`)) -lt 0 ];
then
echo "1"
else
echo "0"
fi
}
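# The stat format computes (allocated blocks * block size) - apparent size; a
# negative value means fewer blocks are allocated than the file's length, i.e.
# the file is sparse. Illustrative check (made-up path):
#   truncate -s 1M /tmp/sparse && has_holes /tmp/sparse    # prints "1"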
function do_volume_operations() {
local operation=$1
local count=$2
local force=$3
local pids=()
local cli
local v
for i in `seq 1 $count`; do
cli="CLI_$i"
v="V`expr $i - 1`"
${!cli} volume $operation ${!v} $force &
pids[$i]=$!
done
for i in `seq 1 $count`; do
wait ${pids[$i]}
done
}
function start_volumes() {
do_volume_operations start $1
}
function stop_volumes() {
do_volume_operations stop $1
}
function start_force_volumes() {
do_volume_operations start $1 force
}
function stop_force_volumes() {
do_volume_operations stop $1 force
}
function delete_volumes() {
do_volume_operations delete $1
}
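# The ${!cli}/${!v} indirection expands per-cluster variables provided by the
# harness (CLI_1/V0, CLI_2/V1, ...). Illustrative call, assuming two clusters
# are configured:
#   start_volumes 2    # runs "$CLI_1 volume start $V0" and "$CLI_2 volume start $V1" in parallel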
function volume_exists() {
$CLI volume info $1 > /dev/null 2>&1
if [ $? -eq 0 ]; then
echo "Y"
else
echo "N"
fi
}
function killall_gluster() {
pkill gluster
sleep 1
}
function afr_get_index_count {
local brick=$1
ls $brick/.glusterfs/indices/xattrop | grep -v xattrop | wc -l
}
function landfill_entry_count {
local brick=$1
ls $brick/.glusterfs/landfill | wc -l
}
function path_exists {
stat $1 > /dev/null 2>&1
if [ $? -eq 0 ]; then echo "Y"; else echo "N"; fi
}
function force_umount {
${UMOUNT_F} $*
if [ $? -eq 0 ]; then echo "Y"; else echo "N"; fi
}
function assign_gfid {
local gfid=$1
local file=$2
setfattr -n trusted.gfid -v $gfid $file
}
function get_random_gfid {
echo "0x"$(uuidgen | awk -F '-' 'BEGIN {OFS=""} {print $1,$2,$3,$4,$5}')
}
function volgen_volume_exists {
local volfile="$1"
local xl_vol="$2"
local xl_type="$3"
local xl_feature="$4"
xl=$(sed -e "/./{H;\$!d;}" -e "x;/volume $xl_vol/!d;/type $xl_type\/$xl_feature/!d" $volfile)
if [ -z "$xl" ];
then
echo "N"
else
echo "Y"
fi
}
function volgen_volume_option {
local volfile="$1"
local xl_vol="$2"
local xl_type="$3"
local xl_feature="$4"
local xl_option="$5"
sed -e "/./{H;\$!d;}" -e "x;/volume $xl_vol/!d;/type $xl_type\/$xl_feature/!d;/option $xl_option/!d" $volfile | grep " $xl_option " | awk '{print $3}'
}
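# Sketch of querying a generated volfile (path and xlator names illustrative):
#   vfile=$GLUSTERD_WORKDIR/vols/$V0/trusted-$V0.tcp-fuse.vol
#   volgen_volume_exists $vfile $V0-dht cluster distribute          # prints "Y" or "N"
#   volgen_volume_option $vfile $V0-write-behind performance write-behind cache-size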
function mount_get_option_value {
local m=$1
local subvol=$2
local key=$3
grep -w "$key" $m/.meta/graphs/active/$subvol/private | awk '{print $3}'
}
function get_volume_mark {
getfattr -n trusted.glusterfs.volume-mark -ehex $1 | sed -n 's/^trusted.glusterfs.volume-mark=0x//p' | cut -b5-36 | sed 's/\([a-f0-9]\{8\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)/\1-\2-\3-\4-/'
}
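# get_volume_mark re-assembles the UUID embedded in the volume-mark xattr: the
# cut keeps 32 hex digits of the value and the sed re-inserts dashes in the
# 8-4-4-4-12 pattern (the final 12 digits follow the last inserted dash).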
# setup geo-rep in a single a node.
function setup_georep {
$CLI volume create $GMV0 replica 2 $H0:$B0/${GMV0}{1,2,3,4};
$CLI volume start $GMV0
$CLI volume create $GSV0 replica 2 $H0:$B0/${GSV0}{1,2,3,4};
$CLI volume start $GSV0
$CLI system:: execute gsec_create
$CLI volume geo-rep $GMV0 $H0::$GSV0 create push-pem
$CLI volume geo-rep $GMV0 $H0::$GSV0 start
sleep 80 # after start geo-rep takes a minute to get stable
}
# stop and delete geo-rep session
function cleanup_georep {
$CLI volume geo-rep $GMV0 $H0::$GSV0 stop
$CLI volume geo-rep $GMV0 $H0::$GSV0 delete
}
function num_graphs
{
local mountpoint=$1
ls $mountpoint/.meta/graphs/ | grep -v active | wc -l
}
function get_aux()
{
##Check if an auxiliary mount is present
df -h 2>&1 | sed 's#/build/install##' | grep -e "[[:space:]]/run/gluster/${V0}$" -e "[[:space:]]/var/run/gluster/${V0}$" > /dev/null
if [ $? -eq 0 ]
then
echo "0"
else
echo "1"
fi
}
function get_bitd_count {
ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | wc -l
}
function get_scrubd_count {
ps auxww | grep glusterfs | grep scrub.pid | grep -v grep | wc -l
}
function get_quotad_count {
ps auxww | grep glusterfs | grep quotad.pid | grep -v grep | wc -l
}
function get_nfs_count {
ps auxww | grep glusterfs | grep nfs.pid | grep -v grep | wc -l
}
function get_snapd_count {
ps auxww | grep glusterfs | grep snapd.pid | grep -v grep | wc -l
}
function drop_cache() {
case $OSTYPE in
Linux)
echo 3 > /proc/sys/vm/drop_caches
;;
*)
# fail but flush caches
( cd $1 && umount $1 2>/dev/null )
;;
esac
}
function quota_list_field () {
local QUOTA_PATH=$1
local FIELD=$2
local awk_arg="{print \$$FIELD}"
$CLI volume quota $V0 list $QUOTA_PATH | grep $QUOTA_PATH | awk "$awk_arg"
2015-07-08 22:22:50 +05:30
}
function quota_object_list_field () {
local QUOTA_PATH=$1
local FIELD=$2
local awk_arg="{print \$$FIELD}"
$CLI volume quota $V0 list-objects $QUOTA_PATH | grep $QUOTA_PATH | awk "$awk_arg"
}
function quotausage()
{
quota_list_field $1 4
}
function quota_hard_limit()
{
quota_list_field $1 2
}
function quota_soft_limit()
{
quota_list_field $1 3
}
function quota_sl_exceeded()
{
quota_list_field $1 6
}
function quota_hl_exceeded()
{
quota_list_field $1 7
}
function scrub_status()
{
local vol=$1;
local field=$2;
$CLI volume bitrot $vol scrub status | grep "^$field: " | sed 's/.*: //';
}
function get_gfid_string {
local path=$1;
getfattr -n glusterfs.gfid.string $path 2>/dev/null \
| grep glusterfs.gfid.string | cut -d '"' -f 2
}
function file_all_zeroes {
< $1 tr -d '\0' | read -n 1 || echo 1
}
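# file_all_zeroes prints "1" when the file contains only NUL bytes: tr deletes
# every NUL, so `read -n 1` sees no input and fails, triggering the echo; for
# any other content it prints nothing. Illustrative check (made-up path):
#   dd if=/dev/zero of=/tmp/zeroes bs=1k count=1 2>/dev/null
#   file_all_zeroes /tmp/zeroes    # prints "1"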
function get_hard_link_count {
local path=$1;
stat -c %h $path
}