function volinfo_field()
{
        local vol=$1;
        local field=$2;

        $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
}

function brick_count()
{
        local vol=$1;

        $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
}

function online_brick_count ()
{
        pgrep glusterfsd | wc -l
}

function volume_option()
{
        local vol=$1
        local key=$2
        $CLI volume info $vol | egrep "^$key: " | cut -f2 -d' ';
}

function rebalance_status_field {
        # The rebalance status can be up to 3 words (e.g. 'fix-layout in progress'),
        # hence the awk prints fields $7 through $9.
        # If the status is shorter than 3 words, awk also picks up the next field,
        # i.e. the run_time_in_secs (e.g. 'completed 3.00'), so the numbers are
        # trimmed out with `tr` and trailing whitespace is removed with `sed`.
        # What remains is one of the strings in the 'cli_vol_task_status_str'
        # char array of cli-rpc-ops.c.

        $CLI volume rebalance $1 status | awk '{print $7,$8,$9}' | sed -n 3p | tr -d '[^0-9+\.]' | sed 's/ *$//g'
}

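# Illustrative sketch (not executed; the exact CLI column layout may differ by
# version): for a third status line such as
#     localhost   31   1.2GB   100   0   0   completed   3.00
# awk '{print $7,$8,$9}' yields "completed 3.00 ", tr strips the digits and dots,
# and the trailing sed removes the leftover spaces, leaving just "completed".
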
function remove_brick_status_completed_field {
        local vol=$1
        local brick_list=$2
        $CLI volume remove-brick $vol $brick_list status | awk '{print $7}' | sed -n 3p
}

function get_mount_process_pid {
        local vol=$1
        ps auxww | grep glusterfs | grep -E "volfile-id[ =]/?$vol " | awk '{print $2}' | head -1
}

function get_nfs_pid ()
{
        ps auxww | grep "volfile-id\ gluster\/nfs" | awk '{print $2}' | head -1
}

function read_nfs_pidfile ()
{
        echo `cat $GLUSTERD_WORKDIR/nfs/run/nfs.pid`
}

function cleanup_statedump {
        pid=$1
        rm -f $statedumpdir/*$pid.dump.*
        #.vimrc friendly comment */
}

function generate_statedump {
        local fpath=""
        pid=$1
        # remove old stale statedumps
        cleanup_statedump $pid
        kill -USR1 $pid
        # wait till the statedump is generated
        sleep 1
        fname=$(ls $statedumpdir | grep -E "\.$pid\.dump\.")
        echo $statedumpdir/$fname
}

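# Usage sketch (hypothetical values): the helpers below build on generate_statedump,
# e.g. "dump=$(generate_mount_statedump $V0)" asks the fuse client of volume $V0 to
# dump its state via SIGUSR1 and prints the path of the resulting file under
# $statedumpdir; callers typically grep it for xlator state and remove it afterwards.
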
function generate_mount_statedump {
        local vol=$1
        generate_statedump $(get_mount_process_pid $vol)
}

function cleanup_mount_statedump {
        local vol=$1
        cleanup_statedump $(get_mount_process_pid $vol)
}

function snap_client_connected_status {
        local vol=$1
        local fpath=$(generate_mount_statedump $vol)
        up=$(grep -a -A2 xlator.protocol.client.$vol-snapd-client.priv $fpath | tail -1 | cut -f 2 -d'=')
        rm -f $fpath
        echo "$up"
}

function _afr_child_up_status {
        local vol=$1
        # brick_id is (brick-num in volume info - 1)
        local brick_id=$2
        local gen_state_dump=$3
        local fpath=$($gen_state_dump $vol)
        up=$(grep -a -B1 trusted.afr.$vol-client-$brick_id $fpath | head -1 | cut -f2 -d'=')
        rm -f $fpath
        echo "$up"
}

function afr_child_up_status_meta {
        local mnt=$1
        local repl=$2
        local child=$3
        grep "child_up\[$child\]" $mnt/.meta/graphs/active/$repl/private | awk '{print $3}'
}

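# Illustrative sketch (assumes a replica volume $V0 mounted at $M0, as in most of
# these tests): the .meta/.../private file contains "key = value" lines such as
# "child_up[0] = 1", so "afr_child_up_status_meta $M0 $V0-replicate-0 0" would
# print "1" when the first brick is reachable from the client.
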
function afr_child_up_status {
        local vol=$1
        # brick_id is (brick-num in volume info - 1)
        local brick_id=$2
        _afr_child_up_status $vol $brick_id generate_mount_statedump
}

function ec_get_info {
        local vol=$1
        local dist_id=$2
        local key=$3
        local fpath=$(generate_mount_statedump $vol)
        local value=$(sed -n "/^\[cluster\/disperse\.$vol-disperse-$dist_id\]/,/^\[/{s/^$key=\(.*\)/\1/p;}" $fpath | head -1)
        rm -f $fpath
        echo "$value"
}

function ec_child_up_status {
        local vol=$1
        local dist_id=$2
        local brick_id=$(($3 + 1))
        local mask=$(ec_get_info $vol $dist_id "childs_up_mask")
        echo "${mask: -$brick_id:1}"
}

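# Illustrative sketch (hypothetical statedump values): if the mount statedump
# section [cluster/disperse.$V0-disperse-0] contains "childs_up=2" and
# "childs_up_mask=101", then "ec_child_up_count $V0 0" prints 2 and
# "ec_child_up_status $V0 0 1" extracts the second character from the right of
# the mask ("0"), i.e. brick 1 is down; this assumes the rightmost digit of the
# mask corresponds to brick 0.
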
function ec_child_up_count {
        local vol=$1
        local dist_id=$2
        ec_get_info $vol $dist_id "childs_up"
}

function get_shd_process_pid {
        ps auxww | grep glusterfs | grep -E "glustershd/run/glustershd.pid" | awk '{print $2}' | head -1
}

function generate_shd_statedump {
        generate_statedump $(get_shd_process_pid)
}

function generate_nfs_statedump {
        generate_statedump $(get_nfs_pid)
}

function generate_brick_statedump {
        local vol=$1
        local host=$2
        local brick=$3
        generate_statedump $(get_brick_pid $vol $host $brick)
}

function afr_child_up_status_in_shd {
        local vol=$1
        # brick_id is (brick-num in volume info - 1)
        local brick_id=$2
        _afr_child_up_status $vol $brick_id generate_shd_statedump
}

function afr_child_up_status_in_nfs {
        local vol=$1
        # brick_id is (brick-num in volume info - 1)
        local brick_id=$2
        _afr_child_up_status $vol $brick_id generate_nfs_statedump
}

function nfs_up_status {
        gluster volume status | grep "NFS Server" | awk '{print $7}'
}

function glustershd_up_status {
        gluster volume status | grep "Self-heal Daemon" | awk '{print $7}'
}

function quotad_up_status {
        gluster volume status | grep "Quota Daemon" | awk '{print $7}'
}

function get_brick_pid {
        local vol=$1
        local host=$2
        local brick=$3
        local brick_hyphenated=$(echo $brick | tr '/' '-')
        echo `cat $GLUSTERD_WORKDIR/vols/$vol/run/${host}${brick_hyphenated}.pid`
}

function kill_brick {
        local vol=$1
        local host=$2
        local brick=$3
        kill -9 $(get_brick_pid $vol $host $brick)
}

function check_option_help_presence {
        local option=$1
        $CLI volume set help | grep "^Option:" | grep -w $option
}

function afr_get_changelog_xattr {
        local file=$1
        local xkey=$2
        local xval=$(getfattr -n $xkey -e hex $file 2>/dev/null | grep "$xkey" | cut -f2 -d'=')
        if [ -z "$xval" ]; then
                xval="0x000000000000000000000000"
        fi
        echo $xval
}

function afr_get_pending_heal_count {
        local vol=$1
        gluster volume heal $vol info | grep "Number of entries" | awk '{ sum+=$4} END {print sum}'
}

function afr_get_split_brain_count {
        local vol=$1
        gluster volume heal $vol info split-brain | grep "Number of entries in split-brain" | awk '{ sum+=$6} END {print sum}'
}

function afr_get_index_path {
        local brick_path=$1
        echo "$brick_path/.glusterfs/indices/xattrop"
}

function afr_get_num_indices_in_brick {
        local brick_path=$1
        echo $(ls $(afr_get_index_path $brick_path) | grep -v xattrop | wc -l)
}

function gf_get_gfid_xattr {
        file=$1
        getfattr -n trusted.gfid -e hex $file 2>/dev/null | grep "trusted.gfid" | cut -f2 -d'='
}

function gf_gfid_xattr_to_str {
        xval=$1
        echo "${xval:2:8}-${xval:10:4}-${xval:14:4}-${xval:18:4}-${xval:22:12}"
}

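# Illustrative sketch (hypothetical GFID): gf_get_gfid_xattr prints the on-disk
# value in hex, e.g. 0xaabbccdd112233445566778899aabbcc, and gf_gfid_xattr_to_str
# reformats it as the canonical UUID string aabbccdd-1122-3344-5566-778899aabbcc,
# which is the layout used under .glusterfs/ on the bricks.
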
function get_text_xattr {
        local key=$1
        local path=$2
        getfattr -d -m. -e text $path 2>/dev/null | grep $key | cut -f2 -d'='
}

function gf_check_file_opened_in_brick {
        vol=$1
        host=$2
        brick=$3
        realpath=$4
        ls -l /proc/$(get_brick_pid $vol $host $brick)/fd | grep "${realpath}$" > /dev/null 2>&1
        if [ $? -eq 0 ]; then
                echo "Y"
        else
                echo "N"
        fi
}

function gf_get_gfid_backend_file_path {
        brickpath=$1
        filepath_in_brick=$2
        gfid=$(gf_get_gfid_xattr "$brickpath/$filepath_in_brick")
        gfidstr=$(gf_gfid_xattr_to_str $gfid)
        echo "$brickpath/.glusterfs/${gfidstr:0:2}/${gfidstr:2:2}/$gfidstr"
}

function gf_rm_file_and_gfid_link {
        brickpath=$1
        filepath_in_brick=$2
        rm -f $(gf_get_gfid_backend_file_path $brickpath $filepath_in_brick)
        rm -f "$brickpath/$filepath_in_brick"
}

function gd_is_replace_brick_completed {
        local host=$1
        local vol=$2
        local src_brick=$3
        local dst_brick=$4
        $CLI volume replace-brick $vol $src_brick $dst_brick status | grep -i "Migration complete"
        if [ $? -eq 0 ]; then
                echo "Y"
        else
                echo "N"
        fi
}

function dht_get_layout {
        local my_xa=trusted.glusterfs.dht
        getfattr -d -e hex -n $my_xa $1 2> /dev/null | grep "$my_xa=" | cut -d= -f2
}

function afr_get_specific_changelog_xattr ()
{
        local path=$1
        local key=$2
        local type=$3
        local specific_changelog=""

        changelog_xattr=$(afr_get_changelog_xattr "$path" "$key")
        if [ "$type" == "data" ]; then
                specific_changelog=${changelog_xattr:2:8}
        elif [ "$type" == "metadata" ]; then
                specific_changelog=${changelog_xattr:10:8}
        elif [ "$type" == "entry" ]; then
                specific_changelog=${changelog_xattr:18:8}
        else
                specific_changelog="error"
        fi

        echo $specific_changelog
}

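# Illustrative sketch (hypothetical xattr value): an AFR changelog xattr such as
# trusted.afr.$V0-client-0 holds three packed 32-bit counters, so a value of
# 0x000000020000000100000000 decodes to data=00000002, metadata=00000001 and
# entry=00000000, which is exactly what the substring offsets above extract.
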
##
# query pathinfo xattr and extract POSIX pathname(s)
##
function get_backend_paths {
        local path=$1

        getfattr -m . -n trusted.glusterfs.pathinfo $path | tr ' ' '\n' | sed -n 's/<POSIX.*:.*:\(.*\)>.*/\1/p'
}

# Gets the xattr value in hex and removes the leading "0x" from the value.
function get_hex_xattr {
        local key=$1
        local path=$2
        getfattr -d -m. -e hex $path 2>/dev/null | grep $key | cut -f2 -d'=' | cut -f2 -d'x'
}

function cumulative_stat_count {
        echo "$1" | grep "Cumulative Stats:" | wc -l
}

function incremental_stat_count {
        echo "$1" | grep "Interval$2Stats:" | wc -l
}

function cleared_stat_count {
        echo "$1" | grep "Cleared stats." | wc -l
}

function data_read_count {
        echo "$1" | grep "Data Read:$2bytes" | wc -l
}

function data_written_count {
        echo "$1" | grep "Data Written:$2bytes" | wc -l
}

function has_holes {
        if [ $((`stat -c '%b*%B-%s' $1`)) -lt 0 ];
        then
                echo "1"
        else
                echo "0"
        fi
}

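# Illustrative sketch (hypothetical file): stat prints "%b*%B-%s", i.e.
# allocated-blocks * block-size - apparent-size, which bash then evaluates.
# For a 1 MiB file with only 4 KiB actually allocated this is
# 8*512-1048576 < 0, so has_holes reports "1" (the file is sparse).
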
function do_volume_operations() {
        local operation=$1
        local count=$2
        local force=$3

        local pids=()
        local cli
        local v

        for i in `seq 1 $count`; do
                cli="CLI_$i"
                v="V`expr $i - 1`"
                ${!cli} volume $operation ${!v} $force &
                pids[$i]=$!
        done

        for i in `seq 1 $count`; do
                wait ${pids[$i]}
        done
}

function start_volumes() {
        do_volume_operations start $1
}

function stop_volumes() {
        do_volume_operations stop $1
}

function start_force_volumes() {
        do_volume_operations start $1 force
}

function stop_force_volumes() {
        do_volume_operations stop $1 force
}

function delete_volumes() {
        do_volume_operations delete $1
}

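# Usage sketch (assumes the harness defines CLI_1, CLI_2, ... and V0, V1, ... as
# in the cluster tests): "start_volumes 2" expands, via bash indirect expansion,
# to "$CLI_1 volume start $V0" and "$CLI_2 volume start $V1", runs both in the
# background and waits for every spawned operation to finish before returning.
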
function volume_exists() {
        $CLI volume info $1 > /dev/null 2>&1
        if [ $? -eq 0 ]; then
                echo "Y"
        else
                echo "N"
        fi
}

function killall_gluster() {
        pkill gluster
        sleep 1
}

function afr_get_index_count {
        local brick=$1
        ls $brick/.glusterfs/indices/xattrop | grep -v xattrop | wc -l
}

function landfill_entry_count {
        local brick=$1
        ls $brick/.glusterfs/landfill | wc -l
}

function path_exists {
        stat $1
        if [ $? -eq 0 ]; then echo "Y"; else echo "N"; fi
}

function force_umount {
        umount -f $*
        if [ $? -eq 0 ]; then echo "Y"; else echo "N"; fi
}

function assign_gfid {
        local gfid=$1
        local file=$2
        setfattr -n trusted.gfid -v $gfid $file
}

function get_random_gfid {
        echo "0x"$(uuidgen | awk -F '-' 'BEGIN {OFS=""} {print $1,$2,$3,$4,$5}')
}

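# Illustrative sketch (hypothetical uuidgen output): for a UUID like
# de305d54-75b4-431b-adb2-eb6b9e546014 the awk step strips the dashes, so the
# function prints 0xde305d5475b4431badb2eb6b9e546014, a value suitable for
# assign_gfid above.
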
function volgen_volume_exists {
        local volfile="$1"
        local xl_vol="$2"
        local xl_type="$3"
        local xl_feature="$4"
        xl=$(sed -e "/./{H;\$!d;}" -e "x;/volume $xl_vol/!d;/type $xl_type\/$xl_feature/!d" $volfile)
        if [ -z "$xl" ];
        then
                echo "N"
        else
                echo "Y"
        fi
}

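# Illustrative sketch (hypothetical volfile contents): the sed expression splits
# the volfile into blank-line-separated paragraphs and keeps only those matching
# both the volume name and the type, so for a stanza like
#     volume patchy-md-cache
#         type performance/md-cache
#     end-volume
# "volgen_volume_exists <volfile> patchy-md-cache performance md-cache" prints "Y".
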
function volgen_volume_option {
        local volfile="$1"
        local xl_vol="$2"
        local xl_type="$3"
        local xl_feature="$4"
        local xl_option="$5"
        sed -e "/./{H;\$!d;}" -e "x;/volume $xl_vol/!d;/type $xl_type\/$xl_feature/!d;/option $xl_option/!d" $volfile | grep " $xl_option " | awk '{print $3}'
}

function mount_get_option_value {
        local m=$1
        local subvol=$2
        local key=$3

        grep -w "$key" $m/.meta/graphs/active/$subvol/private | awk '{print $3}'
}

function get_volume_mark {
        getfattr -n trusted.glusterfs.volume-mark -ehex $1 | sed -n 's/^trusted.glusterfs.volume-mark=0x//p' | cut -b5-36 | sed 's/\([a-f0-9]\{8\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)/\1-\2-\3-\4-/'
}

# setup geo-rep in a single node.
function setup_georep {

        $CLI volume create $GMV0 replica 2 $H0:$B0/${GMV0}{1,2,3,4};
        $CLI volume start $GMV0

        $CLI volume create $GSV0 replica 2 $H0:$B0/${GSV0}{1,2,3,4};
        $CLI volume start $GSV0

        $CLI system:: execute gsec_create

        $CLI volume geo-rep $GMV0 $H0::$GSV0 create push-pem
        $CLI volume geo-rep $GMV0 $H0::$GSV0 start

        sleep 80 # after start, geo-rep takes about a minute to stabilize
}

# stop and delete geo-rep session
function cleanup_georep {

        $CLI volume geo-rep $GMV0 $H0::$GSV0 stop

        $CLI volume geo-rep $GMV0 $H0::$GSV0 delete
}

function num_graphs
{
        local mountpoint=$1
        echo `ls $mountpoint/.meta/graphs/ | grep -v active | wc -l`
}

function get_aux()
{
        ## Check if an auxiliary mount is there
        df -h 2>&1 | grep "/var/run/gluster/$V0" -

        if [ $? -eq 0 ]
        then
                echo "0"
        else
                echo "1"
        fi
}

function get_bitd_count {
        ps auxw | grep glusterfs | grep bitd.pid | grep -v grep | wc -l
}

function get_scrubd_count {
        ps auxw | grep glusterfs | grep scrub.pid | grep -v grep | wc -l
}