extras: Add defrag scripts to the repository

Signed-off-by: Pavan Vilas Sondur <pavan@gluster.com>
Signed-off-by: Anand V. Avati <avati@dev.gluster.com>

BUG: 478 (Add defrag scripts into glusterfs)
URL: http://bugs.gluster.com/cgi-bin/bugzilla3/show_bug.cgi?id=478
Pavan Sondur 2010-01-19 08:11:05 +00:00 committed by Anand V. Avati
parent a23185f3a4
commit a6a1f596a4
4 changed files with 126 additions and 1 deletion


extras/Makefile.am
@@ -5,5 +5,5 @@ EditorMode_DATA = glusterfs-mode.el glusterfs.vim
 SUBDIRS = init.d benchmarking volgen
-EXTRA_DIST = specgen.scm MacOSX/Portfile glusterfs-mode.el glusterfs.vim migrate-unify-to-distribute.sh backend-xattr-sanitize.sh
+EXTRA_DIST = specgen.scm MacOSX/Portfile glusterfs-mode.el glusterfs.vim migrate-unify-to-distribute.sh backend-xattr-sanitize.sh backend-cleanup.sh defrag.sh scale-n-defrag.sh
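
(Note: EXTRA_DIST only makes automake include the listed scripts in release
tarballs; they are not built or installed. A quick way to verify, assuming
an autotools source checkout:)

    make dist && tar tzf glusterfs-*.tar.gz | grep -e defrag -e backend-cleanup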

extras/backend-cleanup.sh Normal file

@@ -0,0 +1,28 @@
#!/bin/sh
# This script can be used to clean up the 'cluster/distribute' translator's
# stale link files. One may choose to run it only after the number of
# subvolumes in a distribute volume has been increased (or decreased).
#
# This script has to be run on the servers that export the data to
# GlusterFS.
#
# (c) 2009 Gluster Inc <http://www.gluster.com/>
set -e
# Change the below variable as per the setup.
export_directory="/export/glusterfs"
clean_dir()
{
# Clean the 'link' files on backend
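# DHT link files are zero-byte placeholders carrying the sticky bit
# (mode 01000); '-perm +01000' matches any file with that bit set.
# (Newer GNU findutils spell this '-perm /1000'.)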
find "${export_directory}" -type f -perm +01000 -exec rm -v '{}' \;
}
main()
{
clean_dir ;
}
main "$@"

extras/defrag.sh Normal file

@@ -0,0 +1,60 @@
#!/bin/sh
# This script is invoked by the 'scale-n-defrag.sh' script.
# Do not run it standalone.
#
#
set -e
CP="cp"
MV="mv"
scan_dir()
{
path=$1;
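# Files that still need migration show up (through the mountpoint) with
# the sticky bit set; re-invoke this script on each one of them.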
find "$path" -type f -perm +01000 -exec $0 '{}' \;
}
rsync_filename()
{
path=$1
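# Build an rsync-style hidden temporary name (.<name>.zr<pid>) in the
# same directory as the original file.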
dir=$(dirname "$path");
file=$(basename "$path");
echo "$dir/.$file.zr$$";
}
relocate_file()
{
path=$1;
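# Copy the file aside through the mountpoint, then rename the copy over
# the original, but only if the original was not modified while it was
# being copied.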
tmp_path=$(rsync_filename "$path");
pre_mtime=$(stat -c '%Y' "$path");
$CP -a "$path" "$tmp_path";
post_mtime=$(stat -c '%Y' "$path");
if [ "$pre_mtime" = "$post_mtime" ]; then
chmod -t "$tmp_path";
$MV "$tmp_path" "$path";
echo "file '$path' relocated"
else
echo "file '$path' modified during defrag. skipping"
rm -f "$tmp_path";
fi
}
main()
{
path="$1";
if [ -d "$path" ]; then
scan_dir "$path";
else
relocate_file "$@";
fi
usleep 500000 # throttle: 500ms pause after each entry
}
main "$1"

extras/scale-n-defrag.sh Normal file

@@ -0,0 +1,37 @@
#!/bin/sh
# This script is run over the GlusterFS mountpoint (from just one client)
# to redistribute data after the distribute translator's subvolume count
# changes.
#
# (c) 2009 Gluster Inc, <http://www.gluster.com/>
#
#
# Make sure the following variables are properly initialized
MOUNTPOINT=/tmp/testdir
directory_to_be_scaled="${MOUNTPOINT}/"
logdir=$(dirname "$0")
cd "$logdir"
LOGDIR=$(pwd)
cd - > /dev/null
# The command below removes the stored layout from each directory so that
# a fresh layout, spanning the new nodes, is assigned on the next lookup.
find "${directory_to_be_scaled}" -type d -exec setfattr -x "trusted.glusterfs.dht" {} \;
# Now do a lookup on the files so the re-hashing over the new layout is done.
find "${directory_to_be_scaled}" > /dev/null
# Now run the defrag script on each directory to copy the data across to
# the new nodes (replacing the linkfiles).
#
cd "${directory_to_be_scaled}";
for dir in *; do
echo "Defragmenting directory ${directory_to_be_scaled}/$dir ($LOGDIR/defrag-store-$dir.log)"
"$LOGDIR"/defrag.sh "$dir" >> "$LOGDIR/defrag-store-$dir.log" 2>&1
echo "Completed directory ${directory_to_be_scaled}/$dir"
done
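
A minimal usage sketch: set MOUNTPOINT in this script to the client-side
mount (the /tmp/testdir default is only a placeholder), then run it from
exactly one client; per-directory logs are written next to the script:

    sh scale-n-defrag.sh
    tail -f defrag-store-*.log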