porting: various fixes to regression tests on OSX/FreeBSD

- `wc -l` on OSX/FreeBSD pads its output with spurious spaces, which
  trips up TAP output parsers - fix it.
- `umount -l` doesn't exist on OSX/FreeBSD; fall back to 'umount -f'
  where available (see the sketch after this list).
- Add a check for the 'file' utility's version, to handle MIME-type
  variations across versions.
- Converge 'glusterfs --attribute-timeout=0 --entry-timeout=0'
  invocations into '$GFS'.
- Modify the remaining 'mount -t nfs' calls to use 'mount_nfs'.
- Update sha1sum on OSX to use 'openssl sha1'.
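
A minimal sketch of the unmount fallback described above, assuming a
POSIX shell (the helper name is illustrative, not part of this commit):

    # Hypothetical helper: prefer a lazy unmount (Linux-only), fall back
    # to a forced unmount (OSX/FreeBSD), and never fail the cleanup path.
    safe_umount() {
        umount -l "$1" 2>/dev/null || umount -f "$1" 2>/dev/null || true
    }
    safe_umount /mnt/gluster-test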

Change-Id: Id1012faa5d67a921513d220e7fa9cebafe830d34
BUG: 1131713
Signed-off-by: Harshavardhana <harsha@harshavardhana.net>
Reviewed-on: http://review.gluster.org/8501
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
Harshavardhana 2014-08-19 18:24:23 -07:00
parent 04be6a47df
commit 2dd53eb4de
34 changed files with 84 additions and 59 deletions


@@ -606,7 +606,7 @@ AC_SUBST(SYNCDAEMON_SUBDIR)
# CDC xlator - check if libz is present if so enable HAVE_LIB_Z
BUILD_CDC=yes
PKG_CHECK_MODULES([ZLIB], [zlib >= 1.2.0],,
-[AC_CHECK_LIB([z], [deflate], [LIBZ_LIBS="-lz"],
+[AC_CHECK_LIB([z], [deflate], [ZLIB_LIBS="-lz"],
[BUILD_CDC=no])])
echo -n "features requiring zlib enabled: "
if test "x$BUILD_CDC" = "xyes" ; then
@@ -615,8 +615,8 @@ if test "x$BUILD_CDC" = "xyes" ; then
else
echo "no"
fi
-AC_SUBST(LIBZ_CFLAGS)
-AC_SUBST(LIBZ_LIBS)
+AC_SUBST(ZLIB_CFLAGS)
+AC_SUBST(ZLIB_LIBS)
# end CDC xlator secion
# check for systemtap/dtrace


@@ -13,7 +13,7 @@ TEST $CLI volume set $V0 self-heal-daemon off
TEST $CLI volume set $V0 stat-prefetch off
TEST $CLI volume start $V0
TEST $CLI volume set $V0 cluster.background-self-heal-count 0
-TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0;
+TEST $GFS --volfile-id=$V0 -s $H0 $M0;
#Test
TEST touch $M0/file
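
Since include.rc (further down) defines GFS="glusterfs --attribute-timeout=0 --entry-timeout=0",
the replacement line expands to the same caching-disabled mount as before;
only the flag order changes:

    GFS="glusterfs --attribute-timeout=0 --entry-timeout=0"
    # TEST $GFS --volfile-id=$V0 -s $H0 $M0;   expands to:
    # glusterfs --attribute-timeout=0 --entry-timeout=0 --volfile-id=$V0 -s $H0 $M0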


@@ -13,7 +13,7 @@ TEST $CLI volume set $V0 self-heal-daemon off
TEST $CLI volume set $V0 stat-prefetch off
TEST $CLI volume start $V0
TEST $CLI volume set $V0 cluster.background-self-heal-count 0
-TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0;
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
#Test
TEST $CLI volume set $V0 cluster.read-subvolume $V0-client-1


@@ -13,7 +13,7 @@ TEST $CLI volume set $V0 self-heal-daemon off
TEST $CLI volume set $V0 stat-prefetch off
TEST $CLI volume start $V0
TEST $CLI volume set $V0 cluster.background-self-heal-count 0
-TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0;
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
#Test
TEST mkdir -p $M0/abc/def


@@ -12,8 +12,8 @@ TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
TEST $CLI volume set $V0 cluster.self-heal-daemon off
TEST $CLI volume start $V0
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M1
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M1;
TEST cd $M0
TEST mkdir -p a/b/c/d/e
TEST cd a/b/c/d/e


@@ -20,7 +20,7 @@ TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
TEST $CLI volume set $V0 stat-prefetch off
TEST $CLI volume start $V0
TEST $CLI volume set $V0 cluster.background-self-heal-count 0
-TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0;
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
###############################################################################
#1.Test successful data, metadata and entry self-heal


@@ -13,7 +13,7 @@ TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
TEST $CLI volume set $V0 data-self-heal-algorithm full
TEST $CLI volume start $V0
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
TEST dd if=/dev/urandom of=$M0/small count=1 bs=1024k
TEST dd if=/dev/urandom of=$M0/bigger2big count=1 bs=2048k
TEST dd if=/dev/urandom of=$M0/big2bigger count=1 bs=1024k


@@ -17,7 +17,7 @@ TEST $CLI volume set $V0 cluster.entry-self-heal off
TEST $CLI volume set $V0 cluster.data-self-heal off
TEST $CLI volume start $V0
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
TEST touch $M0/a
TEST kill_brick $V0 $H0 $B0/${V0}0
TEST rm -f $M0/a


@@ -83,7 +83,7 @@ EXPECT 'Created' volinfo_field $V0 'Status';
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status'
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
EXPECT '1' volume_type
## Create posix file


@@ -5,6 +5,11 @@
cleanup;
+function file_mime_type () {
+mime_type=$(file --mime $1 2>/dev/null | sed '/^[^:]*: /s///')
+echo $mime_type
+}
TEST glusterd
TEST pidof glusterd
@@ -44,7 +49,7 @@ EXPECT 'Started' volinfo_field $V0 'Status';
sleep 2
## Mount FUSE with caching disabled
-TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+TEST $GFS -s $H0 --volfile-id $V0 $M0;
####################
## Testing writev ##
@@ -59,7 +64,15 @@ TEST dd if=/tmp/cdc-orig of=$M0/cdc-server count=1 bs=1k 2>/dev/null
checksum[brick-file]=`md5sum $B0/${V0}1/cdc-server | cut -d' ' -f1`
## Uncompress the gzip dump file and find its md5sum
-EXPECT '/tmp/cdcdump.gz: application/x-gzip; charset=binary' file -i /tmp/cdcdump.gz
+# mime outputs for gzip are different for file version > 5.14
+TEST touch /tmp/gzipfile
+TEST gzip /tmp/gzipfile
+GZIP_MIME_TYPE=$(file_mime_type /tmp/gzipfile.gz)
+TEST rm -f /tmp/gzipfile.gz
+EXPECT "$GZIP_MIME_TYPE" echo $(file_mime_type /tmp/cdcdump.gz)
TEST gunzip -f /tmp/cdcdump.gz
checksum[dump-file-writev]=`md5sum /tmp/cdcdump | cut -d' ' -f1`
@@ -79,7 +92,9 @@ TEST dd if=$M0/cdc-server of=/tmp/cdc-client count=1 bs=1k 2>/dev/null
checksum[client-file]=`md5sum /tmp/cdc-client | cut -d' ' -f1`
## Uncompress the gzip dump file and find its md5sum
-EXPECT '/tmp/cdcdump.gz: application/x-gzip; charset=binary' file -i /tmp/cdcdump.gz
+# mime outputs for gzip are different for file version > 5.14
+EXPECT "$GZIP_MIME_TYPE" echo $(file_mime_type /tmp/cdcdump.gz)
TEST gunzip -f /tmp/cdcdump.gz
checksum[dump-file-readv]=`md5sum /tmp/cdcdump | cut -d' ' -f1`
@@ -106,7 +121,7 @@ TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
## Mount FUSE with caching disabled
-TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+TEST $GFS -s $H0 --volfile-id $V0 $M0;
## Create a file of size 99 bytes on mountpoint
## This is should not be compressed
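
The derived-reference trick above avoids hard-coding the MIME string:
per the comment in the diff, file(1) newer than roughly 5.14 reports gzip
data as application/gzip while older versions report application/x-gzip,
so the test computes the expected string from a freshly created gzip file
on the same host. A standalone sketch of the same technique, assuming a
POSIX shell:

    # Derive the expected gzip MIME type from this host's file(1)
    # rather than hard-coding a version-dependent string.
    tmpfile=$(mktemp)          # empty scratch file
    gzip "$tmpfile"            # produces $tmpfile.gz
    expected=$(file --mime "$tmpfile.gz" 2>/dev/null | sed '/^[^:]*: /s///')
    rm -f "$tmpfile.gz"
    echo "expected gzip mime: $expected"   # e.g. "application/gzip; charset=binary"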


@@ -1,4 +1,3 @@
SIZE_LIST="1048576 1000 12345 0"
LAST_BRICK=$(($DISPERSE - 1))
@@ -23,7 +22,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 redundancy $REDUNDANCY $H0:$B0/${V0}{0..$LAST_BRICK}
TEST $CLI volume start $V0
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
TEST dd if=/dev/urandom of=$tmp/small bs=1024 count=1
TEST dd if=/dev/urandom of=$tmp/big bs=1024 count=4096
@@ -88,7 +87,7 @@ for dir in . dir1; do
TEST umount $M0
TEST $CLI volume stop $V0 force
TEST $CLI volume start $V0
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
cd $M0
done


@@ -140,7 +140,7 @@ TEST $CLI volume start $V0
EXPECT 'Started' volinfo_field $V0 'Status'
# Mount FUSE with caching disabled
-TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+TEST $GFS -s $H0 --volfile-id $V0 $M0
# Create local files for comparisons etc.
tmpdir=$(mktemp -d -t ${0##*/}.XXXXXX)
@@ -185,7 +185,7 @@ TEST setup_perm_file $M0
# Unmount/remount so that create/write and truncate don't see cached data.
TEST umount $M0
-TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+TEST $GFS -s $H0 --volfile-id $V0 $M0
# Test create/write and truncate *before* the bricks are brought back.
TEST check_create_write $M0
@@ -197,7 +197,7 @@ sleep 10
# Unmount/remount again, same reason as before.
TEST umount $M0
-TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+TEST $GFS -s $H0 --volfile-id $V0 $M0
# Make sure everything is as it should be. Most tests check for consistency
# between the bricks and the front end. This is not valid for disperse, so we
@@ -230,4 +230,3 @@ rm -rf $tmpdir
userdel --force ${TEST_USER}
cleanup


@@ -11,11 +11,11 @@ TEST $CLI volume create $V0 redundancy 2 $H0:$B0/${V0}{0..5}
TEST $CLI volume start $V0
EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
-TEST mount_nfs $H0:/$V0 $N0
+TEST mount_nfs $H0:/$V0 $N0 nolock
TEST dd if=/dev/zero of=$N0/test bs=1024k count=1k
## Before killing daemon to avoid deadlocks
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
cleanup
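
mount_nfs is a test-harness wrapper, not shown in this commit; the nolock
argument matters on Linux, where the kernel NFS client would otherwise
expect the NLM lock manager that Gluster/NFS does not provide. A hedged
sketch of what such a wrapper might look like - the function body here is
an assumption, only the mount commands themselves are standard:

    # Hypothetical sketch of a portable mount_nfs wrapper; the real
    # helper lives in the harness (nfs.rc) and may differ.
    mount_nfs() {
        exp=$1; mnt=$2; opts=$3
        case $OSTYPE in
        linux*)
            mount -t nfs -o vers=3,${opts:-nolock} $exp $mnt ;;
        darwin*|freebsd*)
            # OSX/FreeBSD ship a dedicated mount_nfs(8) binary
            /sbin/mount_nfs $exp $mnt ;;
        esac
    }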


@@ -18,7 +18,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 redundancy 2 $H0:$B0/${V0}{0..5}
TEST $CLI volume start $V0
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
TEST dd if=/dev/urandom of=$tmp/test bs=1024 count=1024
@@ -96,7 +96,7 @@ cd
TEST umount $M0
TEST $CLI volume stop $V0 force
TEST $CLI volume start $V0
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
cd $M0
EXPECT "1048576" stat -c "%s" test2


@@ -21,7 +21,7 @@ TEST $CLI volume set $V0 features.file-snapshot on;
TEST $CLI volume set $V0 performance.quick-read off;
TEST $CLI volume set $V0 performance.io-cache off;
-TEST glusterfs -s $H0 --volfile-id $V0 $M0 --attribute-timeout=0;
+TEST $GFS -s $H0 --volfile-id $V0 $M0;
TEST touch $M0/big-file;


@@ -9,7 +9,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/${V0}0
TEST $CLI volume start $V0
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --aux-gfid-mount
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 --aux-gfid-mount;
TEST mkdir $M0/a
TEST touch $M0/b
a_gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/a))


@@ -17,7 +17,7 @@ EXPECT 'Created' volinfo_field $V0 'Status';
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
-TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+TEST $GFS -s $H0 --volfile-id $V0 $M0;
# verify json validity


@@ -36,14 +36,14 @@ EXPECT 'Started' volinfo_field $V0 'Status';
TEST $CLI volume set $V0 performance.stat-prefetch off;
## Mount FUSE with caching disabled (read-write)
-TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+TEST $GFS -s $H0 --volfile-id $V0 $M0;
## Check consistent "rw" option
-TEST 'grep -E "^$H0:$V0 .+ ,?rw,?" /proc/mounts';
+TEST 'mount -t $MOUNT_TYPE_FUSE | grep -E "^$H0:$V0 "|$GREP_MOUNT_OPT_RW';
## Mount FUSE with caching disabled (read-only)
-TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --read-only -s $H0 --volfile-id $V0 $M1;
+TEST $GFS --read-only -s $H0 --volfile-id $V0 $M1;
## Check consistent "ro" option
+TEST 'mount -t $MOUNT_TYPE_FUSE | grep -E "^$H0:$V0 "|$GREP_MOUNT_OPT_RO';
@@ -55,7 +55,6 @@ EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
## Mount NFS
TEST mount_nfs $H0:/$V0 $N0 nolock;
## Test for consistent views between NFS and FUSE mounts
## write access to $M1 should fail
TEST ! stat $M0/newfile;
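
/proc/mounts is Linux-only, so the rewritten checks parse mount(8) output
instead; MOUNT_TYPE_FUSE and GREP_MOUNT_OPT_RW/RO are harness variables
that hide the per-OS differences. A sketch of plausible per-OS values -
these exact definitions are assumptions, not taken from this commit:

    # Assumed per-OS values for the harness variables used above.
    case $OSTYPE in
    linux*)
        MOUNT_TYPE_FUSE=fuse.glusterfs
        GREP_MOUNT_OPT_RW="grep -E [(,]rw[,)]"   # mount(8): "(rw,relatime,...)"
        GREP_MOUNT_OPT_RO="grep -E [(,]ro[,)]"
        ;;
    darwin*|freebsd*)
        MOUNT_TYPE_FUSE=fusefs
        GREP_MOUNT_OPT_RW="grep -vw read-only"   # BSD mount(8) prints flags differently
        GREP_MOUNT_OPT_RO="grep -w read-only"
        ;;
    esac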


@@ -22,7 +22,7 @@ TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
## Mount FUSE with caching disabled (read-only)
-TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --read-only -s $H0 --volfile-id $V0 $M1;
+TEST $GFS --read-only -s $H0 --volfile-id $V0 $M1;
## Wait for volume to register with rpc.mountd
sleep 5;


@@ -16,11 +16,11 @@ TEST pidof glusterd
TEST $CLI volume info;
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
-TEST $CLI volume start $V0;
-TEST glusterfs -s $H0 --volfile-id $V0 $M0;
TEST $CLI volume set $V0 build-pgfid on;
+TEST $CLI volume start $V0;
+TEST $GFS -s $H0 --volfile-id $V0 $M0;
TEST mkdir $M0/a;
TEST touch $M0/a/b;


@@ -9,7 +9,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/${V0}0
TEST $CLI volume start $V0
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
cd $M0
for i in {1..3}
do


@@ -4,6 +4,7 @@
. $(dirname $0)/../volume.rc
+. $(dirname $0)/../nfs.rc
. $(dirname $0)/../dht.rc
. $(dirname $0)/../nfs.rc
cleanup;
@@ -40,7 +41,7 @@ EXPECT '4' brick_count $V0
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
-TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+TEST $GFS -s $H0 --volfile-id $V0 $M0;
TEST mkdir -p $M0/test_dir/in_test_dir


@@ -19,7 +19,7 @@ TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
## Mount FUSE
-TEST glusterfs -s $H0 --volfile-id $V0 $M1;
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
TEST $(dirname $0)/rpc-coverage.sh $M1
cleanup;


@@ -11,7 +11,7 @@ TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3,4,5}
TEST $CLI volume set $V0 cluster.background-self-heal-count 0
TEST $CLI volume set $V0 cluster.eager-lock off
TEST $CLI volume start $V0
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
TEST kill_brick $V0 $H0 $B0/${V0}0
TEST kill_brick $V0 $H0 $B0/${V0}2
TEST kill_brick $V0 $H0 $B0/${V0}4


@@ -19,7 +19,7 @@ TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
TEST $CLI volume start $V0;
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
for i in {1..10} ; do echo "file" > $M0/file$i ; done
@@ -46,7 +46,7 @@ TEST $CLI volume set $V0 features.uss enable;
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
# test 15
TEST ls $M0/.snaps;


@@ -17,7 +17,7 @@ TEST $CLI volume start $V0;
sleep 2
## Mount FUSE
-TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+TEST $GFS -s $H0 --volfile-id $V0 $M0;
## Mount NFS
TEST mount_nfs $H0:/$V0 $N0 nolock;
@@ -68,4 +68,3 @@ TEST test_brick_cmds;
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
cleanup;


@@ -39,7 +39,7 @@ TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
## Mount the volume
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
## Testing writev, readv, ftruncate:
## Create fragmented files and compare them with the reference files


@@ -17,7 +17,7 @@ TEST $CLI volume start $V0
TEST $CLI volume set $V0 readdir-ahead on
-TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
TEST mkdir $M0/test
for i in $(seq 0 99)


@@ -44,7 +44,7 @@ TEST $CLI volume start $V0
# This mount should FAIL because the identity given by our certificate does not
# match the allowed user. In other words, authentication works (they know who
# we are) but authorization doesn't (we're not the right person).
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
# Looks like /*/bin/glusterfs isn't returning error status correctly (again).
# Actually try doing something to get a real error.


@@ -62,7 +62,7 @@ TEST $CLI volume start $V0
EXPECT 'Started' volinfo_field $V0 'Status'
# Create some files for later tests.
-TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+TEST $GFS -s $H0 --volfile-id $V0 $M0
TEST mkdir $M0/dir
TEST touch_files
TEST umount $M0


@@ -56,6 +56,7 @@ UMOUNT_TIMEOUT=5
statedumpdir=`gluster --print-statedumpdir`; # Default directory for statedump
CLI="gluster --mode=script --wignore";
+GFS="glusterfs --attribute-timeout=0 --entry-timeout=0";
mkdir -p $B0;
mkdir -p $M0 $M1;
@@ -72,7 +73,10 @@ for line in $expect_tests; do
done
IFS=$x_ifs
-echo 1..$testcnt
+# Remove space again since `wc -l` on OSX and FreeBSD
+# adds spurious space which clobbers up TAP output
+testcnt_nospace=$(echo $testcnt | tr -d ' ')
+echo 1..$testcnt_nospace
t=1
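
The reason for the tr -d ' ' above: BSD-derived wc right-aligns its count,
so command substitution hands back leading spaces, and a TAP plan line such
as "1..      12" is invalid. A quick illustration:

    # On OSX/FreeBSD:
    #   $ echo "$(printf 'a\nb\nc\n' | wc -l)"
    #   "       3"        <- padded; "1..       3" breaks TAP parsers
    # Stripping the padding yields a clean plan line:
    testcnt_nospace=$(echo $testcnt | tr -d ' ')
    echo 1..$testcnt_nospace   # e.g. "1..3"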
@@ -377,10 +381,10 @@ function cleanup()
rm -rf $GLUSTERD_WORKDIR/* $B0/* /etc/glusterd/*;
fi
-umount -l $M0 2>/dev/null || true;
-umount -l $M1 2>/dev/null || true;
-umount -l $N0 2>/dev/null || true;
-umount -l $N1 2>/dev/null || true;
+umount -l $M0 2>/dev/null || umount -f $M0 2>/dev/null || true;
+umount -l $M1 2>/dev/null || umount -f $M1 2>/dev/null || true;
+umount -l $N0 2>/dev/null || umount -f $N0 2>/dev/null || true;
+umount -l $N1 2>/dev/null || umount -f $N1 2>/dev/null || true;
}
@@ -529,9 +533,18 @@ which md5sum > /dev/null || {
which sha1sum > /dev/null || {
sha1sum() {
+case $OSTYPE in
+Darwin)
+for f in $* ; do
+openssl sha1 $f | awk -F'[() ]' '{printf("%s %s\n", $4, $2)}'
+done
+;;
+NetBSD | FreeBSD)
for f in $* ; do
sha1 $f | awk -F'[() ]' '{printf("%s %s\n", $6, $3)}'
done
+;;
+esac
}
}
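
Why the two awk field lists differ: openssl sha1 prints "SHA1(file)= <digest>"
while the BSD sha1 utility prints "SHA1 (file) = <digest>", so splitting on
'(', ')' and space leaves the digest and file name in different fields. A
worked breakdown (the sample digest is the well-known SHA-1 of empty input):

    # Darwin:   openssl sha1 /tmp/x  ->  SHA1(/tmp/x)= da39a3ee5e6b...
    #   split on '(' ')' ' ':  $1=SHA1  $2=/tmp/x  $3='='  $4=<digest>
    #   hence: awk -F'[() ]' '{printf("%s %s\n", $4, $2)}'
    # FreeBSD:  sha1 /tmp/x          ->  SHA1 (/tmp/x) = da39a3ee5e6b...
    #   split:  $1=SHA1  $2=''  $3=/tmp/x  $4=''  $5='='  $6=<digest>
    #   hence: awk -F'[() ]' '{printf("%s %s\n", $6, $3)}'
    # Both emit sha1sum-style "<digest> <file>" lines.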


@@ -13,9 +13,9 @@ TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
TEST $CLI volume start $V0;
## Mount FUSE
-TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+TEST $GFS -s $H0 --volfile-id $V0 $M0;
-TEST glusterfs -s $H0 --volfile-id $V0 $M1;
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
D0="hello-this-is-a-test-message0";
F0="test-file0";


@@ -13,9 +13,9 @@ TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
TEST $CLI volume start $V0;
## Mount FUSE
-TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+TEST $GFS -s $H0 --volfile-id $V0 $M0;
-TEST glusterfs -s $H0 --volfile-id $V0 $M1;
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
D0="hello-this-is-a-test-message0";
F0="test-file0";


@@ -7,7 +7,7 @@ noinst_HEADERS = cdc.h cdc-mem-types.h
cdc_la_LDFLAGS = -module -avoid-version
cdc_la_SOURCES = cdc.c cdc-helper.c
-cdc_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la $(LIBZ_LIBS)
+cdc_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la $(ZLIB_LIBS)
AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -D$(GF_HOST_OS) \
-shared $(LIBZ_CFLAGS)