glusterfs/glusterfs.spec.in


%global _hardened_build 1
%global _for_fedora_koji_builds 0
# uncomment and add '%' to use the prereltag for pre-releases
# %%global prereltag qa3
##-----------------------------------------------------------------------------
## All argument definitions should be placed here and kept sorted
##
# asan
# if you wish to compile an rpm with address sanitizer...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with asan
%{?_with_asan:%global _with_asan --enable-asan}
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
%global _with_asan %{nil}
%endif
# bd
# if you wish to compile an rpm without the BD map support...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without bd
%{?_without_bd:%global _without_bd --disable-bd-xlator}
%if ( 0%{?rhel} && 0%{?rhel} > 7 )
%global _without_bd --without-bd
%endif
# cmocka
# if you wish to compile an rpm with cmocka unit testing...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with cmocka
%{?_with_cmocka:%global _with_cmocka --enable-cmocka}
# debug
# if you wish to compile an rpm with debugging...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with debug
%{?_with_debug:%global _with_debug --enable-debug}
# epoll
# if you wish to compile an rpm without epoll...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without epoll
%{?_without_epoll:%global _without_epoll --disable-epoll}
# fusermount
# if you wish to compile an rpm without fusermount...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without fusermount
%{?_without_fusermount:%global _without_fusermount --disable-fusermount}
# geo-rep
# if you wish to compile an rpm without geo-replication support, compile like this...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without georeplication
%{?_without_georeplication:%global _without_georeplication --disable-georeplication}
# gnfs
# if you wish to compile an rpm with the legacy gNFS server xlator
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with gnfs
%{?_with_gnfs:%global _with_gnfs --enable-gnfs}
# ipv6default
# if you wish to compile an rpm with IPv6 default...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with ipv6default
%{?_with_ipv6default:%global _with_ipv6default --with-ipv6-default}
# libtirpc
# if you wish to compile an rpm without TIRPC (i.e. use legacy glibc rpc)
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without libtirpc
%{?_without_libtirpc:%global _without_libtirpc --without-libtirpc}
# Do not use libtirpc on EL6; it does not have xdr_uint64_t() and xdr_uint32_t()
# Do not use libtirpc on EL7; it does not have xdr_sizeof()
%if ( 0%{?rhel} && 0%{?rhel} <= 7 )
%global _without_libtirpc --without-libtirpc
%endif
# ocf
# if you wish to compile an rpm without the OCF resource agents...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without ocf
%{?_without_ocf:%global _without_ocf --without-ocf}
# rdma
# if you wish to compile an rpm without rdma support, compile like this...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without rdma
%{?_without_rdma:%global _without_rdma --disable-ibverbs}
# No RDMA Support on 32-bit ARM
%ifarch armv7hl
%global _without_rdma --disable-ibverbs
%endif
# server
# if you wish to build rpms without server components, compile like this
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without server
%{?_without_server:%global _without_server --without-server}
# forcefully disable server components on rhel <= 6
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
%global _without_server --without-server
%endif
# syslog
# if you wish to build rpms without syslog logging, compile like this
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without syslog
%{?_without_syslog:%global _without_syslog --disable-syslog}
# disable syslog forcefully as rhel <= 6 doesn't have rsyslog or rsyslog-mmcount
# Fedora deprecated syslog, see
# https://fedoraproject.org/wiki/Changes/NoDefaultSyslog
# (And what about RHEL7?)
%if ( 0%{?fedora} && 0%{?fedora} >= 20 ) || ( 0%{?rhel} && 0%{?rhel} <= 6 )
%global _without_syslog --disable-syslog
%endif
# tsan
# if you wish to compile an rpm with thread sanitizer...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with tsan
%{?_with_tsan:%global _with_tsan --enable-tsan}
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
%global _with_tsan %{nil}
%endif
# valgrind
# if you wish to compile an rpm to run all processes under valgrind...
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with valgrind
%{?_with_valgrind:%global _with_valgrind --enable-valgrind}
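# For illustration only, several of the flags documented above can be combined
# in one (hypothetical) build, e.g.:
#   rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --with debug --without server --without rdma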
##-----------------------------------------------------------------------------
## All %%global definitions should be placed here and kept sorted
##
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
%global _with_systemd true
%endif
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 7 )
%global _with_firewalld --enable-firewalld
%endif
%if 0%{?_tmpfilesdir:1}
%global _with_tmpfilesdir --with-tmpfilesdir=%{_tmpfilesdir}
%else
%global _with_tmpfilesdir --without-tmpfilesdir
%endif
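# note (illustrative): on systemd-based distributions %%{_tmpfilesdir} typically
# expands to /usr/lib/tmpfiles.d, so the flag above becomes
# --with-tmpfilesdir=/usr/lib/tmpfiles.d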
# without server should also disable some server-only components
%if 0%{?_without_server:1}
%global _without_events --disable-events
%global _without_georeplication --disable-georeplication
%global _with_gnfs %{nil}
%global _without_ocf --without-ocf
%endif
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
%global _usepython3 1
%global _pythonver 3
%else
%global _usepython3 0
%global _pythonver 2
%endif
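# illustrative expansion (not used by the build itself): with %%{_pythonver}
# set to 3, a dependency written as python%%{_pythonver}-devel resolves to
# python3-devel; with 2 it resolves to python2-devel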
# From https://fedoraproject.org/wiki/Packaging:Python#Macros
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
%{!?python2_sitelib: %global python2_sitelib %(python2 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
%{!?python2_sitearch: %global python2_sitearch %(python2 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")}
%global _rundir %{_localstatedir}/run
%endif
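# illustrative values on EL6 (x86_64), assuming the stock python2.6:
#   python2_sitelib  -> /usr/lib/python2.6/site-packages
#   python2_sitearch -> /usr/lib64/python2.6/site-packages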
%if ( 0%{?_with_systemd:1} )
%global service_start() /bin/systemctl --quiet start %1.service || : \
%{nil}
%global service_stop() /bin/systemctl --quiet stop %1.service || :\
%{nil}
%global service_install() install -D -p -m 0644 %1.service %{buildroot}%2 \
%{nil}
# can't seem to make a generic macro that works
%global glusterd_svcfile %{_unitdir}/glusterd.service
%global glusterfsd_svcfile %{_unitdir}/glusterfsd.service
%global glusterta_svcfile %{_unitdir}/gluster-ta-volume.service
%global glustereventsd_svcfile %{_unitdir}/glustereventsd.service
%global glusterfssharedstorage_svcfile %{_unitdir}/glusterfssharedstorage.service
%else
%global systemd_post() /sbin/chkconfig --add %1 >/dev/null 2>&1 || : \
%{nil}
%global systemd_preun() /sbin/chkconfig --del %1 >/dev/null 2>&1 || : \
%{nil}
%global systemd_postun_with_restart() /sbin/service %1 condrestart >/dev/null 2>&1 || : \
%{nil}
%global service_start() /sbin/service %1 start >/dev/null 2>&1 || : \
%{nil}
%global service_stop() /sbin/service %1 stop >/dev/null 2>&1 || : \
%{nil}
%global service_install() install -D -p -m 0755 %1.init %{buildroot}%2 \
%{nil}
# can't seem to make a generic macro that works
%global glusterd_svcfile %{_sysconfdir}/init.d/glusterd
%global glusterfsd_svcfile %{_sysconfdir}/init.d/glusterfsd
%global glustereventsd_svcfile %{_sysconfdir}/init.d/glustereventsd
%endif
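# illustrative only: scriptlets later in this spec can call the wrappers
# defined above regardless of which branch was taken, e.g.
#   %%service_stop glusterd
#   %%service_start glusterd
# (the service name here is just an example)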
%{!?_pkgdocdir: %global _pkgdocdir %{_docdir}/%{name}-%{version}}
# We do not want to generate useless provides and requires for the xlator
# .so files shipped in the glusterfs packages.
# Filter all generated entries:
#
# TODO: RHEL5 does not have a convenient solution
%if ( 0%{?rhel} == 6 )
# filter_setup exists in RHEL6 only
%filter_provides_in %{_libdir}/glusterfs/%{version}/
%global __filter_from_req %{?__filter_from_req} | grep -v -P '^(?!lib).*\.so.*$'
%filter_setup
%else
# modern rpm and current Fedora do not generate requires when the
# provides are filtered
%global __provides_exclude_from ^%{_libdir}/glusterfs/%{version}/.*$
%endif
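# e.g. (illustrative path) a private xlator such as
#   %%{_libdir}/glusterfs/%%{version}/xlator/cluster/replicate.so
# will not generate an automatic Provides:/Requires: entry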
##-----------------------------------------------------------------------------
## All package definitions should be placed here in alphabetical order
##
Summary: Distributed File System
%if ( 0%{_for_fedora_koji_builds} )
Name: glusterfs
Version: 3.8.0
Release: 0.1%{?prereltag:.%{prereltag}}%{?dist}
%else
Name: @PACKAGE_NAME@
Version: @PACKAGE_VERSION@
Release: 0.@PACKAGE_RELEASE@%{?dist}
%endif
License: GPLv2 or LGPLv3+
URL: http://docs.gluster.org/
%if ( 0%{_for_fedora_koji_builds} )
Source0: http://bits.gluster.org/pub/gluster/glusterfs/src/glusterfs-%{version}%{?prereltag}.tar.gz
Source1: glusterd.sysconfig
Source2: glusterfsd.sysconfig
Source7: glusterfsd.service
Source8: glusterfsd.init
%else
Source0: @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz
%endif
BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
Requires(pre): shadow-utils
%if ( 0%{?_with_systemd:1} )
BuildRequires: systemd
%endif
Requires: %{name}-libs%{?_isa} = %{version}-%{release}
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%endif
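# note (illustrative): %%{?systemd_requires} normally expands to the
# Requires(post)/Requires(preun)/Requires(postun): systemd dependencies
# defined by the systemd rpm macros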
%if 0%{?_with_asan:1} && !( 0%{?rhel} && 0%{?rhel} < 7 )
BuildRequires: libasan
%endif
%if 0%{?_with_tsan:1} && !( 0%{?rhel} && 0%{?rhel} < 7 )
BuildRequires: libtsan
%endif
BuildRequires: bison flex
BuildRequires: gcc make libtool
BuildRequires: ncurses-devel readline-devel
BuildRequires: libxml2-devel openssl-devel
BuildRequires: libaio-devel libacl-devel
BuildRequires: python%{_pythonver}-devel
%if ( 0%{?rhel} && 0%{?rhel} < 8 )
BuildRequires: python-ctypes
%endif
%if ( 0%{?_with_ipv6default:1} ) || ( 0%{!?_without_libtirpc:1} )
BuildRequires: libtirpc-devel
%endif
%if ( 0%{?fedora} && 0%{?fedora} > 27 ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
BuildRequires: rpcgen
%endif
BuildRequires: userspace-rcu-devel >= 0.7
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
BuildRequires: automake
%endif
BuildRequires: libuuid-devel
%if ( 0%{?_with_cmocka:1} )
BuildRequires: libcmocka-devel >= 1.0.1
%endif
%if ( 0%{!?_without_georeplication:1} )
BuildRequires: libattr-devel
%endif
%if (0%{?_with_firewalld:1})
BuildRequires: firewalld
%endif
Obsoletes: hekafs
Obsoletes: %{name}-common < %{version}-%{release}
Obsoletes: %{name}-core < %{version}-%{release}
Obsoletes: %{name}-ufo
Obsoletes: %{name}-ganesha
%if ( 0%{!?_with_gnfs:1} )
Obsoletes: %{name}-gnfs
%endif
Provides: %{name}-common = %{version}-%{release}
Provides: %{name}-core = %{version}-%{release}
%description
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package includes the glusterfs binary, the glusterfsd daemon and the
libglusterfs and glusterfs translator modules common to both GlusterFS server
and client framework.
%package api
Summary: GlusterFS api library
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
%description api
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package provides the glusterfs libgfapi library.
%package api-devel
Summary: Development Libraries
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-devel%{?_isa} = %{version}-%{release}
Requires: libacl-devel
%description api-devel
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package provides the api include files.
%if ( 0%{!?_without_server:1} )
%package cli
Summary: GlusterFS CLI
Requires: %{name}-libs%{?_isa} = %{version}-%{release}
%description cli
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package provides the GlusterFS CLI application and its man page.
%endif
%package cloudsync-plugins
Summary: Cloudsync Plugins
BuildRequires: libcurl-devel
%description cloudsync-plugins
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package provides cloudsync plugins for the archival feature.
%package devel
Summary: Development Libraries
Requires: %{name}%{?_isa} = %{version}-%{release}
# Needed for the Glupy examples to work
Requires: %{name}-extra-xlators%{?_isa} = %{version}-%{release}
%description devel
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package provides the development libraries and include files.
%package extra-xlators
Summary: Extra Gluster filesystem Translators
# We need python-gluster rpm for gluster module's __init__.py in Python
# site-packages area
Requires: python%{_pythonver}-gluster = %{version}-%{release}
Requires: python%{_pythonver}
%description extra-xlators
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package provides extra filesystem Translators, such as Glupy,
for GlusterFS.
%package fuse
Summary: Fuse client
BuildRequires: fuse-devel
Requires: attr
Requires: psmisc
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
Obsoletes: %{name}-client < %{version}-%{release}
Provides: %{name}-client = %{version}-%{release}
%description fuse
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package provides support for FUSE-based clients and includes the
glusterfs(d) binary.
%if ( 0%{!?_without_georeplication:1} )
%package geo-replication
Summary: GlusterFS Geo-replication
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-server%{?_isa} = %{version}-%{release}
Requires: python%{_pythonver}
Requires: python%{_pythonver}-prettytable
Requires: python%{_pythonver}-gluster = %{version}-%{release}
Requires: rsync
Requires: util-linux
%description geo-replication
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package provides support for geo-replication.
%endif
%if ( 0%{?_with_gnfs:1} )
%package gnfs
Summary: GlusterFS gNFS server
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
Requires: nfs-utils
%description gnfs
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package provides the glusterfs legacy gNFS server xlator.
%endif
%package libs
Summary: GlusterFS common libraries
%description libs
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package provides the base GlusterFS libraries.
%package -n python%{_pythonver}-gluster
Summary: GlusterFS python library
Requires: python%{_pythonver}
%if ( ! %{_usepython3} )
%{?python_provide:%python_provide python-gluster}
Provides: python-gluster = %{version}-%{release}
Obsoletes: python-gluster < 3.10
%endif
%description -n python%{_pythonver}-gluster
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package contains the Python modules of GlusterFS in its own gluster
namespace.
%if ( 0%{!?_without_rdma:1} )
%package rdma
Summary: GlusterFS rdma support for ib-verbs
%if ( 0%{?fedora} && 0%{?fedora} > 26 )
BuildRequires: rdma-core-devel
%else
BuildRequires: libibverbs-devel
BuildRequires: librdmacm-devel >= 1.0.15
%endif
Requires: %{name}%{?_isa} = %{version}-%{release}
%description rdma
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package provides support for the ib-verbs library.
%endif
%package regression-tests
Summary: Development Tools
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-fuse%{?_isa} = %{version}-%{release}
Requires: %{name}-server%{?_isa} = %{version}-%{release}
## thin provisioning support
Requires: lvm2 >= 2.02.89
Requires: perl(App::Prove) perl(Test::Harness) gcc util-linux-ng
Requires: python%{_pythonver}
Requires: attr dbench file git libacl-devel net-tools
Requires: nfs-utils xfsprogs yajl psmisc bc
%description regression-tests
The Gluster Test Framework is a suite of scripts used for
regression testing of Gluster.
%if ( 0%{!?_without_ocf:1} )
%package resource-agents
Summary: OCF Resource Agents for GlusterFS
License: GPLv3+
BuildArch: noarch
# this Group handling comes from the Fedora resource-agents package
# for glusterd
Requires: %{name}-server = %{version}-%{release}
# depending on the distribution, we need pacemaker or resource-agents
Requires: %{_prefix}/lib/ocf/resource.d
%description resource-agents
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package provides the resource agents which plug glusterd into
Open Cluster Framework (OCF) compliant cluster resource managers,
like Pacemaker.
%endif
%if ( 0%{!?_without_server:1} )
%package server
Summary: Clustered file-system server
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-cli%{?_isa} = %{version}-%{release}
Requires: %{name}-libs%{?_isa} = %{version}-%{release}
# some daemons (like quota) use a fuse-mount; glusterfsd is part of -fuse
Requires: %{name}-fuse%{?_isa} = %{version}-%{release}
# self-heal daemon, rebalance, nfs-server etc. are actually clients
Requires: %{name}-api%{?_isa} = %{version}-%{release}
Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
# lvm2 for snapshot, and nfs-utils and rpcbind/portmap for gnfs server
Requires: lvm2
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%else
Requires(post): /sbin/chkconfig
Requires(preun): /sbin/service
Requires(preun): /sbin/chkconfig
Requires(postun): /sbin/service
%endif
%if (0%{?_with_firewalld:1})
# we install firewalld rules, so we need to have the directory owned
%if ( 0%{!?rhel} )
# not on RHEL because firewalld-filesystem appeared in 7.3
# when EL7 rpm gets weak dependencies we can add a Suggests:
Requires: firewalld-filesystem
%endif
%endif
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
Requires: rpcbind
%else
Requires: portmap
%endif
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
Requires: python-argparse
%endif
%if ( 0%{?fedora} && 0%{?fedora} > 27 ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
Requires: python%{_pythonver}-pyxattr
%else
Requires: pyxattr
%endif
%if (0%{?_with_valgrind:1})
Requires: valgrind
%endif
%description server
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package provides the glusterfs server daemon.
%endif
%package thin-arbiter
Summary: GlusterFS thin-arbiter module
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-server%{?_isa} = %{version}-%{release}
%description thin-arbiter
This package provides tie-breaker functionality for GlusterFS
replicate volumes. It includes the translators required to provide the
functionality, along with a few other scripts required to get the setup done.
This package provides the glusterfs thin-arbiter translator.
%package client-xlators
Summary: GlusterFS client-side translators
%description client-xlators
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
This package provides the translators needed on any GlusterFS client.
%if ( 0%{!?_without_events:1} )
%package events
Summary: GlusterFS Events
Requires: %{name}-server%{?_isa} = %{version}-%{release}
Requires: python%{_pythonver} python%{_pythonver}-prettytable
Requires: python%{_pythonver}-gluster = %{version}-%{release}
%if ( 0%{?rhel} && 0%{?rhel} < 8 )
Requires: python-requests
%else
Requires: python%{_pythonver}-requests
%endif
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
Requires: python-argparse
%endif
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%endif
%description events
This package provides the GlusterFS events framework, including the
glustereventsd daemon.
%endif
%prep
%setup -q -n %{name}-%{version}%{?prereltag}
%if ( ! %{_usepython3} )
echo "fixing python shebangs..."
for f in api events extras geo-replication libglusterfs tools xlators; do
find $f -type f -exec sed -i 's|/usr/bin/python3|/usr/bin/python2|' {} \;
done
%endif
%build
# RHEL6 and earlier need to manually replace config.guess and config.sub
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
./autogen.sh
%endif
%configure \
%{?_with_asan} \
%{?_with_cmocka} \
%{?_with_debug} \
%{?_with_firewalld} \
%{?_with_gnfs} \
%{?_with_tmpfilesdir} \
%{?_with_tsan} \
%{?_with_valgrind} \
%{?_without_epoll} \
%{?_without_events} \
%{?_without_fusermount} \
%{?_without_georeplication} \
%{?_without_ocf} \
%{?_without_rdma} \
%{?_without_server} \
%{?_without_syslog} \
%{?_with_ipv6default} \
%{?_without_libtirpc}
# fix hardening and remove rpath in shlibs
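# (the sed inside the conditional below makes libtool also pass LDFLAGS when
# linking shared libraries, so the distribution hardening flags are applied;
# the last two seds keep libtool from hardcoding an rpath into the libraries)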
%if ( 0%{?fedora} && 0%{?fedora} > 17 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
sed -i 's| \\\$compiler_flags |&\\\$LDFLAGS |' libtool
%endif
sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|' libtool
sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|' libtool
make %{?_smp_mflags}
%check
make check
%install
rm -rf %{buildroot}
make install DESTDIR=%{buildroot}
%if ( 0%{!?_without_server:1} )
%if ( 0%{_for_fedora_koji_builds} )
install -D -p -m 0644 %{SOURCE1} \
%{buildroot}%{_sysconfdir}/sysconfig/glusterd
install -D -p -m 0644 %{SOURCE2} \
%{buildroot}%{_sysconfdir}/sysconfig/glusterfsd
%else
install -D -p -m 0644 extras/glusterd-sysconfig \
%{buildroot}%{_sysconfdir}/sysconfig/glusterd
%endif
%endif
mkdir -p %{buildroot}%{_localstatedir}/log/glusterd
mkdir -p %{buildroot}%{_localstatedir}/log/glusterfs
mkdir -p %{buildroot}%{_localstatedir}/log/glusterfsd
mkdir -p %{buildroot}%{_rundir}/gluster
# Remove unwanted files from all the shared libraries
find %{buildroot}%{_libdir} -name '*.a' -delete
find %{buildroot}%{_libdir} -name '*.la' -delete
# Remove installed docs; the ones we want are included by %%doc, in
# /usr/share/doc/glusterfs or /usr/share/doc/glusterfs-x.y.z depending
# on the distribution
%if ( 0%{?fedora} && 0%{?fedora} > 19 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
rm -rf %{buildroot}%{_pkgdocdir}/*
%else
rm -rf %{buildroot}%{_defaultdocdir}/%{name}
mkdir -p %{buildroot}%{_pkgdocdir}
%endif
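# Keep only the first 50 lines of the ChangeLog in the package and point at
# the online commit history for the rest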
head -50 ChangeLog > ChangeLog.head && mv ChangeLog.head ChangeLog
cat << EOM >> ChangeLog
More commit messages for this ChangeLog can be found at
https://forge.gluster.org/glusterfs-core/glusterfs/commits/v%{version}%{?prereltag}
EOM
# Remove benchmarking and other unpackaged files
# make install always puts these in %%{_defaultdocdir}/%%{name} so don't
# use %%{_pkgdocdir}; that will be wrong on later Fedora distributions
rm -rf %{buildroot}%{_defaultdocdir}/%{name}/benchmarking
rm -f %{buildroot}%{_defaultdocdir}/%{name}/glusterfs-mode.el
rm -f %{buildroot}%{_defaultdocdir}/%{name}/glusterfs.vim
%if ( 0%{!?_without_server:1} )
# Create working directory
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd
# Update configuration file to /var/lib working directory
sed -i 's|option working-directory /etc/glusterd|option working-directory %{_sharedstatedir}/glusterd|g' \
%{buildroot}%{_sysconfdir}/glusterfs/glusterd.vol
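# (with the usual macro values this rewrites 'option working-directory
# /etc/glusterd' to 'option working-directory /var/lib/glusterd')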
%endif
# Install glusterfsd .service or init.d file
%if ( 0%{!?_without_server:1} )
%if ( 0%{_for_fedora_koji_builds} )
%service_install glusterfsd %{glusterfsd_svcfile}
%endif
%endif
install -D -p -m 0644 extras/glusterfs-logrotate \
%{buildroot}%{_sysconfdir}/logrotate.d/glusterfs
%if ( 0%{!?_without_georeplication:1} )
# geo-rep ghosts
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/geo-replication
touch %{buildroot}%{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
install -D -p -m 0644 extras/glusterfs-georep-logrotate \
%{buildroot}%{_sysconfdir}/logrotate.d/glusterfs-georep
%endif
%if ( 0%{!?_without_server:1} )
# the rest of the ghosts
touch %{buildroot}%{_sharedstatedir}/glusterd/glusterd.info
touch %{buildroot}%{_sharedstatedir}/glusterd/options
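# create the glusterd hook directory tree; every hook type below gets a pre
# and a post subdirectory, e.g. hooks/1/add-brick/pre and hooks/1/add-brick/post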
subdirs=(add-brick create copy-file delete gsync-create remove-brick reset set start stop)
for dir in ${subdirs[@]}; do
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/hooks/1/"$dir"/{pre,post}
done
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/glustershd
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/peers
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/vols
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/nfs/run
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/bitd
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/quotad
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/scrub
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/snaps
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/ss_brick
touch %{buildroot}%{_sharedstatedir}/glusterd/nfs/nfs-server.vol
touch %{buildroot}%{_sharedstatedir}/glusterd/nfs/run/nfs.pid
%endif
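# copy the regression test suite and its run-tests.sh driver into the shared
# data directory so they can be packaged and run from an installed system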
find ./tests ./run-tests.sh -type f | cpio -pd %{buildroot}%{_prefix}/share/glusterfs
## Install bash completion for cli
%if ( 0%{!?_without_server:1} )
install -p -m 0744 -D extras/command-completion/gluster.bash \
%{buildroot}%{_sysconfdir}/bash_completion.d/gluster
%endif
%clean
rm -rf %{buildroot}
##-----------------------------------------------------------------------------
## All %%post should be placed here and keep them sorted
##
%post
/sbin/ldconfig
%if ( 0%{!?_without_syslog:1} )
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
%systemd_postun_with_restart rsyslog
%endif
%endif
exit 0
%post api
/sbin/ldconfig
%if ( 0%{!?_without_events:1} )
%post events
%systemd_post glustereventsd
%endif
%if ( 0%{!?_without_georeplication:1} )
%post geo-replication
if [ $1 -ge 1 ]; then
%systemd_postun_with_restart glusterd
fi
exit 0
%endif
%post libs
/sbin/ldconfig
%if ( 0%{!?_without_server:1} )
%post server
# Legacy server
%systemd_post glusterd
%if ( 0%{_for_fedora_koji_builds} )
%systemd_post glusterfsd
%endif
# ".cmd_log_history" is renamed to "cmd_history.log" in GlusterFS-3.7 .
# While upgrading glusterfs-server package form GlusterFS version <= 3.6 to
# GlusterFS version 3.7, ".cmd_log_history" should be renamed to
# "cmd_history.log" to retain cli command history contents.
if [ -f %{_localstatedir}/log/glusterfs/.cmd_log_history ]; then
mv %{_localstatedir}/log/glusterfs/.cmd_log_history \
%{_localstatedir}/log/glusterfs/cmd_history.log
fi
# Genuine Fedora (and EPEL) builds never put gluster files in /etc; if
# there are any files in /etc from a prior gluster.org install, move them
# to /var/lib. (N.B. Starting with 3.3.0 all gluster files are in /var/lib
# in gluster.org RPMs.) Be careful to copy them on the off chance that
# /etc and /var/lib are on separate file systems
if [ -d /etc/glusterd -a ! -h %{_sharedstatedir}/glusterd ]; then
mkdir -p %{_sharedstatedir}/glusterd
cp -a /etc/glusterd %{_sharedstatedir}/glusterd
rm -rf /etc/glusterd
ln -sf %{_sharedstatedir}/glusterd /etc/glusterd
fi
# Rename old volfiles in an RPM-standard way. These aren't actually
# considered package config files, so %%config doesn't work for them.
if [ -d %{_sharedstatedir}/glusterd/vols ]; then
for file in $(find %{_sharedstatedir}/glusterd/vols -name '*.vol'); do
newfile=${file}.rpmsave
echo "warning: ${file} saved as ${newfile}"
cp ${file} ${newfile}
done
fi
# add marker translator
# but first make certain that there are no old libs around to bite us
# BZ 834847
if [ -e /etc/ld.so.conf.d/glusterfs.conf ]; then
rm -f /etc/ld.so.conf.d/glusterfs.conf
/sbin/ldconfig
fi
%if (0%{?_with_firewalld:1})
%firewalld_reload
%endif
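# If glusterd is already running, stop it (and any gsyncd.py workers), run a
# one-shot 'glusterd --xlator-option *.upgrade=on -N' to regenerate the
# volfiles, and start the service again; otherwise only the one-shot upgrade
# run is needed.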
pidof -c -o %PPID -x glusterd &> /dev/null
if [ $? -eq 0 ]; then
kill -9 `pgrep -f gsyncd.py` &> /dev/null
killall --wait glusterd &> /dev/null
glusterd --xlator-option *.upgrade=on -N
    # Clean up the leftover glusterd socket file created by glusterd in the
    # rpm_script_t context.
rm -f %{_rundir}/glusterd.socket
# glusterd _was_ running, we killed it, it exited after *.upgrade=on,
# so start it again
%service_start glusterd
else
glusterd --xlator-option *.upgrade=on -N
    # Clean up the leftover glusterd socket file created by glusterd in the
    # rpm_script_t context.
rm -f %{_rundir}/glusterd.socket
fi
exit 0
%endif
##-----------------------------------------------------------------------------
## All %%pre should be placed here and keep them sorted
##
%pre
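# create the unprivileged gluster group and account used by the GlusterFS
# daemons, but only if they do not already exist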
getent group gluster > /dev/null || groupadd -r gluster
getent passwd gluster > /dev/null || useradd -r -g gluster -d %{_rundir}/gluster -s /sbin/nologin -c "GlusterFS daemons" gluster
exit 0
##-----------------------------------------------------------------------------
## All %%preun should be placed here and keep them sorted
##
%if ( 0%{!?_without_events:1} )
%preun events
if [ $1 -eq 0 ]; then
if [ -f %glustereventsd_svcfile ]; then
%service_stop glustereventsd
%systemd_preun glustereventsd
fi
fi
exit 0
%endif
%if ( 0%{!?_without_server:1} )
%preun server
if [ $1 -eq 0 ]; then
if [ -f %glusterfsd_svcfile ]; then
%service_stop glusterfsd
fi
%service_stop glusterd
if [ -f %glusterfsd_svcfile ]; then
%systemd_preun glusterfsd
fi
%systemd_preun glusterd
fi
if [ $1 -ge 1 ]; then
if [ -f %glusterfsd_svcfile ]; then
%systemd_postun_with_restart glusterfsd
fi
%systemd_postun_with_restart glusterd
fi
exit 0
%endif
%preun thin-arbiter
if [ $1 -eq 0 ]; then
if [ -f %glusterta_svcfile ]; then
%service_stop gluster-ta-volume
%systemd_preun gluster-ta-volume
fi
fi
##-----------------------------------------------------------------------------
## All %%postun should be placed here and keep them sorted
##
%postun
%if ( 0%{!?_without_syslog:1} )
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
%systemd_postun_with_restart rsyslog
%endif
%endif
%if ( 0%{!?_without_server:1} )
%postun server
%if (0%{?_with_firewalld:1})
%firewalld_reload
%endif
exit 0
%endif
##-----------------------------------------------------------------------------
## All %%files should be placed here and kept grouped
##
%files
%doc ChangeLog COPYING-GPLV2 COPYING-LGPLV3 INSTALL README.md THANKS COMMITMENT
%{_mandir}/man8/*gluster*.8*
%if ( 0%{!?_without_server:1} )
%exclude %{_mandir}/man8/gluster.8*
%endif
%dir %{_localstatedir}/log/glusterfs
%if ( 0%{!?_without_rdma:1} )
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
%endif
%if ( 0%{!?_without_server:1} )
%dir %{_datadir}/glusterfs
%dir %{_datadir}/glusterfs/scripts
%{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh
%{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh
%endif
# xlators that are needed on the client- and on the server-side
%dir %{_libdir}/glusterfs
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/auth
%{_libdir}/glusterfs/%{version}%{?prereltag}/auth/addr.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/auth/login.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport
%{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/socket.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/error-gen.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/delay-gen.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/io-stats.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/sink.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/trace.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/access-control.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/barrier.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/cdc.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changelog.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/utime.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/gfid-access.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/namespace.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/read-only.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/shard.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-client.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/worm.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/cloudsync.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/meta.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-cache.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-threads.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/md-cache.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/open-behind.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/quick-read.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/read-ahead.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/readdir-ahead.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/stat-prefetch.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/write-behind.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/nl-cache.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/system
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/system/posix-acl.so
%dir %attr(0775,gluster,gluster) %{_rundir}/gluster
%if 0%{?_tmpfilesdir:1} && 0%{!?_without_server:1}
%{_tmpfilesdir}/gluster.conf
%endif
%files api
%exclude %{_libdir}/*.so
# libgfapi files
%{_libdir}/libgfapi.*
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/api.so
%files api-devel
%{_libdir}/pkgconfig/glusterfs-api.pc
%{_libdir}/libgfapi.so
%dir %{_includedir}/glusterfs
%dir %{_includedir}/glusterfs/api
%{_includedir}/glusterfs/api/*
%if ( 0%{!?_without_server:1} )
%files cli
%{_sbindir}/gluster
%{_mandir}/man8/gluster.8*
%{_sysconfdir}/bash_completion.d/gluster
%endif
%files cloudsync-plugins
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins
%{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins/cloudsyncs3.so
%files devel
%dir %{_includedir}/glusterfs
%{_includedir}/glusterfs/*
%exclude %{_includedir}/glusterfs/api
%exclude %{_libdir}/libgfapi.so
%{_libdir}/*.so
%{_libdir}/pkgconfig/libgfchangelog.pc
%files client-xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/*.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/client.so
%files extra-xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
%files fuse
# glusterfs is a symlink to glusterfsd, -server depends on -fuse.
%{_sbindir}/glusterfs
%{_sbindir}/glusterfsd
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/fuse.so
/sbin/mount.glusterfs
%if ( 0%{!?_without_fusermount:1} )
%{_bindir}/fusermount-glusterfs
%endif
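# Illustrative only (not used by the build): with this package installed, a
# volume is typically mounted through the mount helper packaged above, e.g.
#   mount -t glusterfs server1:/myvol /mnt/myvol
# where "server1" and "myvol" are placeholder host and volume names.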
%if ( 0%{?_with_gnfs:1} && 0%{!?_without_server:1} )
%files gnfs
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs/server.so
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs
%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/nfs-server.vol
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs/run
%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/run/nfs.pid
%endif
%files thin-arbiter
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/thin-arbiter.so
%dir %{_datadir}/glusterfs/scripts
%{_datadir}/glusterfs/scripts/setup-thin-arbiter.sh
%config %{_sysconfdir}/glusterfs/thin-arbiter.vol
%if ( 0%{?_with_systemd:1} )
%{_unitdir}/gluster-ta-volume.service
%endif
%if ( 0%{!?_without_georeplication:1} )
%files geo-replication
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs-georep
%{_sbindir}/gfind_missing_files
%{_sbindir}/gluster-mountbroker
%dir %{_libexecdir}/glusterfs
%dir %{_libexecdir}/glusterfs/python
%dir %{_libexecdir}/glusterfs/python/syncdaemon
%{_libexecdir}/glusterfs/gsyncd
%{_libexecdir}/glusterfs/python/syncdaemon/*
%{_libexecdir}/glusterfs/gverify.sh
%{_libexecdir}/glusterfs/set_geo_rep_pem_keys.sh
%{_libexecdir}/glusterfs/peer_gsec_create
%{_libexecdir}/glusterfs/peer_mountbroker
%{_libexecdir}/glusterfs/peer_mountbroker.py*
%{_libexecdir}/glusterfs/gfind_missing_files
%{_libexecdir}/glusterfs/peer_georep-sshkey.py*
%{_sbindir}/gluster-georep-sshkey
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/geo-replication
%ghost %attr(0644,-,-) %{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post/S56glusterd-geo-rep-create-post.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/pre
%dir %{_datadir}/glusterfs
%dir %{_datadir}/glusterfs/scripts
%{_datadir}/glusterfs/scripts/get-gfid.sh
%{_datadir}/glusterfs/scripts/slave-upgrade.sh
%{_datadir}/glusterfs/scripts/gsync-upgrade.sh
%{_datadir}/glusterfs/scripts/generate-gfid-file.sh
%{_datadir}/glusterfs/scripts/gsync-sync-gfid
%{_datadir}/glusterfs/scripts/schedule_georep.py*
%endif
%files libs
%{_libdir}/*.so.*
%exclude %{_libdir}/libgfapi.*
%files -n python%{_pythonver}-gluster
# Install the gluster module in site-packages
# so that all other gluster submodules can reside in the same namespace.
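# Illustrative only (not used by the build): once this package is installed,
# the shared namespace can be exercised with, e.g.
#   python3 -c 'import gluster.cliutils'
# (or python2, on builds where python3 is not used).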
%if ( %{_usepython3} )
%dir %{python3_sitelib}/gluster
%{python3_sitelib}/gluster/__init__.*
%{python3_sitelib}/gluster/__pycache__
%{python3_sitelib}/gluster/cliutils
%else
%dir %{python2_sitelib}/gluster
%{python2_sitelib}/gluster/__init__.*
%{python2_sitelib}/gluster/cliutils
%endif
%if ( 0%{!?_without_rdma:1} )
%files rdma
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport
%{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
%endif
%files regression-tests
%dir %{_datadir}/glusterfs
%{_datadir}/glusterfs/run-tests.sh
%{_datadir}/glusterfs/tests
%exclude %{_datadir}/glusterfs/tests/vagrant
%if ( 0%{!?_without_ocf:1} )
%files resource-agents
# /usr/lib is the standard for OCF, also on x86_64
%{_prefix}/lib/ocf/resource.d/glusterfs
%endif
%if ( 0%{!?_without_server:1} )
%files server
%doc extras/clear_xattrs.sh
# sysconf
%config(noreplace) %{_sysconfdir}/glusterfs
%exclude %{_sysconfdir}/glusterfs/thin-arbiter.vol
%exclude %{_sysconfdir}/glusterfs/eventsconfig.json
%exclude %{_sharedstatedir}/glusterd/nfs/nfs-server.vol
%exclude %{_sharedstatedir}/glusterd/nfs/run/nfs.pid
%if ( 0%{?_with_gnfs:1} )
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs/*
%endif
%config(noreplace) %{_sysconfdir}/sysconfig/glusterd
%if ( 0%{_for_fedora_koji_builds} )
%config(noreplace) %{_sysconfdir}/sysconfig/glusterfsd
%endif
# init files
%glusterd_svcfile
%if ( 0%{_for_fedora_koji_builds} )
%glusterfsd_svcfile
%endif
%if ( 0%{?_with_systemd:1} )
%glusterfssharedstorage_svcfile
%endif
# binaries
%{_sbindir}/glusterd
%{_sbindir}/glfsheal
%{_sbindir}/gf_attach
%{_sbindir}/gluster-setgfid2path
rpm: reduce package dependencies and add -client-xlators Restructuring the RPM packages in order to reduce the dependencies that get installed for glusterfs-api (mainly on request for Qemu hosts). The dependencies of these packages look roughly like this: .------------------. | glusterfs-server | '-------+----------' |\ | \ | '---------------+--------------------. | | | | v v | .----------------. .---------------. | | glusterfs-fuse | | glusterfs-api | | '--------+-------' '------+--------' | /| /| | / | / | | / | / | | / | / | | .-----------+----=---------------' | |/ | | v v v .-----------. .--------------------------. | glusterfs | | glusterfs-client-xlators | '-----------' '--------------------------' With this structure, users can install glusterfs-server, glusterfs-fuse or glusterfs-api (libgfapi) without getting any unneeded xlators or other scripts/binaries. The "glusterfs" and "glusterfsd" binary (symlinked) is now part of the glusterfs-fuse package (moved from glusterfs). This does not make a difference for glusterfs-server installations, because a server installation always needs the glusterfs-fuse package for doing internal mounts. The advantage is that glusterfs-api does not pull in any executables that get into the $PATH. This has caused confusion before when people tried to remove the (wrongly assumed) server-only "glusterfsd" binaries. URL: http://thread.gmane.org/gmane.comp.file-systems.gluster.devel/10643 Change-Id: Id03f1a634ea3c62ab7008345be92e01ccf43b1a6 BUG: 1195947 Signed-off-by: Niels de Vos <ndevos@redhat.com> Reviewed-on: http://review.gluster.org/10554 Tested-by: Gluster Build System <jenkins@build.gluster.com> Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com> Tested-by: NetBSD Build System Reviewed-by: Humble Devassy Chirammal <humble.devassy@gmail.com>
2015-05-05 11:28:15 +03:00
# {_sbindir}/glusterfsd is the actual binary, but glusterfs (client) is a
# symlink. The binary itself (and symlink) are part of the glusterfs-fuse
# package, because glusterfs-server depends on that anyway.
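# Illustrative only: on an installed system the symlink relationship can be
# checked with something like
#   ls -l /usr/sbin/glusterfs   # expected to point at glusterfsd
# both files are packaged in glusterfs-fuse rather than here.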
# Manpages
%{_mandir}/man8/gluster-setgfid2path.8*
# xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/sdfs.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/selinux.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt/glusterd.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage/posix.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so
# snap_scheduler
%{_sbindir}/snap_scheduler.py
%{_sbindir}/gcron.py
%{_sbindir}/conf.py
# /var/lib/glusterd, e.g. hookscripts, etc.
%ghost %attr(0644,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/glusterd.info
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/bitd
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/groups
%attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/virt
%attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/metadata-cache
%attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/gluster-block
%attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/nl-cache
%attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/db-workload
%attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/distributed-virt
%attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/samba
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind/.keys
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glustershd
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post/disabled-quota-root-xattr-heal.sh
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post/S13create-subdir-mounts.sh
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre/S28Quota-enable-root-xattr-heal.sh
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/post
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/post/S10selinux-label-brick.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/pre
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/pre
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/post
%{_sharedstatedir}/glusterd/hooks/1/delete/post/S57glusterfind-delete-post
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/pre
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/pre/S10selinux-del-fcontext.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/pre
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset/pre
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post/S30samba-set.sh
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post/S32gluster_enable_shared_storage.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/pre
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S29CTDBsetup.sh
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S30samba-start.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/pre
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/post
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S30samba-stop.sh
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh
%config(noreplace) %ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/options
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/peers
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/quotad
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/scrub
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/snaps
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/ss_brick
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/vols
# Extra utility scripts
%dir %{_libexecdir}/glusterfs
%dir %{_datadir}/glusterfs/scripts
%{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
%if ( 0%{?_with_systemd:1} )
%{_libexecdir}/glusterfs/mount-shared-storage.sh
%{_datadir}/glusterfs/scripts/control-cpu-load.sh
%{_datadir}/glusterfs/scripts/control-mem.sh
%endif
# Incrementalapi
%{_libexecdir}/glusterfs/glusterfind
%{_bindir}/glusterfind
%{_libexecdir}/glusterfs/peer_add_secret_pub
%if ( 0%{?_with_firewalld:1} )
%{_prefix}/lib/firewalld/services/glusterfs.xml
%endif
# end of server files
%endif
# Events
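# The events subpackage ships the glustereventsd daemon, the gluster-eventsapi
# CLI, the gfevents Python package and the eventsdash.py sample events client.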
%if ( 0%{!?_without_events:1} )
%files events
%config(noreplace) %{_sysconfdir}/glusterfs/eventsconfig.json
%dir %{_sharedstatedir}/glusterd
%dir %{_sharedstatedir}/glusterd/events
%dir %{_libexecdir}/glusterfs
%{_libexecdir}/glusterfs/gfevents
%{_libexecdir}/glusterfs/peer_eventsapi.py*
%{_sbindir}/glustereventsd
%{_sbindir}/gluster-eventsapi
%{_datadir}/glusterfs/scripts/eventsdash.py*
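# systemd-based builds package the glustereventsd unit file; other builds fall
# back to the SysV init script.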
%if ( 0%{?_with_systemd:1} )
%{_unitdir}/glustereventsd.service
%else
%{_sysconfdir}/init.d/glustereventsd
%endif
%endif
%changelog
* Wed Mar 6 2019 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- remove unneeded ldconfig in scriptlets
- reported by Igor Gnatenko in Fedora
- https://src.fedoraproject.org/rpms/glusterfs/pull-request/5
* Mon Mar 4 2019 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- s390x has RDMA, since around Fedora 27 and in RHEL7 since June 2016.
* Tue Feb 26 2019 Ashish Pandey <aspandey@redhat.com>
- Add thin-arbiter package
* Sun Feb 24 2019 Aravinda VK <avishwan@redhat.com>
- Renamed events package to gfevents
* Thu Feb 21 2019 Jiffin Tony Thottan <jthottan@redhat.com>
- Obsoleting gluster-gnfs package
* Wed Nov 28 2018 Krutika Dhananjay <kdhananj@redhat.com>
- Install /var/lib/glusterd/groups/distributed-virt by default
* Tue Nov 13 2018 Niels de Vos <ndevos@redhat.com>
- Add an option to build with ThreadSanitizer (TSAN)
* Fri Sep 7 2018 Niels de Vos <ndevos@redhat.com>
- Add an option to build with address sanitizer (ASAN)
* Sun Jul 29 2018 Niels de Vos <ndevos@redhat.com>
- Disable building glusterfs-resource-agents on el6 (#1609551)
* Thu Feb 22 2018 Kotresh HR <khiremat@redhat.com>
- Added util-linux as dependency to georeplication rpm (#1544382)
* Thu Feb 1 2018 Niels de Vos <ndevos@redhat.com>
- Add '--without server' option to facilitate el6 builds (#1074947)
* Wed Jan 24 2018 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- python-ctypes no longer exists, now in python stdlib (#1538258)
* Thu Jan 18 2018 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- Fedora 28 glibc has removed rpc headers and rpcgen, use libtirpc
* Mon Dec 25 2017 Niels de Vos <ndevos@redhat.com>
- Fedora 28 has renamed pyxattr
* Wed Sep 27 2017 Mohit Agrawal <moagrawa@redhat.com>
- Added control-cpu-load.sh and control-mem.sh scripts to glusterfs-server section (#1496335)
* Tue Aug 22 2017 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- libibverbs-devel, librdmacm-devel -> rdma-core-devel #1483995
* Thu Jul 20 2017 Aravinda VK <avishwan@redhat.com>
- Added new tool/binary to set the gfid2path xattr on files
* Thu Jul 13 2017 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- various directories not owned by any package
* Fri Jun 16 2017 Jiffin Tony Thottan <jthottan@redhat.com>
- Add glusterfssharedstorage.service systemd file
* Fri Jun 9 2017 Poornima G <pgurusid@redhat.com>
- Install /var/lib/glusterd/groups/nl-cache by default
* Wed May 10 2017 Pranith Kumar K <pkarampu@redhat.com>
- Install /var/lib/glusterd/groups/gluster-block by default
* Thu Apr 27 2017 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- gnfs in an optional subpackage
* Wed Apr 26 2017 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- /var/run/gluster owner gluster:gluster(0775) for qemu(gfapi)
statedumps (#1445569)
* Mon Apr 24 2017 Jiffin Tony Thottan <jthottan@redhat.com>
- Install SELinux hook scripts that manage contexts for bricks (#1047975)
* Thu Apr 20 2017 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- firewalld-filesystem -> firewalld (#1443959)
* Thu Apr 13 2017 Niels de Vos <ndevos@redhat.com>
- the -regression-tests sub-package needs "bc" for some tests (#1442145)
* Mon Mar 20 2017 Niels de Vos <ndevos@redhat.com>
- Drop dependency on psmisc, pkill is used instead of killall (#1197308)
* Thu Feb 16 2017 Niels de Vos <ndevos@redhat.com>
- Obsolete and Provide python-gluster for upgrading from glusterfs < 3.10
* Tue Feb 7 2017 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- remove ganesha (#1418417)
* Wed Feb 1 2017 Poornima G <pgurusid@redhat.com>
- Install /var/lib/glusterd/groups/metadata-cache by default
* Wed Jan 18 2017 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- python2 (versus python3) cleanup (#1414902)
* Fri Jan 13 2017 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- switch to storhaug HA
* Fri Jan 6 2017 Niels de Vos <ndevos@redhat.com>
- use macro provided by firewalld-filesystem to reload firewalld
* Thu Nov 24 2016 Jiffin Tony Thottan <jthottan@redhat.com>
- remove S31ganesha-reset.sh from hooks (#1397795)
* Thu Sep 22 2016 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- python-ctypes no longer exists, now in python stdlib (#1378436)
* Wed Sep 14 2016 Aravinda VK <avishwan@redhat.com>
- Changed attribute of eventsconfig.json file as same as other configs (#1375532)
* Thu Sep 08 2016 Aravinda VK <avishwan@redhat.com>
- Added init script for glustereventsd (#1365395)
* Wed Aug 31 2016 Avra Sengupta <asengupt@redhat.com>
- Added conf.py for snap scheduler
* Wed Aug 31 2016 Aravinda VK <avishwan@redhat.com>
- Added new Geo-replication utility "gluster-georep-sshkey" (#1356508)
* Thu Aug 25 2016 Aravinda VK <avishwan@redhat.com>
- Added gluster-mountbroker utility for geo-rep mountbroker setup (#1343333)
* Mon Aug 22 2016 Milind Changire <mchangir@redhat.com>
- Add psmisc as dependency for glusterfs-fuse for killall command (#1367665)
* Thu Aug 4 2016 Jiffin Tony Thottan <jthottan@redhat.com>
- Remove ganesha.so from client xlators
* Sun Jul 31 2016 Soumya Koduri <skoduri@redhat.com>
- Add dependency on portblock resource agent for ganesha package (#1354439)
* Mon Jul 18 2016 Aravinda VK <avishwan@redhat.com>
- Added new subpackage events(glusterfs-events) (#1334044)
* Fri Jul 15 2016 Aravinda VK <avishwan@redhat.com>
- Removed ".py" extension from symlink(S57glusterfind-delete-post)(#1356868)
* Thu Jul 14 2016 Aravinda VK <avishwan@redhat.com>
- Removed extra packaging line of cliutils(python-gluster)(#1342356)
* Mon Jul 11 2016 Aravinda VK <avishwan@redhat.com>
- Added Python subpackage "cliutils" under gluster
* Tue May 31 2016 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- broken brp-python-bytecompile in RHEL7 results in installed
but unpackaged files.
* Fri May 6 2016 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- additional dirs and files in /var/lib/glusterd/... (#1326410)
* Tue Apr 26 2016 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- %%postun libs w/o firewalld on RHEL6 (#1330583)
* Tue Apr 12 2016 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- additional dirs and files in /var/lib/glusterd/... (#1326410)
* Mon Mar 7 2016 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- %%pre, %%post etc. scriptlet cleanup, ... -p /sbin/ldconfig (#1315024)
* Fri Jan 22 2016 Aravinda VK <avishwan@redhat.com>
- Added schedule_georep.py script to the glusterfs-geo-replication (#1300956)
* Sat Jan 16 2016 Niels de Vos <ndevos@redhat.com>
- glusterfs-server depends on -api (#1296931)
* Sun Jan 10 2016 Niels de Vos <ndevos@redhat.com>
- build system got fixed so that special glupy build is not needed anymore
* Mon Dec 28 2015 Niels de Vos <ndevos@redhat.com>
- hook scripts in glusterfs-ganesha use dbus-send, add dependency (#1294446)
* Tue Dec 22 2015 Niels de Vos <ndevos@redhat.com>
- move hook scripts for nfs-ganesha to the -ganesha sub-package
- use standard 'make' installation for the hook scripts (#1174765)
* Tue Sep 1 2015 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- erroneous ghost of ../hooks/1/delete causes install failure (#1258975)
* Tue Aug 25 2015 Anand Nekkunti <anekkunt@redhat.com>
- adding glusterfs-firewalld service (#1253967)
* Tue Aug 18 2015 Niels de Vos <ndevos@redhat.com>
- Include missing directories for glusterfind hooks scripts (#1225465)
* Mon Jun 15 2015 Niels de Vos <ndevos@redhat.com>
- Replace hook script S31ganesha-set.sh by S31ganesha-start.sh (#1231738)
* Fri Jun 12 2015 Aravinda VK <avishwan@redhat.com>
- Added rsync as dependency to georeplication rpm (#1231205)
* Tue Jun 02 2015 Aravinda VK <avishwan@redhat.com>
- Added post hook for volume delete as part of glusterfind (#1225465)
* Wed May 27 2015 Aravinda VK <avishwan@redhat.com>
- Added stop-all-gluster-processes.sh in glusterfs-server section (#1204641)
* Wed May 20 2015 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- python-gluster should be 'noarch' (#1219954)
* Wed May 20 2015 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- move libgf{db,changelog}.pc from -api-devel to -devel (#1223385)
* Wed May 20 2015 Anand Nekkunti <anekkunt@redhat.com>
- glusterd.socket file cleanup during post run upgrade (#1210404)
* Tue May 19 2015 Avra Sengupta <asengupt@redhat.com>
- Added S32gluster_enable_shared_storage.sh as volume set hook script (#1222013)
* Mon May 18 2015 Milind Changire <mchangir@redhat.com>
- Move file peer_add_secret_pub to the server RPM to support glusterfind (#1221544)
* Sun May 17 2015 Niels de Vos <ndevos@redhat.com>
- Fix building on RHEL-5 based distributions (#1222317)
* Tue May 05 2015 Niels de Vos <ndevos@redhat.com>
- Introduce glusterfs-client-xlators to reduce dependencies (#1195947)
* Wed Apr 15 2015 Humble Chirammal <hchiramm@redhat.com>
- Introducing python-gluster package to own gluster namespace in sitelib (#1211848)
* Sat Mar 28 2015 Mohammed Rafi KC <rkavunga@redhat.com>
- Add dependency for librdmacm version >= 1.0.15 (#1206744)
* Tue Mar 24 2015 Niels de Vos <ndevos@redhat.com>
- move libgfdb (with sqlite dependency) to -server subpackage (#1194753)
* Tue Mar 17 2015 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- glusterfs-ganesha sub-package
* Thu Mar 12 2015 Kotresh H R <khiremat@redhat.com>
- gfind_missing_files tool is included (#1187140)
* Tue Mar 03 2015 Aravinda VK <avishwan@redhat.com>
- Included glusterfind files as part of server package.
* Sun Mar 1 2015 Avra Sengupta <asengupt@redhat.com>
- Added installation of snap-scheduler
* Thu Feb 26 2015 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- enable cmocka unittest support only when asked for (#1067059)
* Tue Feb 24 2015 Niels de Vos <ndevos@redhat.com>
- POSIX ACL conversion needs BuildRequires libacl-devel (#1185654)
* Wed Feb 18 2015 Andreas Schneider <asn@redhat.com>
- Change cmockery2 to cmocka.
* Wed Feb 18 2015 Kaushal M <kaushal@redhat.com>
- add userspace-rcu as a requirement
* Fri Feb 13 2015 Gaurav Kumar Garg <ggarg@redhat.com>
- .cmd_log_history file should be renamed to cmd_history.log post
upgrade (#1165996)
* Fri Jan 30 2015 Nandaja Varma <nvarma@redhat.com>
- remove checks for rpmbuild/mock from run-tests.sh (#178008)
* Fri Jan 16 2015 Niels de Vos <ndevos@redhat.com>
- add support for /run/gluster through a tmpfiles.d config file (#1182934)
* Tue Jan 6 2015 Aravinda VK<avishwan@redhat.com>
- Added new libexec script for mountbroker user management (peer_mountbroker)
* Fri Dec 12 2014 Niels de Vos <ndevos@redhat.com>
- do not package all /usr/share/glusterfs/* files in regression-tests (#1169005)
* Fri Sep 26 2014 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- smarter logic in %%post server (#1146426)
* Wed Sep 24 2014 Balamurugan Arumugam <barumuga@redhat.com>
- remove /sbin/ldconfig as interpreter (#1145989)
* Fri Sep 5 2014 Lalatendu Mohanty <lmohanty@redhat.com>
- Changed the description to "GlusterFS a distributed filesystem"
* Tue Aug 5 2014 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- logrotate files (#1126832)
* Wed Jul 16 2014 Luis Pabon <lpabon@redhat.com>
- Added cmockery2 dependency
* Fri Jun 27 2014 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- killall --wait in %%post server, (#1113543)
* Thu Jun 19 2014 Humble Chirammal <hchiramm@redhat.com>
- Added dynamic loading of fuse module with glusterfs-fuse package installation in el5.
* Thu Jun 12 2014 Varun Shastry <vshastry@redhat.com>
- Add bash completion config to the cli package
* Tue Jun 03 2014 Vikhyat Umrao <vumrao@redhat.com>
- add nfs-utils package dependency for server package (#1065654)
* Thu May 22 2014 Poornima G <pgurusid@redhat.com>
- Rename old hookscripts in an RPM-standard way.
* Tue May 20 2014 Niels de Vos <ndevos@redhat.com>
- Almost drop calling ./autogen.sh
* Fri Apr 25 2014 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- Sync with Fedora spec (#1091408, #1091392)
* Fri Apr 25 2014 Arumugam Balamurugan <barumuga@redhat.com>
- fix RHEL 7 build failure "Installed (but unpackaged) file(s) found" (#1058188)
* Wed Apr 02 2014 Arumugam Balamurugan <barumuga@redhat.com>
- cleanup to rearrange spec file elements
* Wed Apr 02 2014 Arumugam Balamurugan <barumuga@redhat.com>
- add version/release dynamically (#1074919)
* Thu Mar 27 2014 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- attr dependency (#1184626)
* Wed Mar 26 2014 Poornima G <pgurusid@redhat.com>
- Include the hook scripts of add-brick, volume start, stop and set
* Wed Feb 26 2014 Niels de Vos <ndevos@redhat.com>
- Drop glusterfs-devel dependency from glusterfs-api (#1065750)
* Wed Feb 19 2014 Justin Clift <justin@gluster.org>
- Rename gluster.py to glupy.py to avoid namespace conflict (#1018619)
- Move the main Glupy files into glusterfs-extra-xlators rpm
- Move the Glupy Translator examples into glusterfs-devel rpm
* Thu Feb 06 2014 Aravinda VK <avishwan@redhat.com>
- Include geo-replication upgrade scripts and hook scripts.
* Wed Jan 15 2014 Niels de Vos <ndevos@redhat.com>
- Install /var/lib/glusterd/groups/virt by default
* Sat Jan 4 2014 Niels de Vos <ndevos@redhat.com>
- The main glusterfs package should not provide glusterfs-libs (#1048489)
* Tue Dec 10 2013 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- Sync with Fedora glusterfs.spec 3.5.0-0.1.qa3
* Fri Oct 11 2013 Harshavardhana <fharshav@redhat.com>
- Add '_sharedstatedir' macro to `/var/lib` on <= RHEL5 (#1003184)
* Wed Oct 9 2013 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- Sync with Fedora glusterfs.spec 3.4.1-2+
* Wed Oct 9 2013 Niels de Vos <ndevos@redhat.com>
- glusterfs-api-devel requires glusterfs-devel (#1016938, #1017094)
* Mon Sep 30 2013 Niels de Vos <ndevos@redhat.com>
- Package gfapi.py into the Python site-packages path (#1005146)
* Tue Sep 17 2013 Harshavardhana <fharshav@redhat.com>
- Provide a new package called "glusterfs-regression-tests" for standalone
regression testing.
* Thu Aug 22 2013 Niels de Vos <ndevos@redhat.com>
- Correct the day/date for some entries in this changelog (#1000019)
* Wed Aug 7 2013 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- Sync with Fedora glusterfs.spec
- add Requires
- add -cli subpackage,
- fix other minor differences with Fedora glusterfs.spec
* Tue Jul 30 2013 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- Sync with Fedora glusterfs.spec, add glusterfs-libs RPM for oVirt/qemu-kvm
* Thu Jul 25 2013 Csaba Henk <csaba@redhat.com>
- Added peer_add_secret_pub and peer_gsec_create to %%{_libexecdir}/glusterfs
* Thu Jul 25 2013 Aravinda VK <avishwan@redhat.com>
- Added gverify.sh to %%{_libexecdir}/glusterfs directory.
* Thu Jul 25 2013 Harshavardhana <fharshav@redhat.com>
- Allow to build with '--without bd' to disable 'bd' xlator
* Thu Jun 27 2013 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- fix the hardening fix for shlibs, use %%sed macro, shorter ChangeLog
* Wed Jun 26 2013 Niels de Vos <ndevos@redhat.com>
- move the mount/api xlator to glusterfs-api
* Fri Jun 7 2013 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- Sync with Fedora glusterfs.spec, remove G4S/UFO and Swift
* Mon Mar 4 2013 Niels de Vos <ndevos@redhat.com>
- Package /var/run/gluster so that statedumps can be created
* Wed Feb 6 2013 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- Sync with Fedora glusterfs.spec
* Tue Dec 11 2012 Filip Pytloun <filip.pytloun@gooddata.com>
- add sysconfig file
* Thu Oct 25 2012 Niels de Vos <ndevos@redhat.com>
- Add a sub-package for the OCF resource agents
* Wed Sep 05 2012 Niels de Vos <ndevos@redhat.com>
- Don't use python-ctypes on SLES (from Jörg Petersen)
* Tue Jul 10 2012 Niels de Vos <ndevos@redhat.com>
- Include extras/clear_xattrs.sh in the glusterfs-server sub-package
* Thu Jun 07 2012 Niels de Vos <ndevos@redhat.com>
- Mark /var/lib/glusterd as owned by glusterfs, subdirs belong to -server
* Wed May 9 2012 Kaleb S. KEITHLEY <kkeithle[at]redhat.com>
- Add BuildRequires: libxml2-devel so that configure will DTRT on
- Fedora's Koji build system
* Wed Nov 9 2011 Joe Julian <me@joejulian.name> - git master
- Merge fedora specfile into gluster's spec.in.
- Add conditionals to allow the same spec file to be used for both 3.1 and 3.2
- http://bugs.gluster.com/show_bug.cgi?id=2970
* Wed Oct 5 2011 Joe Julian <me@joejulian.name> - 3.2.4-1
- Update to 3.2.4
- Removed the $local_fs requirement from the init scripts as in RHEL/CentOS that's provided
- by netfs, which needs to be started after glusterd.
* Sun Sep 25 2011 Joe Julian <me@joejulian.name> - 3.2.3-2
- Merged in upstream changes
- Fixed version reporting 3.2git
- Added nfs init script (disabled by default)
* Thu Sep 1 2011 Joe Julian <me@joejulian.name> - 3.2.3-1
- Update to 3.2.3
* Tue Jul 19 2011 Joe Julian <me@joejulian.name> - 3.2.2-3
- Add readline and libtermcap dependencies
* Tue Jul 19 2011 Joe Julian <me@joejulian.name> - 3.2.2-2
- Critical patch to prevent glusterd from walking outside of its own volume during rebalance
* Thu Jul 14 2011 Joe Julian <me@joejulian.name> - 3.2.2-1
- Update to 3.2.2
* Wed Jul 13 2011 Joe Julian <me@joejulian.name> - 3.2.1-2
- fix hardcoded path to gsyncd in source to match the actual file location
* Tue Jun 21 2011 Joe Julian <me@joejulian.name> - 3.2.1
- Update to 3.2.1
* Mon Jun 20 2011 Joe Julian <me@joejulian.name> - 3.1.5
- Update to 3.1.5
* Tue May 31 2011 Joe Julian <me@joejulian.name> - 3.1.5-qa1.4
- Current git
* Sun May 29 2011 Joe Julian <me@joejulian.name> - 3.1.5-qa1.2
- set _sharedstatedir to /var/lib for FHS compliance in RHEL5/CentOS5
- mv /etc/glusterd, if it exists, to the new state dir for upgrading from gluster packaging
* Sat May 28 2011 Joe Julian <me@joejulian.name> - 3.1.5-qa1.1
- Update to 3.1.5-qa1
- Add patch to remove optimization disabling
- Add patch to remove forced 64 bit compile
- Obsolete glusterfs-core to allow for upgrading from gluster packaging
* Sat Mar 19 2011 Jonathan Steffan <jsteffan@fedoraproject.org> - 3.1.3-1
- Update to 3.1.3
- Merge in more upstream SPEC changes
- Remove patches from GlusterFS bugzilla #2309 and #2311
- Remove inode-gen.patch
* Sun Feb 06 2011 Jonathan Steffan <jsteffan@fedoraproject.org> - 3.1.2-3
- Add back in legacy SPEC elements to support older branches
* Thu Feb 03 2011 Jonathan Steffan <jsteffan@fedoraproject.org> - 3.1.2-2
- Add patches from CloudFS project
* Tue Jan 25 2011 Jonathan Steffan <jsteffan@fedoraproject.org> - 3.1.2-1
- Update to 3.1.2
* Wed Jan 5 2011 Dan Horák <dan[at]danny.cz> - 3.1.1-3
- no InfiniBand on s390(x)
* Sat Jan 1 2011 Jonathan Steffan <jsteffan@fedoraproject.org> - 3.1.1-2
- Update to support readline
- Update to not parallel build
* Mon Dec 27 2010 Silas Sewell <silas@sewell.ch> - 3.1.1-1
- Update to 3.1.1
- Change package names to mirror upstream
* Mon Dec 20 2010 Jonathan Steffan <jsteffan@fedoraproject.org> - 3.0.7-1
- Update to 3.0.7
* Wed Jul 28 2010 Jonathan Steffan <jsteffan@fedoraproject.org> - 3.0.5-1
- Update to 3.0.x
* Sat Apr 10 2010 Jonathan Steffan <jsteffan@fedoraproject.org> - 2.0.9-2
- Move python version requires into a proper BuildRequires otherwise
the spec always turned off python bindings as python is not part
of buildsys-build and the chroot will never have python unless we
require it
- Temporarily set -D_FORTIFY_SOURCE=1 until upstream fixes code
GlusterFS Bugzilla #197 (#555728)
- Move glusterfs-volgen to devel subpackage (#555724)
- Update description (#554947)
* Sat Jan 2 2010 Jonathan Steffan <jsteffan@fedoraproject.org> - 2.0.9-1
- Update to 2.0.9
* Sun Nov 8 2009 Jonathan Steffan <jsteffan@fedoraproject.org> - 2.0.8-1
- Update to 2.0.8
- Remove install of glusterfs-volgen, it's properly added to
automake upstream now
* Sat Oct 31 2009 Jonathan Steffan <jsteffan@fedoraproject.org> - 2.0.7-1
- Update to 2.0.7
- Install glusterfs-volgen, until it's properly added to automake
by upstream
- Add macro to be able to ship more docs
* Thu Sep 17 2009 Peter Lemenkov <lemenkov@gmail.com> 2.0.6-2
- Rebuilt with new fuse
* Sat Sep 12 2009 Matthias Saou <http://freshrpms.net/> 2.0.6-1
- Update to 2.0.6.
- No longer default to disable the client on RHEL5 (#522192).
- Update spec file URLs.
* Mon Jul 27 2009 Matthias Saou <http://freshrpms.net/> 2.0.4-1
- Update to 2.0.4.
* Thu Jun 11 2009 Matthias Saou <http://freshrpms.net/> 2.0.1-2
- Remove libglusterfs/src/y.tab.c to fix koji F11/devel builds.
* Sat May 16 2009 Matthias Saou <http://freshrpms.net/> 2.0.1-1
- Update to 2.0.1.
* Thu May 7 2009 Matthias Saou <http://freshrpms.net/> 2.0.0-1
- Update to 2.0.0 final.
* Wed Apr 29 2009 Matthias Saou <http://freshrpms.net/> 2.0.0-0.3.rc8
- Move glusterfsd to common, since the client has a symlink to it.
* Fri Apr 24 2009 Matthias Saou <http://freshrpms.net/> 2.0.0-0.2.rc8
- Update to 2.0.0rc8.
* Sun Apr 12 2009 Matthias Saou <http://freshrpms.net/> 2.0.0-0.2.rc7
- Update glusterfsd init script to the new style init.
- Update files to match the new default vol file names.
- Include logrotate for glusterfsd, use a pid file by default.
- Include logrotate for glusterfs, using killall for lack of anything better.
* Sat Apr 11 2009 Matthias Saou <http://freshrpms.net/> 2.0.0-0.1.rc7
- Update to 2.0.0rc7.
- Rename "libs" to "common" and move the binary, man page and log dir there.
* Tue Feb 24 2009 Fedora Release Engineering <rel-eng@lists.fedoraproject.org>
- Rebuilt for https://fedoraproject.org/wiki/Fedora_11_Mass_Rebuild
* Mon Feb 16 2009 Matthias Saou <http://freshrpms.net/> 2.0.0-0.1.rc1
- Update to 2.0.0rc1.
- Include new libglusterfsclient.h.
* Mon Feb 16 2009 Matthias Saou <http://freshrpms.net/> 1.3.12-1
- Update to 1.3.12.
- Remove no longer needed ocreat patch.
* Thu Jul 17 2008 Matthias Saou <http://freshrpms.net/> 1.3.10-1
- Update to 1.3.10.
- Remove mount patch, it's been included upstream now.
* Fri May 16 2008 Matthias Saou <http://freshrpms.net/> 1.3.9-1
- Update to 1.3.9.
* Fri May 9 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-1
- Update to 1.3.8 final.
* Wed Apr 23 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-0.10
- Include short patch to include fixes from latest TLA 751.
* Tue Apr 22 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-0.9
- Update to 1.3.8pre6.
- Include glusterfs binary in both the client and server packages, now that
glusterfsd is a symlink to it instead of a separate binary.
* Sun Feb 3 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-0.8
- Add python version check and disable bindings for version < 2.4.
* Sun Feb 3 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-0.7
- Add --without client rpmbuild option, make it the default for RHEL (no fuse).
(I hope "rhel" is the proper default macro name, couldn't find it...)
* Wed Jan 30 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-0.6
- Add --without ibverbs rpmbuild option to the package.
* Mon Jan 14 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-0.5
- Update to current TLA again, patch-636 which fixes the known segfaults.
* Thu Jan 10 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-0.4
- Downgrade to glusterfs--mainline--2.5--patch-628 which is more stable.
* Tue Jan 8 2008 Matthias Saou <http://freshrpms.net/> 1.3.8-0.3
- Update to current TLA snapshot.
- Include umount.glusterfs wrapper script (really needed? dunno).
- Include patch to mount wrapper to avoid multiple identical mounts.
* Sun Dec 30 2007 Matthias Saou <http://freshrpms.net/> 1.3.8-0.1
- Update to current TLA snapshot, which includes "volume-name=" fstab option.
* Mon Dec 3 2007 Matthias Saou <http://freshrpms.net/> 1.3.7-6
- Re-add the /var/log/glusterfs directory in the client sub-package (required).
- Include custom patch to support vol= in fstab for -n glusterfs client option.
* Mon Nov 26 2007 Matthias Saou <http://freshrpms.net/> 1.3.7-4
- Re-enable libibverbs.
- Check and update License field to GPLv3+.
- Add glusterfs-common obsoletes, to provide upgrade path from old packages.
- Include patch to add mode to O_CREATE opens.
* Thu Nov 22 2007 Matthias Saou <http://freshrpms.net/> 1.3.7-3
- Remove Makefile* files from examples.
- Include RHEL/Fedora type init script, since the included ones don't do.
* Wed Nov 21 2007 Matthias Saou <http://freshrpms.net/> 1.3.7-1
- Major spec file cleanup.
- Add missing %%clean section.
- Fix ldconfig calls (weren't set for the proper sub-package).
* Sat Aug 4 2007 Matt Paine <matt@mattsoftware.com> - 1.3.pre7
- Added support to build rpm without ibverbs support (use --without ibverbs
switch)
* Sun Jul 15 2007 Matt Paine <matt@mattsoftware.com> - 1.3.pre6
- Initial spec file