extras/group : add database workload profile
Running DB workload patterns with all perf xlators enabled by default has resulted in some inconsistency issues. Based on the internal testing done by Elko Kuric (ekuric@redhat.com), there is a certain set of perf xlators that need to be turned off for these types of workloads to be supported by Gluster. The proposal is to leverage the group profile infrastructure to collect all of those tunables in one place, so that users only need to apply the profile to a volume in order to use it for database workloads. Credits: Elko Kuric (ekuric@redhat.com) Change-Id: I8a50e915278ad4085b9aaa3f160a33af7c0b0444 fixes: bz#1596020 Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
This commit is contained in:
parent
762ed20a50
commit
25c1c6e9a2
@ -16,7 +16,9 @@ SUBDIRS = init.d systemd benchmarking hook-scripts $(OCF_SUBDIR) LinuxRPM \
|
||||
confdir = $(sysconfdir)/glusterfs
|
||||
if WITH_SERVER
|
||||
conf_DATA = glusterfs-logrotate gluster-rsyslog-7.2.conf gluster-rsyslog-5.8.conf \
|
||||
logger.conf.example glusterfs-georep-logrotate group-virt.example group-metadata-cache group-gluster-block group-nl-cache
|
||||
logger.conf.example glusterfs-georep-logrotate group-virt.example \
|
||||
group-metadata-cache group-gluster-block group-nl-cache \
|
||||
group-db-workload
|
||||
endif
|
||||
|
||||
voldir = $(sysconfdir)/glusterfs
|
||||
@ -37,14 +39,15 @@ endif
|
||||
|
||||
EXTRA_DIST = glusterfs-logrotate gluster-rsyslog-7.2.conf gluster-rsyslog-5.8.conf \
|
||||
logger.conf.example glusterfs-georep-logrotate group-virt.example \
|
||||
group-metadata-cache group-gluster-block group-nl-cache specgen.scm \
|
||||
glusterfs-mode.el glusterfs.vim migrate-unify-to-distribute.sh \
|
||||
backend-xattr-sanitize.sh backend-cleanup.sh disk_usage_sync.sh \
|
||||
clear_xattrs.sh glusterd-sysconfig glusterd.vol \
|
||||
post-upgrade-script-for-quota.sh pre-upgrade-script-for-quota.sh \
|
||||
command-completion/gluster.bash command-completion/Makefile \
|
||||
command-completion/README stop-all-gluster-processes.sh clang-checker.sh \
|
||||
mount-shared-storage.sh control-cpu-load.sh control-mem.sh
|
||||
group-metadata-cache group-gluster-block group-nl-cache \
|
||||
group-db-workload specgen.scm glusterfs-mode.el glusterfs.vim \
|
||||
migrate-unify-to-distribute.sh backend-xattr-sanitize.sh \
|
||||
backend-cleanup.sh disk_usage_sync.sh clear_xattrs.sh \
|
||||
glusterd-sysconfig glusterd.vol post-upgrade-script-for-quota.sh \
|
||||
pre-upgrade-script-for-quota.sh command-completion/gluster.bash \
|
||||
command-completion/Makefile command-completion/README \
|
||||
stop-all-gluster-processes.sh clang-checker.sh mount-shared-storage.sh \
|
||||
control-cpu-load.sh control-mem.sh
|
||||
|
||||
if WITH_SERVER
|
||||
install-data-local:
|
||||
@ -62,4 +65,6 @@ install-data-local:
|
||||
$(DESTDIR)$(GLUSTERD_WORKDIR)/groups/gluster-block
|
||||
$(INSTALL_DATA) $(top_srcdir)/extras/group-nl-cache \
|
||||
$(DESTDIR)$(GLUSTERD_WORKDIR)/groups/nl-cache
|
||||
$(INSTALL_DATA) $(top_srcdir)/extras/group-db-workload \
|
||||
$(DESTDIR)$(GLUSTERD_WORKDIR)/groups/db-workload
|
||||
endif
|
||||
|
8
extras/group-db-workload
Normal file
8
extras/group-db-workload
Normal file
@ -0,0 +1,8 @@
|
||||
performance.open-behind=off
|
||||
performance.write-behind=off
|
||||
performance.stat-prefetch=off
|
||||
performance.quick-read=off
|
||||
performance.strict-o-direct=on
|
||||
performance.read-ahead=off
|
||||
performance.io-cache=off
|
||||
performance.readdir-ahead=off
|
@ -1391,6 +1391,7 @@ exit 0
|
||||
%attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/metadata-cache
|
||||
%attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/gluster-block
|
||||
%attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/nl-cache
|
||||
%attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/db-workload
|
||||
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind
|
||||
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind/.keys
|
||||
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glustershd
|
||||
|
Loading…
x
Reference in New Issue
Block a user