2007-05-30 06:37:03 +04:00
# Options to ctdbd. This is read by /etc/init.d/ctdb
2007-05-28 19:38:04 +04:00
2007-06-02 05:36:42 +04:00
# you must specify the location of a shared lock file across all the
# nodes. This must be on shared storage
# there is no default
# CTDB_RECOVERY_LOCK="/some/place/on/shared/storage"
2007-09-10 09:09:28 +04:00
# when doing IP takeover you also may specify what network interface
# to use by default for the public addresses. Otherwise you must
# specify an interface on each line of the public addresses file
# there is no default
# CTDB_PUBLIC_INTERFACE=eth0
2007-09-04 03:50:07 +04:00
# Should ctdb do IP takeover? If it should, then specify a file
2007-06-02 12:51:05 +04:00
# containing the list of public IP addresses that ctdb will manage
# Note that these IPs must be different from those in $NODES above
2007-09-04 03:50:07 +04:00
# there is no default.
# The syntax is one line per public address of the form :
# <ipaddress>/<netmask> <interface>
# Example: 10.1.1.1/24 eth0
#
2007-06-04 09:44:52 +04:00
# CTDB_PUBLIC_ADDRESSES=/etc/ctdb/public_addresses
2007-06-02 12:51:05 +04:00
2008-01-07 06:31:13 +03:00
# Should CTDB present the cluster using a single public ip address to clients
# and multiplex clients across all CONNECTED nodes ?
# This is based on LVS
# When this is enabled, the entire cluster will present one single ip address
# which clients will connect to.
# CTDB_LVS_PUBLIC_IP=10.1.1.1
2007-06-02 12:51:05 +04:00
# should ctdb manage starting/stopping the Samba service for you?
# default is to not manage Samba
# CTDB_MANAGES_SAMBA=yes
2007-06-02 05:36:42 +04:00
2009-02-20 02:58:34 +03:00
# If there are very many shares it may not be feasible to check that all
# of them are available during each monitoring interval.
# In that case this check can be disabled
# CTDB_SAMBA_SKIP_SHARE_CHECK=yes
2009-03-03 23:21:55 +03:00
# CTDB_NFS_SKIP_SHARE_CHECK=yes
2009-02-20 02:58:34 +03:00
2008-07-15 05:03:35 +04:00
# specify which ports we should check that there is a daemon listening to
# by default we use testparm and look in smb.conf to figure out.
# CTDB_SAMBA_CHECK_PORTS="445"
2007-11-14 08:17:52 +03:00
# should ctdb manage starting/stopping Winbind service?
2007-11-18 07:14:54 +03:00
# if left commented out then it will be autodetected based on smb.conf
2007-11-14 08:17:52 +03:00
# CTDB_MANAGES_WINBIND=yes
2008-05-22 00:01:17 +04:00
# should ctdb manage starting/stopping the VSFTPD service
# CTDB_MANAGES_VSFTPD=yes
2008-05-22 00:04:36 +04:00
# should ctdb manage starting/stopping the ISCSI service
# CTDB_MANAGES_ISCSI=yes
2008-05-22 00:08:38 +04:00
# should ctdb manage starting/stopping the NFS service
# CTDB_MANAGES_NFS=yes
2009-03-10 02:21:04 +03:00
# should ctdb manage starting/stopping the Apache web server httpd?
# CTDB_MANAGES_HTTPD=yes
2009-03-09 02:08:26 +03:00
# The init style (redhat/suse/ubuntu...) is usually auto-detected.
# The names of init scripts of services managed by CTDB are set
# based on the detected init style. You can override the init style
# auto-detection here to explicitly use a scheme. This might be
# useful when you have installed packages (for instance samba
# packages) with a different init script layout.
# There is no default.
# CTDB_INIT_STYLE=redhat
2008-05-22 00:01:17 +04:00
2009-03-09 02:20:30 +03:00
# The following are specific Samba init scripts / services that you
# can override from auto-detection.
# There are no defaults.
# CTDB_SERVICE_SMB=smb
# CTDB_SERVICE_NMB=nmb
# CTDB_SERVICE_WINBIND=winbind
2007-06-05 09:18:37 +04:00
# you may wish to raise the file descriptor limit for ctdb
# use a ulimit command here. ctdb needs one file descriptor per
# connected client (ie. one per connected client in Samba)
# ulimit -n 10000
2007-05-30 06:37:03 +04:00
# the NODES file must be specified or ctdb won't start
# it should contain a list of IPs that ctdb will use
# it must be exactly the same on all cluster nodes
# defaults to /etc/ctdb/nodes
2007-06-04 09:44:52 +04:00
# CTDB_NODES=/etc/ctdb/nodes
2007-05-30 06:37:03 +04:00
2009-03-31 07:23:31 +04:00
# a script to run when node health changes
# CTDB_NOTIFY_SCRIPT=/etc/ctdb/notify.sh
2007-05-30 06:37:03 +04:00
# the directory to put the local ctdb database files in
# defaults to /var/ctdb
2007-06-04 09:44:52 +04:00
# CTDB_DBDIR=/var/ctdb
2007-05-30 06:37:03 +04:00
2007-09-21 10:12:04 +04:00
# the directory to put the local persistent ctdb database files in
# defaults to /var/ctdb/persistent
# CTDB_DBDIR_PERSISTENT=/var/ctdb/persistent
2007-08-15 08:44:03 +04:00
# the directory where service specific event scripts are stored
# defaults to /etc/ctdb/events.d
# CTDB_EVENT_SCRIPT_DIR=/etc/ctdb/events.d
2007-05-30 06:37:03 +04:00
# the location of the local ctdb socket
# defaults to /tmp/ctdb.socket
2007-05-29 10:02:02 +04:00
# CTDB_SOCKET=/tmp/ctdb.socket
2007-05-30 06:37:03 +04:00
# what transport to use. Only tcp is currently supported
# defaults to tcp
2007-06-04 09:44:52 +04:00
# CTDB_TRANSPORT="tcp"
2007-05-30 06:37:03 +04:00
2008-02-21 05:29:28 +03:00
# When set, this variable makes ctdb monitor the amount of free memory
# in the system (the second number in the buffers/cache output from free -m).
# If the amount of free memory drops below this threshold the node will become
2008-02-22 01:42:52 +03:00
# unhealthy and ctdb and all managed services will be shut down.
# Once this occurs, the administrator needs to find the reason for the OOM
# situation, rectify it and restart ctdb with "service ctdb start"
2008-02-21 05:29:28 +03:00
# The unit is MByte
# CTDB_MONITOR_FREE_MEMORY=100
2008-02-22 01:42:52 +03:00
# When set to yes, the CTDB node will start in DISABLED mode and not host
# any public ip addresses. The administrator needs to explicitly enable
# the node with "ctdb enable"
# CTDB_START_AS_DISABLED="yes"
2008-05-06 04:41:22 +04:00
# LMASTER and RECMASTER capabilities.
# By default all nodes are capable of both being LMASTER for records and
# also for taking the RECMASTER role and perform recovery.
# These parameters can be used to disable these two roles on a node.
# Note: If there are NO available nodes left in a cluster that can perform
# the RECMASTER role, the cluster will not be able to recover from a failure
# and will remain in RECOVERY mode until a RECMASTER capable node becomes
# available. Same for LMASTER.
# These parameters are useful for scenarios where you have one "remote" node
# in a cluster and you do not want the remote node to be fully participating
# in the cluster and slow things down.
# For that case, set both roles to "no" for the remote node on the remote site
# but leave the roles default to "yes" on the primary nodes in the central
# datacentre.
# CTDB_CAPABILITY_RECMASTER=yes
# CTDB_CAPABILITY_LMASTER=yes
2009-03-16 23:35:53 +03:00
# NAT-GW configuration
# Some services running on the CTDB node may need to originate traffic to
# remote servers before the node is assigned any IP addresses.
# This is problematic since before the node has public addresses the node might
# not be able to route traffic to the public networks.
# One solution is to have static public addresses assigned with routing
# in addition to the public address interfaces, thus guaranteeing that
# a node always can route traffic to the external network.
# This is the most simple solution but it uses up a large number of
# additional ip addresses.
#
# A more complex solution is NAT-GW.
# In this mode we only need one additional ip address for the cluster from
# the external public network.
# One of the nodes in the cluster is elected to be hosting this ip address
# so it can reach the external services. This node is also configured
# to use NAT MASQUERADING for all traffic from the internal private network
# to the external network. This node is the NAT-GW node.
#
2009-03-18 11:19:49 +03:00
# All other nodes are set up with a default route with a metric of 10 to point
# to the nat-gw node.
2009-03-16 23:35:53 +03:00
#
# The effect of this is that only when a node does not have a public address
2009-03-18 11:19:49 +03:00
# and thus no proper routes to the external world it will instead
# route all packets through the nat-gw node.
#
2009-05-14 02:12:48 +04:00
# CTDB_NATGW_NODES is the list of nodes that belong to this natgw group.
2009-03-25 05:37:57 +03:00
# You can have multiple natgw groups in one cluster but each node
# can only belong to one single natgw group.
#
2009-05-14 02:12:48 +04:00
# CTDB_NATGW_PUBLIC_IP=10.0.0.227/24
# CTDB_NATGW_PUBLIC_IFACE=eth0
# CTDB_NATGW_DEFAULT_GATEWAY=10.0.0.1
# CTDB_NATGW_PRIVATE_NETWORK=10.1.1.0/24
# CTDB_NATGW_NODES=/etc/ctdb/natgw_nodes
2009-03-16 23:35:53 +03:00
2007-05-30 06:37:03 +04:00
# where to log messages
# the default is /var/log/log.ctdb
2007-06-04 09:44:52 +04:00
# CTDB_LOGFILE=/var/log/log.ctdb
2007-05-30 06:37:03 +04:00
# what debug level to run at. Higher means more verbose
2008-11-13 02:55:20 +03:00
# the default is 2
CTDB_DEBUGLEVEL=2
2007-05-29 10:02:02 +04:00
2007-06-04 14:05:31 +04:00
# set any default tuning options for ctdb
# use CTDB_SET_XXXX=value where XXXX is the name of the tuning
# variable
# for example
# CTDB_SET_TRAVERSETIMEOUT=60
# you can get a list of variables using "ctdb listvars"
2007-05-30 06:37:03 +04:00
# any other options you might want. Run ctdbd --help for a list
2007-05-29 10:02:02 +04:00
# CTDB_OPTIONS=