mirror of
https://github.com/samba-team/samba.git
synced 2024-12-22 13:34:15 +03:00
change how we do public addresses and takeover so that we can have
multiple public addresses spread across multiple interfaces on each node. This is a massive patch since we have previously made the assumption that we only have one public address per node. Get rid of the public_interface argument. The public addresses file now explicitly lists which interface the address belongs to (This used to be ctdb commit 462ebbc791e906a6b874c862defea43235597ca8)
This commit is contained in:
parent
4495dbacec
commit
12ebb74838
@ -1540,7 +1540,7 @@ uint32_t *ctdb_get_connected_nodes(struct ctdb_context *ctdb,
|
||||
|
||||
for (i=0;i<map->num;i++) {
|
||||
if (!(map->nodes[i].flags & NODE_FLAGS_DISCONNECTED)) {
|
||||
nodes[*num_nodes] = map->nodes[i].vnn;
|
||||
nodes[*num_nodes] = map->nodes[i].pnn;
|
||||
(*num_nodes)++;
|
||||
}
|
||||
}
|
||||
@ -2300,15 +2300,16 @@ int ctdb_ctrl_killtcp(struct ctdb_context *ctdb,
|
||||
*/
|
||||
int ctdb_ctrl_get_tcp_tickles(struct ctdb_context *ctdb,
|
||||
struct timeval timeout, uint32_t destnode,
|
||||
TALLOC_CTX *mem_ctx, uint32_t vnn,
|
||||
TALLOC_CTX *mem_ctx,
|
||||
struct sockaddr_in *ip,
|
||||
struct ctdb_control_tcp_tickle_list **list)
|
||||
{
|
||||
int ret;
|
||||
TDB_DATA data, outdata;
|
||||
int32_t status;
|
||||
|
||||
data.dptr = (uint8_t*)&vnn;
|
||||
data.dsize = sizeof(vnn);
|
||||
data.dptr = (uint8_t*)ip;
|
||||
data.dsize = sizeof(struct sockaddr_in);
|
||||
|
||||
ret = ctdb_control(ctdb, destnode, 0,
|
||||
CTDB_CONTROL_GET_TCP_TICKLE_LIST, 0, data,
|
||||
|
@ -52,7 +52,6 @@ CTDB_OPTIONS="$CTDB_OPTIONS --reclock=$CTDB_RECOVERY_LOCK"
|
||||
[ -z "$CTDB_NODES" ] || CTDB_OPTIONS="$CTDB_OPTIONS --nlist=$CTDB_NODES"
|
||||
[ -z "$CTDB_SOCKET" ] || CTDB_OPTIONS="$CTDB_OPTIONS --socket=$CTDB_SOCKET"
|
||||
[ -z "$CTDB_PUBLIC_ADDRESSES" ] || CTDB_OPTIONS="$CTDB_OPTIONS --public-addresses=$CTDB_PUBLIC_ADDRESSES"
|
||||
[ -z "$CTDB_PUBLIC_INTERFACE" ] || CTDB_OPTIONS="$CTDB_OPTIONS --public-interface=$CTDB_PUBLIC_INTERFACE"
|
||||
[ -z "$CTDB_DBDIR" ] || CTDB_OPTIONS="$CTDB_OPTIONS --dbdir=$CTDB_DBDIR"
|
||||
[ -z "$CTDB_EVENT_SCRIPT_DIR" ] || CTDB_OPTIONS="$CTDB_OPTIONS --event-script-dir $CTDB_EVENT_SCRIPT_DIR"
|
||||
[ -z "$CTDB_TRANSPORT" ] || CTDB_OPTIONS="$CTDB_OPTIONS --transport $CTDB_TRANSPORT"
|
||||
|
@ -5,17 +5,16 @@
|
||||
# there is no default
|
||||
# CTDB_RECOVERY_LOCK="/some/place/on/shared/storage"
|
||||
|
||||
# should ctdb do IP takeover? If it should, then specify a file
|
||||
# Should ctdb do IP takeover? If it should, then specify a file
|
||||
# containing the list of public IP addresses that ctdb will manage
|
||||
# Note that these IPs must be different from those in $NODES above
|
||||
# there is no default
|
||||
# there is no default.
|
||||
# The syntax is one line per public address of the form :
|
||||
# <ipaddress>/<netmask> <interface>
|
||||
# Example: 10.1.1.1/24 eth0
|
||||
#
|
||||
# CTDB_PUBLIC_ADDRESSES=/etc/ctdb/public_addresses
|
||||
|
||||
# when doing IP takeover you also must specify what network interface
|
||||
# to use for the public addresses
|
||||
# there is no default
|
||||
# CTDB_PUBLIC_INTERFACE=eth0
|
||||
|
||||
# should ctdb manage starting/stopping the Samba service for you?
|
||||
# default is to not manage Samba
|
||||
# CTDB_MANAGES_SAMBA=yes
|
||||
|
@ -11,13 +11,14 @@ loadconfig ctdb
|
||||
cmd="$1"
|
||||
shift
|
||||
|
||||
[ -z "$CTDB_PUBLIC_INTERFACE" ] && {
|
||||
[ "$cmd" = "startup" ] && {
|
||||
echo "Event script $0 : CTDB_PUBLIC_INTERFACE not set. Nothing to do."
|
||||
}
|
||||
exit 0
|
||||
[ -z "$CTDB_PUBLIC_ADDRESSES" ] && {
|
||||
CTDB_PUBLIC_ADDRESSES=/etc/ctdb/public_addresses
|
||||
}
|
||||
|
||||
[ ! -f "$CTDB_PUBLIC_ADDRESSES" ] && {
|
||||
echo "No public addresses file found. Nothing to do for 10.interfaces"
|
||||
exit 0
|
||||
}
|
||||
|
||||
case $cmd in
|
||||
#############################
|
||||
@ -84,13 +85,18 @@ case $cmd in
|
||||
|
||||
monitor)
|
||||
[ -x /usr/sbin/ethtool ] && {
|
||||
/usr/sbin/ethtool $CTDB_PUBLIC_INTERFACE | grep 'Link detected: yes' > /dev/null || {
|
||||
echo "`date` ERROR: No link on the public network interface $CTDB_PUBLIC_INTERFACE"
|
||||
exit 1
|
||||
}
|
||||
cat $CTDB_PUBLIC_ADDRESSES | sed -e "s/^[^\t ]*[\t ]*//" -e "s/[\t ]*$//" | sort | uniq | while read IFACE; do
|
||||
/usr/sbin/ethtool $IFACE | grep 'Link detected: yes' > /dev/null || {
|
||||
echo "`date` ERROR: No link on the public network interface $IFACE"
|
||||
exit 1
|
||||
}
|
||||
done
|
||||
}
|
||||
;;
|
||||
|
||||
esac
|
||||
|
||||
exit 0
|
||||
|
||||
|
||||
|
||||
|
@ -1,11 +1,11 @@
|
||||
.\" Title: ctdb
|
||||
.\" Author:
|
||||
.\" Generator: DocBook XSL Stylesheets v1.71.0 <http://docbook.sf.net/>
|
||||
.\" Date: 08/23/2007
|
||||
.\" Date: 09/03/2007
|
||||
.\" Manual:
|
||||
.\" Source:
|
||||
.\"
|
||||
.TH "CTDB" "1" "08/23/2007" "" ""
|
||||
.TH "CTDB" "1" "09/03/2007" "" ""
|
||||
.\" disable hyphenation
|
||||
.nh
|
||||
.\" disable justification (adjust text to left margin only)
|
||||
@ -180,7 +180,7 @@ Example output:
|
||||
.sp
|
||||
.RS 3n
|
||||
.nf
|
||||
Number of nodes:4
|
||||
Number of addresses:4
|
||||
12.1.1.1 0
|
||||
12.1.1.2 1
|
||||
12.1.1.3 2
|
||||
|
@ -89,7 +89,7 @@ response from 3 time=0.000114 sec (2 clients)
|
||||
</p><p>
|
||||
Example output:
|
||||
</p><pre class="screen">
|
||||
Number of nodes:4
|
||||
Number of addresses:4
|
||||
12.1.1.1 0
|
||||
12.1.1.2 1
|
||||
12.1.1.3 2
|
||||
|
@ -228,7 +228,7 @@ response from 3 time=0.000114 sec (2 clients)
|
||||
Example output:
|
||||
</para>
|
||||
<screen format="linespecific">
|
||||
Number of nodes:4
|
||||
Number of addresses:4
|
||||
12.1.1.1 0
|
||||
12.1.1.2 1
|
||||
12.1.1.3 2
|
||||
|
@ -1,11 +1,11 @@
|
||||
.\" Title: ctdbd
|
||||
.\" Author:
|
||||
.\" Generator: DocBook XSL Stylesheets v1.71.0 <http://docbook.sf.net/>
|
||||
.\" Date: 08/23/2007
|
||||
.\" Date: 09/03/2007
|
||||
.\" Manual:
|
||||
.\" Source:
|
||||
.\"
|
||||
.TH "CTDBD" "1" "08/23/2007" "" ""
|
||||
.TH "CTDBD" "1" "09/03/2007" "" ""
|
||||
.\" disable hyphenation
|
||||
.nh
|
||||
.\" disable justification (adjust text to left margin only)
|
||||
@ -16,7 +16,7 @@ ctdbd \- The CTDB cluster daemon
|
||||
.HP 6
|
||||
\fBctdbd\fR
|
||||
.HP 6
|
||||
\fBctdbd\fR {\-\-reclock=<filename>} {\-\-nlist=<filename>} {\-\-dbdir=<directory>} [\-?\ \-\-help] [\-\-usage] [\-i\ \-\-interactive] [\-\-public\-addresses=<filename>] [\-\-public\-interface=<interface>] [\-\-event\-script=<filename>] [\-\-logfile=<filename>] [\-\-listen=<address>] [\-\-transport=<STRING>] [\-\-socket=<filename>] [\-d\ \-\-debug=<INTEGER>] [\-\-torture]
|
||||
\fBctdbd\fR {\-\-reclock=<filename>} {\-\-nlist=<filename>} {\-\-dbdir=<directory>} [\-?\ \-\-help] [\-\-usage] [\-i\ \-\-interactive] [\-\-public\-addresses=<filename>] [\-\-event\-script=<filename>] [\-\-logfile=<filename>] [\-\-listen=<address>] [\-\-transport=<STRING>] [\-\-socket=<filename>] [\-d\ \-\-debug=<INTEGER>] [\-\-torture]
|
||||
.SH "DESCRIPTION"
|
||||
.PP
|
||||
ctdbd is the main ctdb daemon.
|
||||
@ -66,16 +66,11 @@ By default ctdbd will detach itself from the shell and run in the background as
|
||||
.PP
|
||||
\-\-public_addresses=<filename>
|
||||
.RS 3n
|
||||
When used with IP takeover this specifies a file containing the public ip addresses to use on the cluster. This file contains one entry for each node in the cluster.
|
||||
When used with IP takeover this specifies a file containing the public ip addresses to use on the cluster. This file contains a list of ip addresses, netmasks and interfaces. When ctdb is operational it will distribute these public ip addresses evenly across the available nodes.
|
||||
.sp
|
||||
This is usually the file /etc/ctdb/public_addresses
|
||||
.RE
|
||||
.PP
|
||||
\-\-public\-interface=<interface>
|
||||
.RS 3n
|
||||
When used with IP takeover this option specifies which physical interface should be used to attach the public addresses to.
|
||||
.RE
|
||||
.PP
|
||||
\-\-event\-script=<filename>
|
||||
.RS 3n
|
||||
This option is used to specify which events script that ctdbd will use to manage services when the cluster configuration changes.
|
||||
@ -122,7 +117,7 @@ You do NOT want to use this option unless you are developing and testing new fun
|
||||
.RE
|
||||
.SH "PRIVATE VS PUBLIC ADDRESSES"
|
||||
.PP
|
||||
When used for ip takeover in a HA environment, each node in a ctdb cluster has two ip addresses assigned to it. One private and one public.
|
||||
When used for ip takeover in a HA environment, each node in a ctdb cluster has multiple ip addresses assigned to it. One private and one or more public.
|
||||
.SS "Private address"
|
||||
.PP
|
||||
This is the physical ip address of the node which is configured in linux and attached to a physical interface. This address uniquely identifies a physical node in the cluster and is the ip addresses that ctdbd will use to communicate with the ctdbd daemons on the other nodes in the cluster.
|
||||
@ -149,28 +144,28 @@ Since the private addresses are only available to the network when the correspon
|
||||
.PP
|
||||
A public address on the other hand is not attached to an interface. This address is managed by ctdbd itself and is attached/detached to a physical node at runtime. You should NOT have this address configured to an interface in linux. Let ctdbd manage these addresses.
|
||||
.PP
|
||||
The ctdb cluster will assign/reassign these public addresses across the available nodes in the cluster. When one node fails, its public address will be migrated to and taken over by a different node in the cluster to ensure that all public addresses are always available to clients.
|
||||
The ctdb cluster will assign/reassign these public addresses across the available healthy nodes in the cluster. When one node fails, its public address will be migrated to and taken over by a different node in the cluster to ensure that all public addresses are always available to clients.
|
||||
.PP
|
||||
These addresses are not physically attached to a specific node. The 'ctdb ip' command can be used to view the current assignment of public addresses and which physical node is currently serving it.
|
||||
.PP
|
||||
By default, each node will when operational always serve its primary public address which is the corresponding line for that node number in the public addresses file. I.e. as long as node X is available and fully oprational it will always be the node that serves the corresponding public address.
|
||||
.PP
|
||||
The list of public addresses also contain the netmask for that address. the reason for this is because ctdbd needs to know which mask to use when it adds/removes the address from a physical node. This netmask is also used by ctdbd when making decisions on which node should take over a public ip address for a failed node. A node will only be allowed to take over a public address from a different node IFF that public address resides in the same subnet as the primary public address for that node.
|
||||
The list of public addresses also contain the netmask and the interface where this address should be attached.
|
||||
|
||||
Example /etc/ctdb/public_addresses for a four node cluster:
|
||||
|
||||
.sp
|
||||
.RS 3n
|
||||
.nf
|
||||
11.1.1.1/24
|
||||
11.1.1.2/24
|
||||
11.1.2.1/24
|
||||
11.1.2.2/24
|
||||
11.1.1.1/24 eth0
|
||||
11.1.1.2/24 eth0
|
||||
11.1.2.1/24 eth1
|
||||
11.1.2.2/24 eth1
|
||||
|
||||
.fi
|
||||
.RE
|
||||
.PP
|
||||
In this example, if node 3 fails, its public address can be taken over by node 2 since node 2 is on the same subnet as 3 but not by node 0 or node 1 since node 0 and 1 are both on a different subnet from node 3.
|
||||
In this example, two nodes in the cluster will serve 11.1.1.1 and 11.1.1.2 through interface eth0 and two (possibly other) nodes will serve 11.1.2.1 and 11.1.2.2 through eth1.
|
||||
.PP
|
||||
The public address file must be the same on all nodes. Since this file also specifies which interface the address should be attached to it is important that all nodes use the same naming convention for interfaces.
|
||||
.SH "NODE STATUS"
|
||||
.PP
|
||||
The current status of each node in the cluster can be viewed by the 'ctdb status' command.
|
||||
|
@ -1,4 +1,4 @@
|
||||
<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>ctdbd</title><meta name="generator" content="DocBook XSL Stylesheets V1.71.0"></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="refentry" lang="en"><a name="ctdbd.1"></a><div class="titlepage"></div><div class="refnamediv"><h2>Name</h2><p>ctdbd — The CTDB cluster daemon</p></div><div class="refsynopsisdiv"><h2>Synopsis</h2><div class="cmdsynopsis"><p><code class="command">ctdbd</code> </p></div><div class="cmdsynopsis"><p><code class="command">ctdbd</code> {--reclock=<filename>} {--nlist=<filename>} {--dbdir=<directory>} [-? --help] [--usage] [-i --interactive] [--public-addresses=<filename>] [--public-interface=<interface>] [--event-script=<filename>] [--logfile=<filename>] [--listen=<address>] [--transport=<STRING>] [--socket=<filename>] [-d --debug=<INTEGER>] [--torture]</p></div></div><div class="refsect1" lang="en"><a name="id2481068"></a><h2>DESCRIPTION</h2><p>
|
||||
<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>ctdbd</title><meta name="generator" content="DocBook XSL Stylesheets V1.71.0"></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="refentry" lang="en"><a name="ctdbd.1"></a><div class="titlepage"></div><div class="refnamediv"><h2>Name</h2><p>ctdbd — The CTDB cluster daemon</p></div><div class="refsynopsisdiv"><h2>Synopsis</h2><div class="cmdsynopsis"><p><code class="command">ctdbd</code> </p></div><div class="cmdsynopsis"><p><code class="command">ctdbd</code> {--reclock=<filename>} {--nlist=<filename>} {--dbdir=<directory>} [-? --help] [--usage] [-i --interactive] [--public-addresses=<filename>] [--event-script=<filename>] [--logfile=<filename>] [--listen=<address>] [--transport=<STRING>] [--socket=<filename>] [-d --debug=<INTEGER>] [--torture]</p></div></div><div class="refsect1" lang="en"><a name="id2480886"></a><h2>DESCRIPTION</h2><p>
|
||||
ctdbd is the main ctdb daemon.
|
||||
</p><p>
|
||||
ctdbd provides a clustered version of the TDB database with automatic rebuild/recovery of the databases upon nodefailures.
|
||||
@ -8,7 +8,7 @@
|
||||
ctdbd provides monitoring of all nodes in the cluster and automatically reconfigures the cluster and recovers upon node failures.
|
||||
</p><p>
|
||||
ctdbd is the main component in clustered Samba that provides a high-awailability load-sharing CIFS server cluster.
|
||||
</p></div><div class="refsect1" lang="en"><a name="id2481100"></a><h2>OPTIONS</h2><div class="variablelist"><dl><dt><span class="term">-? --help</span></dt><dd><p>
|
||||
</p></div><div class="refsect1" lang="en"><a name="id2481092"></a><h2>OPTIONS</h2><div class="variablelist"><dl><dt><span class="term">-? --help</span></dt><dd><p>
|
||||
Print some help text to the screen.
|
||||
</p></dd><dt><span class="term">--usage</span></dt><dd><p>
|
||||
Print useage information to the screen.
|
||||
@ -28,11 +28,9 @@
|
||||
By default ctdbd will detach itself from the shell and run in
|
||||
the background as a daemon. This option makes ctdbd to start in interactive mode.
|
||||
</p></dd><dt><span class="term">--public_addresses=<filename></span></dt><dd><p>
|
||||
When used with IP takeover this specifies a file containing the public ip addresses to use on the cluster. This file contains one entry for each node in the cluster.
|
||||
When used with IP takeover this specifies a file containing the public ip addresses to use on the cluster. This file contains a list of ip addresses, netmasks and interfaces. When ctdb is operational it will distribute these public ip addresses evenly across the available nodes.
|
||||
</p><p>
|
||||
This is usually the file /etc/ctdb/public_addresses
|
||||
</p></dd><dt><span class="term">--public-interface=<interface></span></dt><dd><p>
|
||||
When used with IP takeover this option specifies which physical interface should be used to attach the public addresses to.
|
||||
</p></dd><dt><span class="term">--event-script=<filename></span></dt><dd><p>
|
||||
This option is used to specify which events script that ctdbd will
|
||||
use to manage services when the cluster configuration changes.
|
||||
@ -58,10 +56,10 @@
|
||||
This option is only used for development and testing of ctdbd. It adds artificial errors and failures to the common codepaths in ctdbd to verify that ctdbd can recover correctly for failures.
|
||||
</p><p>
|
||||
You do NOT want to use this option unless you are developing and testing new functionality in ctdbd.
|
||||
</p></dd></dl></div></div><div class="refsect1" lang="en"><a name="id2528438"></a><h2>Private vs Public addresses</h2><p>
|
||||
</p></dd></dl></div></div><div class="refsect1" lang="en"><a name="id2528418"></a><h2>Private vs Public addresses</h2><p>
|
||||
When used for ip takeover in a HA environment, each node in a ctdb
|
||||
cluster has two ip addresses assigned to it. One private and one public.
|
||||
</p><div class="refsect2" lang="en"><a name="id2528448"></a><h3>Private address</h3><p>
|
||||
cluster has multiple ip addresses assigned to it. One private and one or more public.
|
||||
</p><div class="refsect2" lang="en"><a name="id2528428"></a><h3>Private address</h3><p>
|
||||
This is the physical ip address of the node which is configured in
|
||||
linux and attached to a physical interface. This address uniquely
|
||||
identifies a physical node in the cluster and is the ip addresses
|
||||
@ -89,14 +87,14 @@
|
||||
10.1.1.2
|
||||
10.1.1.3
|
||||
10.1.1.4
|
||||
</pre></div><div class="refsect2" lang="en"><a name="id2528495"></a><h3>Public address</h3><p>
|
||||
</pre></div><div class="refsect2" lang="en"><a name="id2528475"></a><h3>Public address</h3><p>
|
||||
A public address on the other hand is not attached to an interface.
|
||||
This address is managed by ctdbd itself and is attached/detached to
|
||||
a physical node at runtime. You should NOT have this address configured
|
||||
to an interface in linux. Let ctdbd manage these addresses.
|
||||
</p><p>
|
||||
The ctdb cluster will assign/reassign these public addresses across the
|
||||
available nodes in the cluster. When one node fails, its public address
|
||||
available healthy nodes in the cluster. When one node fails, its public address
|
||||
will be migrated to and taken over by a different node in the cluster
|
||||
to ensure that all public addresses are always available to clients.
|
||||
</p><p>
|
||||
@ -104,32 +102,25 @@
|
||||
The 'ctdb ip' command can be used to view the current assignment of
|
||||
public addresses and which physical node is currently serving it.
|
||||
</p><p>
|
||||
By default, each node will when operational always serve its primary
|
||||
public address which is the corresponding line for that node number
|
||||
in the public addresses file. I.e. as long as node X is available and
|
||||
fully oprational it will always be the node that serves the
|
||||
corresponding public address.
|
||||
</p><p>
|
||||
The list of public addresses also contain the netmask for that address.
|
||||
the reason for this is because ctdbd needs to know which mask to use
|
||||
when it adds/removes the address from a physical node. This netmask
|
||||
is also used by ctdbd when making decisions on which node should take
|
||||
over a public ip address for a failed node.
|
||||
A node will only be allowed to take over a public address from a
|
||||
different node IFF that public address resides in the same subnet
|
||||
as the primary public address for that node.
|
||||
The list of public addresses also contain the netmask and the
|
||||
interface where this address should be attached.
|
||||
</p>
|
||||
Example /etc/ctdb/public_addresses for a four node cluster:
|
||||
<pre class="screen">
|
||||
11.1.1.1/24
|
||||
11.1.1.2/24
|
||||
11.1.2.1/24
|
||||
11.1.2.2/24
|
||||
11.1.1.1/24 eth0
|
||||
11.1.1.2/24 eth0
|
||||
11.1.2.1/24 eth1
|
||||
11.1.2.2/24 eth1
|
||||
</pre><p>
|
||||
In this example, if node 3 fails, its public address can be taken over
|
||||
by node 2 since node 2 is on the same subnet as 3 but not by node 0 or
|
||||
node 1 since node 0 and 1 are both on a different subnet from node 3.
|
||||
</p></div></div><div class="refsect1" lang="en"><a name="id2528564"></a><h2>Node status</h2><p>
|
||||
In this example, two nodes in the cluster will serve 11.1.1.1 and
|
||||
11.1.1.2 through interface eth0 and two (possibly other) nodes will
|
||||
serve 11.1.2.1 and 11.1.2.2 through eth1.
|
||||
</p><p>
|
||||
The public address file must be the same on all nodes.
|
||||
Since this file also specifies which interface the address should be
|
||||
attached to it is important that all nodes use the same naming convention
|
||||
for interfaces.
|
||||
</p></div></div><div class="refsect1" lang="en"><a name="id2528534"></a><h2>Node status</h2><p>
|
||||
The current status of each node in the cluster can be viewed by the
|
||||
'ctdb status' command.
|
||||
</p><p>
|
||||
@ -160,10 +151,10 @@
|
||||
investigated and require an administrative action to rectify. This node
|
||||
does not perticipate in the CTDB cluster but can still be communicated
|
||||
with. I.e. ctdb commands can be sent to it.
|
||||
</p></div><div class="refsect1" lang="en"><a name="id2528621"></a><h2>SEE ALSO</h2><p>
|
||||
</p></div><div class="refsect1" lang="en"><a name="id2528591"></a><h2>SEE ALSO</h2><p>
|
||||
ctdb(1), onnode(1)
|
||||
<a href="http://ctdb.samba.org/" target="_top">http://ctdb.samba.org/</a>
|
||||
</p></div><div class="refsect1" lang="en"><a name="id2528634"></a><h2>COPYRIGHT/LICENSE</h2><div class="literallayout"><p><br>
|
||||
</p></div><div class="refsect1" lang="en"><a name="id2528604"></a><h2>COPYRIGHT/LICENSE</h2><div class="literallayout"><p><br>
|
||||
Copyright (C) Andrew Tridgell 2007<br>
|
||||
Copyright (C) Ronnie sahlberg 2007<br>
|
||||
<br>
|
||||
|
@ -27,7 +27,6 @@
|
||||
<arg choice="opt">--usage</arg>
|
||||
<arg choice="opt">-i --interactive</arg>
|
||||
<arg choice="opt">--public-addresses=<filename></arg>
|
||||
<arg choice="opt">--public-interface=<interface></arg>
|
||||
<arg choice="opt">--event-script=<filename></arg>
|
||||
<arg choice="opt">--logfile=<filename></arg>
|
||||
<arg choice="opt">--listen=<address></arg>
|
||||
@ -122,7 +121,7 @@
|
||||
<varlistentry><term>--public_addresses=<filename></term>
|
||||
<listitem>
|
||||
<para>
|
||||
When used with IP takeover this specifies a file containing the public ip addresses to use on the cluster. This file contains one entry for each node in the cluster.
|
||||
When used with IP takeover this specifies a file containing the public ip addresses to use on the cluster. This file contains a list of ip addresses, netmasks and interfaces. When ctdb is operational it will distribute these public ip addresses evenly across the available nodes.
|
||||
</para>
|
||||
<para>
|
||||
This is usually the file /etc/ctdb/public_addresses
|
||||
@ -130,14 +129,6 @@
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry><term>--public-interface=<interface></term>
|
||||
<listitem>
|
||||
<para>
|
||||
When used with IP takeover this option specifies which physical interface should be used to attach the public addresses to.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry><term>--event-script=<filename></term>
|
||||
<listitem>
|
||||
<para>
|
||||
@ -216,7 +207,7 @@
|
||||
<refsect1><title>Private vs Public addresses</title>
|
||||
<para>
|
||||
When used for ip takeover in a HA environment, each node in a ctdb
|
||||
cluster has two ip addresses assigned to it. One private and one public.
|
||||
cluster has multiple ip addresses assigned to it. One private and one or more public.
|
||||
</para>
|
||||
|
||||
<refsect2><title>Private address</title>
|
||||
@ -262,7 +253,7 @@
|
||||
</para>
|
||||
<para>
|
||||
The ctdb cluster will assign/reassign these public addresses across the
|
||||
available nodes in the cluster. When one node fails, its public address
|
||||
available healthy nodes in the cluster. When one node fails, its public address
|
||||
will be migrated to and taken over by a different node in the cluster
|
||||
to ensure that all public addresses are always available to clients.
|
||||
</para>
|
||||
@ -272,33 +263,26 @@
|
||||
public addresses and which physical node is currently serving it.
|
||||
</para>
|
||||
<para>
|
||||
By default, each node will when operational always serve its primary
|
||||
public address which is the corresponding line for that node number
|
||||
in the public addresses file. I.e. as long as node X is available and
|
||||
fully oprational it will always be the node that serves the
|
||||
corresponding public address.
|
||||
</para>
|
||||
<para>
|
||||
The list of public addresses also contain the netmask for that address.
|
||||
the reason for this is because ctdbd needs to know which mask to use
|
||||
when it adds/removes the address from a physical node. This netmask
|
||||
is also used by ctdbd when making decisions on which node should take
|
||||
over a public ip address for a failed node.
|
||||
A node will only be allowed to take over a public address from a
|
||||
different node IFF that public address resides in the same subnet
|
||||
as the primary public address for that node.
|
||||
The list of public addresses also contain the netmask and the
|
||||
interface where this address should be attached.
|
||||
</para>
|
||||
Example /etc/ctdb/public_addresses for a four node cluster:
|
||||
<screen format="linespecific">
|
||||
11.1.1.1/24
|
||||
11.1.1.2/24
|
||||
11.1.2.1/24
|
||||
11.1.2.2/24
|
||||
11.1.1.1/24 eth0
|
||||
11.1.1.2/24 eth0
|
||||
11.1.2.1/24 eth1
|
||||
11.1.2.2/24 eth1
|
||||
</screen>
|
||||
<para>
|
||||
In this example, if node 3 fails, its public address can be taken over
|
||||
by node 2 since node 2 is on the same subnet as 3 but not by node 0 or
|
||||
node 1 since node 0 and 1 are both on a different subnet from node 3.
|
||||
In this example, two nodes in the cluster will serve 11.1.1.1 and
|
||||
11.1.1.2 through interface eth0 and two (possibly other) nodes will
|
||||
serve 11.1.2.1 and 11.1.2.2 through eth1.
|
||||
</para>
|
||||
<para>
|
||||
The public address file must be the same on all nodes.
|
||||
Since this file also specifies which interface the address should be
|
||||
attached to it is important that all nodes use the same naming convention
|
||||
for interfaces.
|
||||
</para>
|
||||
</refsect2>
|
||||
</refsect1>
|
||||
|
@ -53,7 +53,7 @@ struct ctdb_tcp_wire_array {
|
||||
|
||||
/* the list of tcp tickles used by get/set tcp tickle list */
|
||||
struct ctdb_control_tcp_tickle_list {
|
||||
uint32_t vnn;
|
||||
struct sockaddr_in ip;
|
||||
struct ctdb_tcp_wire_array tickles;
|
||||
};
|
||||
|
||||
@ -136,6 +136,35 @@ struct ctdb_client {
|
||||
};
|
||||
|
||||
|
||||
/* state associated with a public ip address */
|
||||
struct ctdb_vnn {
|
||||
struct ctdb_vnn *next;
|
||||
|
||||
struct ctdb_vnn_list *vnn_list;
|
||||
|
||||
const char *public_address;
|
||||
uint8_t public_netmask_bits;
|
||||
|
||||
/* the node number that is serving this public address, if any.
|
||||
If no node serves this ip it is set to -1 */
|
||||
int32_t pnn;
|
||||
|
||||
/* List of clients to tickle for this public address */
|
||||
struct ctdb_tcp_array *tcp_array;
|
||||
|
||||
/* whether we need to update the other nodes with changes to our list
|
||||
of connected clients */
|
||||
bool tcp_update_needed;
|
||||
};
|
||||
|
||||
struct ctdb_vnn_list {
|
||||
struct ctdb_vnn_list *next;
|
||||
const char *iface;
|
||||
uint32_t num_ips;
|
||||
struct ctdb_vnn *vnn;
|
||||
struct ctdb_kill_tcp *killtcp;
|
||||
};
|
||||
|
||||
/*
|
||||
state associated with one node
|
||||
*/
|
||||
@ -144,7 +173,7 @@ struct ctdb_node {
|
||||
struct ctdb_address address;
|
||||
const char *name; /* for debug messages */
|
||||
void *private_data; /* private to transport */
|
||||
uint32_t vnn;
|
||||
uint32_t pnn;
|
||||
#define NODE_FLAGS_DISCONNECTED 0x00000001 /* node isn't connected */
|
||||
#define NODE_FLAGS_UNHEALTHY 0x00000002 /* monitoring says node is unhealthy */
|
||||
#define NODE_FLAGS_PERMANENTLY_DISABLED 0x00000004 /* administrator has disabled node */
|
||||
@ -161,21 +190,6 @@ struct ctdb_node {
|
||||
/* a list of controls pending to this node, so we can time them out quickly
|
||||
if the node becomes disconnected */
|
||||
struct daemon_control_state *pending_controls;
|
||||
|
||||
/* the public address of this node, if known */
|
||||
const char *public_address;
|
||||
uint8_t public_netmask_bits;
|
||||
|
||||
/* the node number that has taken over this nodes public address, if any.
|
||||
If not taken over, then set to -1 */
|
||||
int32_t takeover_vnn;
|
||||
|
||||
/* List of clients to tickle for this public address */
|
||||
struct ctdb_tcp_array *tcp_array;
|
||||
|
||||
/* whether we need to update the other nodes with changes to our list
|
||||
of connected clients */
|
||||
bool tcp_update_needed;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -299,14 +313,6 @@ enum ctdb_freeze_mode {CTDB_FREEZE_NONE, CTDB_FREEZE_PENDING, CTDB_FREEZE_FROZEN
|
||||
#define CTDB_MONITORING_ACTIVE 0
|
||||
#define CTDB_MONITORING_DISABLED 1
|
||||
|
||||
/* information about IP takeover */
|
||||
struct ctdb_takeover {
|
||||
bool enabled;
|
||||
const char *interface;
|
||||
const char *event_script_dir;
|
||||
TALLOC_CTX *last_ctx;
|
||||
};
|
||||
|
||||
/* main state of the ctdb daemon */
|
||||
struct ctdb_context {
|
||||
struct event_context *ev;
|
||||
@ -332,6 +338,7 @@ struct ctdb_context {
|
||||
struct idr_context *idr;
|
||||
uint16_t idr_cnt;
|
||||
struct ctdb_node **nodes; /* array of nodes in the cluster - indexed by vnn */
|
||||
struct ctdb_vnn_list *vnn_list; /* list of public ip addresses and interfaces */
|
||||
char *err_msg;
|
||||
const struct ctdb_methods *methods; /* transport methods */
|
||||
const struct ctdb_upcalls *upcalls; /* transport upcalls */
|
||||
@ -344,12 +351,12 @@ struct ctdb_context {
|
||||
uint32_t num_clients;
|
||||
uint32_t recovery_master;
|
||||
struct ctdb_call_state *pending_calls;
|
||||
struct ctdb_takeover takeover;
|
||||
struct ctdb_client_ip *client_ip_list;
|
||||
bool do_setsched;
|
||||
void *saved_scheduler_param;
|
||||
struct ctdb_kill_tcp *killtcp;
|
||||
struct _trbt_tree_t *server_ids;
|
||||
const char *event_script_dir;
|
||||
TALLOC_CTX *takeover_ctx;
|
||||
};
|
||||
|
||||
struct ctdb_db_context {
|
||||
@ -503,7 +510,6 @@ struct ctdb_control_killtcp {
|
||||
struct for tcp_add and tcp_remove controls
|
||||
*/
|
||||
struct ctdb_control_tcp_vnn {
|
||||
uint32_t vnn;
|
||||
struct sockaddr_in src;
|
||||
struct sockaddr_in dest;
|
||||
};
|
||||
@ -954,7 +960,7 @@ struct ctdb_control_list_tunable {
|
||||
status
|
||||
*/
|
||||
struct ctdb_node_and_flags {
|
||||
uint32_t vnn;
|
||||
uint32_t pnn;
|
||||
uint32_t flags;
|
||||
struct sockaddr_in sin;
|
||||
|
||||
@ -1031,8 +1037,7 @@ int32_t ctdb_control_release_ip(struct ctdb_context *ctdb,
|
||||
bool *async_reply);
|
||||
|
||||
struct ctdb_public_ip {
|
||||
uint32_t vnn;
|
||||
uint32_t takeover_vnn;
|
||||
uint32_t pnn;
|
||||
struct sockaddr_in sin;
|
||||
};
|
||||
int ctdb_ctrl_takeover_ip(struct ctdb_context *ctdb, struct timeval timeout,
|
||||
@ -1122,7 +1127,7 @@ int ctdb_ctrl_get_tcp_tickles(struct ctdb_context *ctdb,
|
||||
struct timeval timeout,
|
||||
uint32_t destnode,
|
||||
TALLOC_CTX *mem_ctx,
|
||||
uint32_t vnn,
|
||||
struct sockaddr_in *ip,
|
||||
struct ctdb_control_tcp_tickle_list **list);
|
||||
|
||||
|
||||
|
@ -289,7 +289,7 @@ static int32_t ctdb_control_dispatch(struct ctdb_context *ctdb,
|
||||
return ctdb_control_kill_tcp(ctdb, indata);
|
||||
|
||||
case CTDB_CONTROL_GET_TCP_TICKLE_LIST:
|
||||
CHECK_CONTROL_DATA_SIZE(sizeof(uint32_t));
|
||||
CHECK_CONTROL_DATA_SIZE(sizeof(struct sockaddr_in));
|
||||
return ctdb_control_get_tcp_tickle_list(ctdb, indata, outdata);
|
||||
|
||||
case CTDB_CONTROL_SET_TCP_TICKLE_LIST:
|
||||
|
@ -43,7 +43,7 @@ static void ctdb_check_for_dead_nodes(struct event_context *ev, struct timed_eve
|
||||
/* send a keepalive to all other nodes, unless */
|
||||
for (i=0;i<ctdb->num_nodes;i++) {
|
||||
struct ctdb_node *node = ctdb->nodes[i];
|
||||
if (node->vnn == ctdb->vnn) {
|
||||
if (node->pnn == ctdb->vnn) {
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -65,9 +65,9 @@ static void ctdb_check_for_dead_nodes(struct event_context *ev, struct timed_eve
|
||||
node->rx_cnt = 0;
|
||||
|
||||
if (node->dead_count >= ctdb->tunable.keepalive_limit) {
|
||||
DEBUG(0,("dead count reached for node %u\n", node->vnn));
|
||||
DEBUG(0,("dead count reached for node %u\n", node->pnn));
|
||||
ctdb_node_dead(node);
|
||||
ctdb_send_keepalive(ctdb, node->vnn);
|
||||
ctdb_send_keepalive(ctdb, node->pnn);
|
||||
/* maybe tell the transport layer to kill the
|
||||
sockets as well?
|
||||
*/
|
||||
@ -75,8 +75,8 @@ static void ctdb_check_for_dead_nodes(struct event_context *ev, struct timed_eve
|
||||
}
|
||||
|
||||
if (node->tx_cnt == 0) {
|
||||
DEBUG(5,("sending keepalive to %u\n", node->vnn));
|
||||
ctdb_send_keepalive(ctdb, node->vnn);
|
||||
DEBUG(5,("sending keepalive to %u\n", node->pnn));
|
||||
ctdb_send_keepalive(ctdb, node->pnn);
|
||||
}
|
||||
|
||||
node->tx_cnt = 0;
|
||||
|
@ -162,7 +162,7 @@ ctdb_control_getnodemap(struct ctdb_context *ctdb, uint32_t opcode, TDB_DATA ind
|
||||
node_map->num = num_nodes;
|
||||
for (i=0; i<num_nodes; i++) {
|
||||
inet_aton(ctdb->nodes[i]->address.address, &node_map->nodes[i].sin.sin_addr);
|
||||
node_map->nodes[i].vnn = ctdb->nodes[i]->vnn;
|
||||
node_map->nodes[i].pnn = ctdb->nodes[i]->pnn;
|
||||
node_map->nodes[i].flags = ctdb->nodes[i]->flags;
|
||||
}
|
||||
|
||||
|
@ -166,7 +166,7 @@ static enum monitor_result freeze_all_nodes(struct ctdb_context *ctdb, struct ct
|
||||
}
|
||||
state = ctdb_ctrl_freeze_send(ctdb, mem_ctx,
|
||||
CONTROL_TIMEOUT(),
|
||||
nodemap->nodes[j].vnn);
|
||||
nodemap->nodes[j].pnn);
|
||||
if (state == NULL) {
|
||||
/* we failed to send the control, treat this as
|
||||
an error and try again next iteration
|
||||
@ -222,16 +222,16 @@ static int set_recovery_mode(struct ctdb_context *ctdb, struct ctdb_node_map *no
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = ctdb_ctrl_setrecmode(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].vnn, rec_mode);
|
||||
ret = ctdb_ctrl_setrecmode(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].pnn, rec_mode);
|
||||
if (ret != 0) {
|
||||
DEBUG(0, (__location__ " Unable to set recmode on node %u\n", nodemap->nodes[j].vnn));
|
||||
DEBUG(0, (__location__ " Unable to set recmode on node %u\n", nodemap->nodes[j].pnn));
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (rec_mode == CTDB_RECOVERY_NORMAL) {
|
||||
ret = ctdb_ctrl_thaw(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].vnn);
|
||||
ret = ctdb_ctrl_thaw(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].pnn);
|
||||
if (ret != 0) {
|
||||
DEBUG(0, (__location__ " Unable to thaw node %u\n", nodemap->nodes[j].vnn));
|
||||
DEBUG(0, (__location__ " Unable to thaw node %u\n", nodemap->nodes[j].pnn));
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@ -254,9 +254,9 @@ static int set_recovery_master(struct ctdb_context *ctdb, struct ctdb_node_map *
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = ctdb_ctrl_setrecmaster(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].vnn, vnn);
|
||||
ret = ctdb_ctrl_setrecmaster(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].pnn, vnn);
|
||||
if (ret != 0) {
|
||||
DEBUG(0, (__location__ " Unable to set recmaster on node %u\n", nodemap->nodes[j].vnn));
|
||||
DEBUG(0, (__location__ " Unable to set recmaster on node %u\n", nodemap->nodes[j].pnn));
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@ -277,7 +277,7 @@ static int create_missing_remote_databases(struct ctdb_context *ctdb, struct ctd
|
||||
/* verify that all other nodes have all our databases */
|
||||
for (j=0; j<nodemap->num; j++) {
|
||||
/* we dont need to ourself ourselves */
|
||||
if (nodemap->nodes[j].vnn == vnn) {
|
||||
if (nodemap->nodes[j].pnn == vnn) {
|
||||
continue;
|
||||
}
|
||||
/* dont check nodes that are unavailable */
|
||||
@ -285,7 +285,7 @@ static int create_missing_remote_databases(struct ctdb_context *ctdb, struct ctd
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = ctdb_ctrl_getdbmap(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].vnn,
|
||||
ret = ctdb_ctrl_getdbmap(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].pnn,
|
||||
mem_ctx, &remote_dbmap);
|
||||
if (ret != 0) {
|
||||
DEBUG(0, (__location__ " Unable to get dbids from node %u\n", vnn));
|
||||
@ -312,7 +312,7 @@ static int create_missing_remote_databases(struct ctdb_context *ctdb, struct ctd
|
||||
DEBUG(0, (__location__ " Unable to get dbname from node %u\n", vnn));
|
||||
return -1;
|
||||
}
|
||||
ctdb_ctrl_createdb(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].vnn, mem_ctx, name);
|
||||
ctdb_ctrl_createdb(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].pnn, mem_ctx, name);
|
||||
if (ret != 0) {
|
||||
DEBUG(0, (__location__ " Unable to create remote db:%s\n", name));
|
||||
return -1;
|
||||
@ -336,7 +336,7 @@ static int create_missing_local_databases(struct ctdb_context *ctdb, struct ctdb
|
||||
/* verify that we have all database any other node has */
|
||||
for (j=0; j<nodemap->num; j++) {
|
||||
/* we dont need to ourself ourselves */
|
||||
if (nodemap->nodes[j].vnn == vnn) {
|
||||
if (nodemap->nodes[j].pnn == vnn) {
|
||||
continue;
|
||||
}
|
||||
/* dont check nodes that are unavailable */
|
||||
@ -344,7 +344,7 @@ static int create_missing_local_databases(struct ctdb_context *ctdb, struct ctdb
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = ctdb_ctrl_getdbmap(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].vnn,
|
||||
ret = ctdb_ctrl_getdbmap(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].pnn,
|
||||
mem_ctx, &remote_dbmap);
|
||||
if (ret != 0) {
|
||||
DEBUG(0, (__location__ " Unable to get dbids from node %u\n", vnn));
|
||||
@ -367,11 +367,11 @@ static int create_missing_local_databases(struct ctdb_context *ctdb, struct ctdb
|
||||
/* ok so we need to create this database and
|
||||
rebuild dbmap
|
||||
*/
|
||||
ctdb_ctrl_getdbname(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].vnn,
|
||||
ctdb_ctrl_getdbname(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].pnn,
|
||||
remote_dbmap->dbids[db], mem_ctx, &name);
|
||||
if (ret != 0) {
|
||||
DEBUG(0, (__location__ " Unable to get dbname from node %u\n",
|
||||
nodemap->nodes[j].vnn));
|
||||
nodemap->nodes[j].pnn));
|
||||
return -1;
|
||||
}
|
||||
ctdb_ctrl_createdb(ctdb, CONTROL_TIMEOUT(), vnn, mem_ctx, name);
|
||||
@ -405,18 +405,18 @@ static int pull_all_remote_databases(struct ctdb_context *ctdb, struct ctdb_node
|
||||
for (i=0;i<dbmap->num;i++) {
|
||||
for (j=0; j<nodemap->num; j++) {
|
||||
/* we dont need to merge with ourselves */
|
||||
if (nodemap->nodes[j].vnn == vnn) {
|
||||
if (nodemap->nodes[j].pnn == vnn) {
|
||||
continue;
|
||||
}
|
||||
/* dont merge from nodes that are unavailable */
|
||||
if (nodemap->nodes[j].flags & NODE_FLAGS_INACTIVE) {
|
||||
continue;
|
||||
}
|
||||
ret = ctdb_ctrl_copydb(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].vnn,
|
||||
ret = ctdb_ctrl_copydb(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].pnn,
|
||||
vnn, dbmap->dbids[i], CTDB_LMASTER_ANY, mem_ctx);
|
||||
if (ret != 0) {
|
||||
DEBUG(0, (__location__ " Unable to copy db from node %u to node %u\n",
|
||||
nodemap->nodes[j].vnn, vnn));
|
||||
nodemap->nodes[j].pnn, vnn));
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@ -441,9 +441,9 @@ static int update_dmaster_on_all_databases(struct ctdb_context *ctdb, struct ctd
|
||||
if (nodemap->nodes[j].flags & NODE_FLAGS_INACTIVE) {
|
||||
continue;
|
||||
}
|
||||
ret = ctdb_ctrl_setdmaster(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].vnn, ctdb, dbmap->dbids[i], vnn);
|
||||
ret = ctdb_ctrl_setdmaster(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].pnn, ctdb, dbmap->dbids[i], vnn);
|
||||
if (ret != 0) {
|
||||
DEBUG(0, (__location__ " Unable to set dmaster for node %u db:0x%08x\n", nodemap->nodes[j].vnn, dbmap->dbids[i]));
|
||||
DEBUG(0, (__location__ " Unable to set dmaster for node %u db:0x%08x\n", nodemap->nodes[j].pnn, dbmap->dbids[i]));
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@ -463,7 +463,7 @@ static int update_flags_on_all_nodes(struct ctdb_context *ctdb, struct ctdb_node
|
||||
struct ctdb_node_flag_change c;
|
||||
TDB_DATA data;
|
||||
|
||||
c.vnn = nodemap->nodes[i].vnn;
|
||||
c.vnn = nodemap->nodes[i].pnn;
|
||||
c.old_flags = nodemap->nodes[i].flags;
|
||||
c.new_flags = nodemap->nodes[i].flags;
|
||||
|
||||
@ -496,11 +496,11 @@ static int vacuum_db(struct ctdb_context *ctdb, uint32_t db_id, struct ctdb_node
|
||||
if (nodemap->nodes[i].flags & NODE_FLAGS_INACTIVE) {
|
||||
continue;
|
||||
}
|
||||
ret = ctdb_ctrl_set_rsn_nonempty(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[i].vnn,
|
||||
ret = ctdb_ctrl_set_rsn_nonempty(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[i].pnn,
|
||||
db_id, max_rsn+1);
|
||||
if (ret != 0) {
|
||||
DEBUG(0,(__location__ " Failed to set rsn on node %u to %llu\n",
|
||||
nodemap->nodes[i].vnn, (unsigned long long)max_rsn+1));
|
||||
nodemap->nodes[i].pnn, (unsigned long long)max_rsn+1));
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@ -510,11 +510,11 @@ static int vacuum_db(struct ctdb_context *ctdb, uint32_t db_id, struct ctdb_node
|
||||
if (nodemap->nodes[i].flags & NODE_FLAGS_INACTIVE) {
|
||||
continue;
|
||||
}
|
||||
ret = ctdb_ctrl_delete_low_rsn(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[i].vnn,
|
||||
ret = ctdb_ctrl_delete_low_rsn(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[i].pnn,
|
||||
db_id, max_rsn+1);
|
||||
if (ret != 0) {
|
||||
DEBUG(0,(__location__ " Failed to delete records on node %u with rsn below %llu\n",
|
||||
nodemap->nodes[i].vnn, (unsigned long long)max_rsn+1));
|
||||
nodemap->nodes[i].pnn, (unsigned long long)max_rsn+1));
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@ -554,18 +554,18 @@ static int push_all_local_databases(struct ctdb_context *ctdb, struct ctdb_node_
|
||||
for (i=0;i<dbmap->num;i++) {
|
||||
for (j=0; j<nodemap->num; j++) {
|
||||
/* we dont need to push to ourselves */
|
||||
if (nodemap->nodes[j].vnn == vnn) {
|
||||
if (nodemap->nodes[j].pnn == vnn) {
|
||||
continue;
|
||||
}
|
||||
/* dont push to nodes that are unavailable */
|
||||
if (nodemap->nodes[j].flags & NODE_FLAGS_INACTIVE) {
|
||||
continue;
|
||||
}
|
||||
ret = ctdb_ctrl_copydb(ctdb, CONTROL_TIMEOUT(), vnn, nodemap->nodes[j].vnn,
|
||||
ret = ctdb_ctrl_copydb(ctdb, CONTROL_TIMEOUT(), vnn, nodemap->nodes[j].pnn,
|
||||
dbmap->dbids[i], CTDB_LMASTER_ANY, mem_ctx);
|
||||
if (ret != 0) {
|
||||
DEBUG(0, (__location__ " Unable to copy db from node %u to node %u\n",
|
||||
vnn, nodemap->nodes[j].vnn));
|
||||
vnn, nodemap->nodes[j].pnn));
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@ -590,7 +590,7 @@ static int update_vnnmap_on_all_nodes(struct ctdb_context *ctdb, struct ctdb_nod
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = ctdb_ctrl_setvnnmap(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].vnn, mem_ctx, vnnmap);
|
||||
ret = ctdb_ctrl_setvnnmap(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].pnn, mem_ctx, vnnmap);
|
||||
if (ret != 0) {
|
||||
DEBUG(0, (__location__ " Unable to set vnnmap for node %u\n", vnn));
|
||||
return -1;
|
||||
@ -843,7 +843,7 @@ static int do_recovery(struct ctdb_recoverd *rec,
|
||||
vnnmap->map = talloc_zero_array(vnnmap, uint32_t, vnnmap->size);
|
||||
for (i=j=0;i<nodemap->num;i++) {
|
||||
if (!(nodemap->nodes[i].flags & NODE_FLAGS_INACTIVE)) {
|
||||
vnnmap->map[j++] = nodemap->nodes[i].vnn;
|
||||
vnnmap->map[j++] = nodemap->nodes[i].pnn;
|
||||
}
|
||||
}
|
||||
|
||||
@ -903,7 +903,7 @@ static int do_recovery(struct ctdb_recoverd *rec,
|
||||
/*
|
||||
if enabled, tell nodes to takeover their public IPs
|
||||
*/
|
||||
if (ctdb->takeover.enabled) {
|
||||
if (ctdb->vnn_list) {
|
||||
ret = ctdb_takeover_run(ctdb, nodemap);
|
||||
if (ret != 0) {
|
||||
DEBUG(0, (__location__ " Unable to setup public takeover addresses\n"));
|
||||
@ -1053,7 +1053,7 @@ static void unban_all_nodes(struct ctdb_context *ctdb)
|
||||
for (i=0;i<nodemap->num;i++) {
|
||||
if ( (!(nodemap->nodes[i].flags & NODE_FLAGS_DISCONNECTED))
|
||||
&& (nodemap->nodes[i].flags & NODE_FLAGS_BANNED) ) {
|
||||
ctdb_ctrl_modflags(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[i].vnn, 0, NODE_FLAGS_BANNED);
|
||||
ctdb_ctrl_modflags(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[i].pnn, 0, NODE_FLAGS_BANNED);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1166,7 +1166,7 @@ static void monitor_handler(struct ctdb_context *ctdb, uint64_t srvid,
|
||||
ret = ctdb_ctrl_getnodemap(ctdb, CONTROL_TIMEOUT(), CTDB_CURRENT_NODE, tmp_ctx, &nodemap);
|
||||
|
||||
for (i=0;i<nodemap->num;i++) {
|
||||
if (nodemap->nodes[i].vnn == c->vnn) break;
|
||||
if (nodemap->nodes[i].pnn == c->vnn) break;
|
||||
}
|
||||
|
||||
if (i == nodemap->num) {
|
||||
@ -1203,7 +1203,7 @@ static void monitor_handler(struct ctdb_context *ctdb, uint64_t srvid,
|
||||
if (ret == 0 &&
|
||||
ctdb->recovery_master == ctdb->vnn &&
|
||||
ctdb->recovery_mode == CTDB_RECOVERY_NORMAL &&
|
||||
ctdb->takeover.enabled) {
|
||||
ctdb->vnn_list) {
|
||||
/* Only do the takeover run if the perm disabled or unhealthy
|
||||
flags changed since these will cause an ip failover but not
|
||||
a recovery.
|
||||
@ -1284,7 +1284,7 @@ static enum monitor_result verify_recmode(struct ctdb_context *ctdb, struct ctdb
|
||||
}
|
||||
state = ctdb_ctrl_getrecmode_send(ctdb, mem_ctx,
|
||||
CONTROL_TIMEOUT(),
|
||||
nodemap->nodes[j].vnn);
|
||||
nodemap->nodes[j].pnn);
|
||||
if (state == NULL) {
|
||||
/* we failed to send the control, treat this as
|
||||
an error and try again next iteration
|
||||
@ -1375,7 +1375,7 @@ static enum monitor_result verify_recmaster(struct ctdb_context *ctdb, struct ct
|
||||
}
|
||||
state = ctdb_ctrl_getrecmaster_send(ctdb, mem_ctx,
|
||||
CONTROL_TIMEOUT(),
|
||||
nodemap->nodes[j].vnn);
|
||||
nodemap->nodes[j].pnn);
|
||||
if (state == NULL) {
|
||||
/* we failed to send the control, treat this as
|
||||
an error and try again next iteration
|
||||
@ -1491,7 +1491,7 @@ again:
|
||||
/* count how many active nodes there are */
|
||||
num_active = 0;
|
||||
for (i=0; i<nodemap->num; i++) {
|
||||
if (rec->banned_nodes[nodemap->nodes[i].vnn] != NULL) {
|
||||
if (rec->banned_nodes[nodemap->nodes[i].pnn] != NULL) {
|
||||
nodemap->nodes[i].flags |= NODE_FLAGS_BANNED;
|
||||
} else {
|
||||
nodemap->nodes[i].flags &= ~NODE_FLAGS_BANNED;
|
||||
@ -1517,7 +1517,7 @@ again:
|
||||
|
||||
/* verify that the recmaster node is still active */
|
||||
for (j=0; j<nodemap->num; j++) {
|
||||
if (nodemap->nodes[j].vnn==recmaster) {
|
||||
if (nodemap->nodes[j].pnn==recmaster) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -1529,7 +1529,7 @@ again:
|
||||
}
|
||||
|
||||
if (nodemap->nodes[j].flags & NODE_FLAGS_INACTIVE) {
|
||||
DEBUG(0, ("Recmaster node %u no longer available. Force reelection\n", nodemap->nodes[j].vnn));
|
||||
DEBUG(0, ("Recmaster node %u no longer available. Force reelection\n", nodemap->nodes[j].pnn));
|
||||
force_election(rec, mem_ctx, vnn, nodemap);
|
||||
goto again;
|
||||
}
|
||||
@ -1563,7 +1563,7 @@ again:
|
||||
*/
|
||||
switch (verify_recmode(ctdb, nodemap)) {
|
||||
case MONITOR_RECOVERY_NEEDED:
|
||||
do_recovery(rec, mem_ctx, vnn, num_active, nodemap, vnnmap, nodemap->nodes[j].vnn);
|
||||
do_recovery(rec, mem_ctx, vnn, num_active, nodemap, vnnmap, nodemap->nodes[j].pnn);
|
||||
goto again;
|
||||
case MONITOR_FAILED:
|
||||
goto again;
|
||||
@ -1582,15 +1582,15 @@ again:
|
||||
if (nodemap->nodes[j].flags & NODE_FLAGS_INACTIVE) {
|
||||
continue;
|
||||
}
|
||||
if (nodemap->nodes[j].vnn == vnn) {
|
||||
if (nodemap->nodes[j].pnn == vnn) {
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = ctdb_ctrl_getnodemap(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].vnn,
|
||||
ret = ctdb_ctrl_getnodemap(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].pnn,
|
||||
mem_ctx, &remote_nodemap);
|
||||
if (ret != 0) {
|
||||
DEBUG(0, (__location__ " Unable to get nodemap from remote node %u\n",
|
||||
nodemap->nodes[j].vnn));
|
||||
nodemap->nodes[j].pnn));
|
||||
goto again;
|
||||
}
|
||||
|
||||
@ -1599,8 +1599,8 @@ again:
|
||||
*/
|
||||
if (remote_nodemap->num != nodemap->num) {
|
||||
DEBUG(0, (__location__ " Remote node:%u has different node count. %u vs %u of the local node\n",
|
||||
nodemap->nodes[j].vnn, remote_nodemap->num, nodemap->num));
|
||||
do_recovery(rec, mem_ctx, vnn, num_active, nodemap, vnnmap, nodemap->nodes[j].vnn);
|
||||
nodemap->nodes[j].pnn, remote_nodemap->num, nodemap->num));
|
||||
do_recovery(rec, mem_ctx, vnn, num_active, nodemap, vnnmap, nodemap->nodes[j].pnn);
|
||||
goto again;
|
||||
}
|
||||
|
||||
@ -1608,21 +1608,21 @@ again:
|
||||
active, then that is also a good reason to do recovery
|
||||
*/
|
||||
for (i=0;i<nodemap->num;i++) {
|
||||
if (remote_nodemap->nodes[i].vnn != nodemap->nodes[i].vnn) {
|
||||
if (remote_nodemap->nodes[i].pnn != nodemap->nodes[i].pnn) {
|
||||
DEBUG(0, (__location__ " Remote node:%u has different nodemap vnn for %d (%u vs %u).\n",
|
||||
nodemap->nodes[j].vnn, i,
|
||||
remote_nodemap->nodes[i].vnn, nodemap->nodes[i].vnn));
|
||||
nodemap->nodes[j].pnn, i,
|
||||
remote_nodemap->nodes[i].pnn, nodemap->nodes[i].pnn));
|
||||
do_recovery(rec, mem_ctx, vnn, num_active, nodemap,
|
||||
vnnmap, nodemap->nodes[j].vnn);
|
||||
vnnmap, nodemap->nodes[j].pnn);
|
||||
goto again;
|
||||
}
|
||||
if ((remote_nodemap->nodes[i].flags & NODE_FLAGS_INACTIVE) !=
|
||||
(nodemap->nodes[i].flags & NODE_FLAGS_INACTIVE)) {
|
||||
DEBUG(0, (__location__ " Remote node:%u has different nodemap flag for %d (0x%x vs 0x%x)\n",
|
||||
nodemap->nodes[j].vnn, i,
|
||||
nodemap->nodes[j].pnn, i,
|
||||
remote_nodemap->nodes[i].flags, nodemap->nodes[i].flags));
|
||||
do_recovery(rec, mem_ctx, vnn, num_active, nodemap,
|
||||
vnnmap, nodemap->nodes[j].vnn);
|
||||
vnnmap, nodemap->nodes[j].pnn);
|
||||
goto again;
|
||||
}
|
||||
}
|
||||
@ -1656,19 +1656,19 @@ again:
|
||||
if (nodemap->nodes[j].flags & NODE_FLAGS_INACTIVE) {
|
||||
continue;
|
||||
}
|
||||
if (nodemap->nodes[j].vnn == vnn) {
|
||||
if (nodemap->nodes[j].pnn == vnn) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for (i=0; i<vnnmap->size; i++) {
|
||||
if (vnnmap->map[i] == nodemap->nodes[j].vnn) {
|
||||
if (vnnmap->map[i] == nodemap->nodes[j].pnn) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (i == vnnmap->size) {
|
||||
DEBUG(0, (__location__ " Node %u is active in the nodemap but did not exist in the vnnmap\n",
|
||||
nodemap->nodes[j].vnn));
|
||||
do_recovery(rec, mem_ctx, vnn, num_active, nodemap, vnnmap, nodemap->nodes[j].vnn);
|
||||
nodemap->nodes[j].pnn));
|
||||
do_recovery(rec, mem_ctx, vnn, num_active, nodemap, vnnmap, nodemap->nodes[j].pnn);
|
||||
goto again;
|
||||
}
|
||||
}
|
||||
@ -1681,31 +1681,31 @@ again:
|
||||
if (nodemap->nodes[j].flags & NODE_FLAGS_INACTIVE) {
|
||||
continue;
|
||||
}
|
||||
if (nodemap->nodes[j].vnn == vnn) {
|
||||
if (nodemap->nodes[j].pnn == vnn) {
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = ctdb_ctrl_getvnnmap(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].vnn,
|
||||
ret = ctdb_ctrl_getvnnmap(ctdb, CONTROL_TIMEOUT(), nodemap->nodes[j].pnn,
|
||||
mem_ctx, &remote_vnnmap);
|
||||
if (ret != 0) {
|
||||
DEBUG(0, (__location__ " Unable to get vnnmap from remote node %u\n",
|
||||
nodemap->nodes[j].vnn));
|
||||
nodemap->nodes[j].pnn));
|
||||
goto again;
|
||||
}
|
||||
|
||||
/* verify the vnnmap generation is the same */
|
||||
if (vnnmap->generation != remote_vnnmap->generation) {
|
||||
DEBUG(0, (__location__ " Remote node %u has different generation of vnnmap. %u vs %u (ours)\n",
|
||||
nodemap->nodes[j].vnn, remote_vnnmap->generation, vnnmap->generation));
|
||||
do_recovery(rec, mem_ctx, vnn, num_active, nodemap, vnnmap, nodemap->nodes[j].vnn);
|
||||
nodemap->nodes[j].pnn, remote_vnnmap->generation, vnnmap->generation));
|
||||
do_recovery(rec, mem_ctx, vnn, num_active, nodemap, vnnmap, nodemap->nodes[j].pnn);
|
||||
goto again;
|
||||
}
|
||||
|
||||
/* verify the vnnmap size is the same */
|
||||
if (vnnmap->size != remote_vnnmap->size) {
|
||||
DEBUG(0, (__location__ " Remote node %u has different size of vnnmap. %u vs %u (ours)\n",
|
||||
nodemap->nodes[j].vnn, remote_vnnmap->size, vnnmap->size));
|
||||
do_recovery(rec, mem_ctx, vnn, num_active, nodemap, vnnmap, nodemap->nodes[j].vnn);
|
||||
nodemap->nodes[j].pnn, remote_vnnmap->size, vnnmap->size));
|
||||
do_recovery(rec, mem_ctx, vnn, num_active, nodemap, vnnmap, nodemap->nodes[j].pnn);
|
||||
goto again;
|
||||
}
|
||||
|
||||
@ -1713,16 +1713,16 @@ again:
|
||||
for (i=0;i<vnnmap->size;i++) {
|
||||
if (remote_vnnmap->map[i] != vnnmap->map[i]) {
|
||||
DEBUG(0, (__location__ " Remote node %u has different vnnmap.\n",
|
||||
nodemap->nodes[j].vnn));
|
||||
nodemap->nodes[j].pnn));
|
||||
do_recovery(rec, mem_ctx, vnn, num_active, nodemap,
|
||||
vnnmap, nodemap->nodes[j].vnn);
|
||||
vnnmap, nodemap->nodes[j].pnn);
|
||||
goto again;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* we might need to change who has what IP assigned */
|
||||
if (need_takeover_run && ctdb->takeover.enabled) {
|
||||
if (need_takeover_run && ctdb->vnn_list) {
|
||||
ret = ctdb_takeover_run(ctdb, nodemap);
|
||||
if (ret != 0) {
|
||||
DEBUG(0, (__location__ " Unable to setup public takeover addresses\n"));
|
||||
|
@ -105,14 +105,14 @@ static int ctdb_add_node(struct ctdb_context *ctdb, char *nstr)
|
||||
node->address.address,
|
||||
node->address.port);
|
||||
/* this assumes that the nodes are kept in sorted order, and no gaps */
|
||||
node->vnn = ctdb->num_nodes;
|
||||
node->pnn = ctdb->num_nodes;
|
||||
|
||||
/* nodes start out disconnected */
|
||||
node->flags |= NODE_FLAGS_DISCONNECTED;
|
||||
|
||||
if (ctdb->address.address &&
|
||||
ctdb_same_address(&ctdb->address, &node->address)) {
|
||||
ctdb->vnn = node->vnn;
|
||||
ctdb->vnn = node->pnn;
|
||||
node->flags &= ~NODE_FLAGS_DISCONNECTED;
|
||||
}
|
||||
|
||||
@ -401,7 +401,7 @@ static void ctdb_broadcast_packet_all(struct ctdb_context *ctdb,
|
||||
{
|
||||
int i;
|
||||
for (i=0;i<ctdb->num_nodes;i++) {
|
||||
hdr->destnode = ctdb->nodes[i]->vnn;
|
||||
hdr->destnode = ctdb->nodes[i]->pnn;
|
||||
ctdb_queue_packet(ctdb, hdr);
|
||||
}
|
||||
}
|
||||
@ -428,7 +428,7 @@ static void ctdb_broadcast_packet_connected(struct ctdb_context *ctdb,
|
||||
int i;
|
||||
for (i=0;i<ctdb->num_nodes;i++) {
|
||||
if (!(ctdb->nodes[i]->flags & NODE_FLAGS_DISCONNECTED)) {
|
||||
hdr->destnode = ctdb->nodes[i]->vnn;
|
||||
hdr->destnode = ctdb->nodes[i]->pnn;
|
||||
ctdb_queue_packet(ctdb, hdr);
|
||||
}
|
||||
}
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -42,7 +42,6 @@ static struct {
|
||||
const char *transport;
|
||||
const char *myaddress;
|
||||
const char *public_address_list;
|
||||
const char *public_interface;
|
||||
const char *event_script_dir;
|
||||
const char *logfile;
|
||||
const char *recovery_lock_file;
|
||||
@ -101,7 +100,6 @@ int main(int argc, const char *argv[])
|
||||
POPT_CTDB_CMDLINE
|
||||
{ "interactive", 'i', POPT_ARG_NONE, &interactive, 0, "don't fork", NULL },
|
||||
{ "public-addresses", 0, POPT_ARG_STRING, &options.public_address_list, 0, "public address list file", "filename" },
|
||||
{ "public-interface", 0, POPT_ARG_STRING, &options.public_interface, 0, "public interface", "interface"},
|
||||
{ "event-script-dir", 0, POPT_ARG_STRING, &options.event_script_dir, 0, "event script directory", "dirname" },
|
||||
{ "logfile", 0, POPT_ARG_STRING, &options.logfile, 0, "log file location", "filename" },
|
||||
{ "nlist", 0, POPT_ARG_STRING, &options.nlist, 0, "node list file", "filename" },
|
||||
@ -200,18 +198,12 @@ int main(int argc, const char *argv[])
|
||||
}
|
||||
}
|
||||
|
||||
if (options.public_interface) {
|
||||
ctdb->takeover.interface = talloc_strdup(ctdb, options.public_interface);
|
||||
CTDB_NO_MEMORY(ctdb, ctdb->takeover.interface);
|
||||
}
|
||||
|
||||
if (options.public_address_list) {
|
||||
ret = ctdb_set_public_addresses(ctdb, options.public_address_list);
|
||||
if (ret == -1) {
|
||||
DEBUG(0,("Unable to setup public address list\n"));
|
||||
exit(1);
|
||||
}
|
||||
ctdb->takeover.enabled = true;
|
||||
}
|
||||
|
||||
ret = ctdb_set_event_script_dir(ctdb, options.event_script_dir);
|
||||
|
@ -75,9 +75,9 @@ static int ctdb_event_script_v(struct ctdb_context *ctdb, const char *fmt, va_li
|
||||
/*
|
||||
the service specific event scripts
|
||||
*/
|
||||
if (stat(ctdb->takeover.event_script_dir, &st) != 0 &&
|
||||
if (stat(ctdb->event_script_dir, &st) != 0 &&
|
||||
errno == ENOENT) {
|
||||
DEBUG(0,("No event script directory found at '%s'\n", ctdb->takeover.event_script_dir));
|
||||
DEBUG(0,("No event script directory found at '%s'\n", ctdb->event_script_dir));
|
||||
talloc_free(tmp_ctx);
|
||||
return -1;
|
||||
}
|
||||
@ -88,9 +88,9 @@ static int ctdb_event_script_v(struct ctdb_context *ctdb, const char *fmt, va_li
|
||||
/* scan all directory entries and insert all valid scripts into the
|
||||
tree
|
||||
*/
|
||||
dir = opendir(ctdb->takeover.event_script_dir);
|
||||
dir = opendir(ctdb->event_script_dir);
|
||||
if (dir == NULL) {
|
||||
DEBUG(0,("Failed to open event script directory '%s'\n", ctdb->takeover.event_script_dir));
|
||||
DEBUG(0,("Failed to open event script directory '%s'\n", ctdb->event_script_dir));
|
||||
talloc_free(tmp_ctx);
|
||||
return -1;
|
||||
}
|
||||
@ -120,7 +120,7 @@ static int ctdb_event_script_v(struct ctdb_context *ctdb, const char *fmt, va_li
|
||||
}
|
||||
|
||||
/* Make sure the event script is executable */
|
||||
str = talloc_asprintf(tree, "%s/%s", ctdb->takeover.event_script_dir, de->d_name);
|
||||
str = talloc_asprintf(tree, "%s/%s", ctdb->event_script_dir, de->d_name);
|
||||
if (stat(str, &st) != 0) {
|
||||
DEBUG(0,("Could not stat event script %s. Ignoring this event script\n", str));
|
||||
continue;
|
||||
@ -152,7 +152,7 @@ static int ctdb_event_script_v(struct ctdb_context *ctdb, const char *fmt, va_li
|
||||
CTDB_NO_MEMORY(ctdb, options);
|
||||
|
||||
cmdstr = talloc_asprintf(tmp_ctx, "%s/%s %s",
|
||||
ctdb->takeover.event_script_dir,
|
||||
ctdb->event_script_dir,
|
||||
script, options);
|
||||
CTDB_NO_MEMORY(ctdb, cmdstr);
|
||||
|
||||
|
@ -286,7 +286,7 @@ static int ctdb_tcp_listen_automatic(struct ctdb_context *ctdb)
|
||||
ctdb->name = talloc_asprintf(ctdb, "%s:%u",
|
||||
ctdb->address.address,
|
||||
ctdb->address.port);
|
||||
ctdb->vnn = ctdb->nodes[i]->vnn;
|
||||
ctdb->vnn = ctdb->nodes[i]->pnn;
|
||||
ctdb->nodes[i]->flags &= ~NODE_FLAGS_DISCONNECTED;
|
||||
DEBUG(1,("ctdb chose network address %s:%u vnn %u\n",
|
||||
ctdb->address.address,
|
||||
|
@ -233,7 +233,7 @@ static int control_status(struct ctdb_context *ctdb, int argc, const char **argv
|
||||
if(options.machinereadable){
|
||||
printf(":Node:IP:Disonnected:Disabled:Permanently Disabled:\n");
|
||||
for(i=0;i<nodemap->num;i++){
|
||||
printf(":%d:%s:%d:%d:%d:\n", nodemap->nodes[i].vnn,
|
||||
printf(":%d:%s:%d:%d:%d:\n", nodemap->nodes[i].pnn,
|
||||
inet_ntoa(nodemap->nodes[i].sin.sin_addr),
|
||||
!!(nodemap->nodes[i].flags&NODE_FLAGS_DISCONNECTED),
|
||||
!!(nodemap->nodes[i].flags&NODE_FLAGS_UNHEALTHY),
|
||||
@ -270,10 +270,10 @@ static int control_status(struct ctdb_context *ctdb, int argc, const char **argv
|
||||
flags_str = talloc_strdup(ctdb, "OK");
|
||||
CTDB_NO_MEMORY_FATAL(ctdb, flags_str);
|
||||
}
|
||||
printf("vnn:%d %-16s %s%s\n", nodemap->nodes[i].vnn,
|
||||
printf("pnn:%d %-16s %s%s\n", nodemap->nodes[i].pnn,
|
||||
inet_ntoa(nodemap->nodes[i].sin.sin_addr),
|
||||
flags_str,
|
||||
nodemap->nodes[i].vnn == myvnn?" (THIS NODE)":"");
|
||||
nodemap->nodes[i].pnn == myvnn?" (THIS NODE)":"");
|
||||
talloc_free(flags_str);
|
||||
}
|
||||
|
||||
@ -315,22 +315,26 @@ static int control_status(struct ctdb_context *ctdb, int argc, const char **argv
|
||||
static int control_get_tickles(struct ctdb_context *ctdb, int argc, const char **argv)
|
||||
{
|
||||
struct ctdb_control_tcp_tickle_list *list;
|
||||
uint32_t vnn;
|
||||
struct sockaddr_in ip;
|
||||
int i, ret;
|
||||
|
||||
if (argc < 1) {
|
||||
usage();
|
||||
}
|
||||
|
||||
vnn = strtoul(argv[0], NULL, 0);
|
||||
ip.sin_family = AF_INET;
|
||||
if (inet_aton(argv[0], &ip.sin_addr) == 0) {
|
||||
DEBUG(0,("Wrongly formed ip address '%s'\n", argv[0]));
|
||||
return -1;
|
||||
}
|
||||
|
||||
ret = ctdb_ctrl_get_tcp_tickles(ctdb, TIMELIMIT(), options.vnn, ctdb, vnn, &list);
|
||||
ret = ctdb_ctrl_get_tcp_tickles(ctdb, TIMELIMIT(), options.vnn, ctdb, &ip, &list);
|
||||
if (ret == -1) {
|
||||
DEBUG(0, ("Unable to list tickles\n"));
|
||||
return -1;
|
||||
}
|
||||
|
||||
printf("Tickles for vnn:%u\n", list->vnn);
|
||||
printf("Tickles for ip:%s\n", inet_ntoa(list->ip.sin_addr));
|
||||
printf("Num tickles:%u\n", list->tickles.num);
|
||||
for (i=0;i<list->tickles.num;i++) {
|
||||
printf("SRC: %s:%u ", inet_ntoa(list->tickles.connections[i].saddr.sin_addr), ntohs(list->tickles.connections[i].saddr.sin_port));
|
||||
@ -540,17 +544,17 @@ static int control_ip(struct ctdb_context *ctdb, int argc, const char **argv)
|
||||
for(i=0;i<ips->num;i++){
|
||||
printf(":%s:%d:\n",
|
||||
inet_ntoa(ips->ips[i].sin.sin_addr),
|
||||
ips->ips[i].takeover_vnn);
|
||||
ips->ips[i].pnn);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
printf("Number of nodes:%d\n", ips->num);
|
||||
printf("Number of addresses:%d\n", ips->num);
|
||||
for(i=0;i<ips->num;i++){
|
||||
printf("%-16s %d\n",
|
||||
inet_ntoa(ips->ips[i].sin.sin_addr),
|
||||
ips->ips[i].takeover_vnn);
|
||||
ips->ips[i].pnn);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -1067,7 +1071,7 @@ static const struct {
|
||||
{ "thaw", control_thaw, true, "thaw all databases" },
|
||||
{ "killtcp", kill_tcp, false, "kill a tcp connection.", "<srcip:port> <dstip:port>" },
|
||||
{ "tickle", tickle_tcp, false, "send a tcp tickle ack", "<srcip:port> <dstip:port>" },
|
||||
{ "gettickles", control_get_tickles, false, "get the list of tickles registered for this vnn", "<vnn>" },
|
||||
{ "gettickles", control_get_tickles, false, "get the list of tickles registered for this ip", "<ip>" },
|
||||
|
||||
{ "regsrvid", regsrvid, false, "register a server id", "<vnn> <type> <id>" },
|
||||
{ "unregsrvid", unregsrvid, false, "unregister a server id", "<vnn> <type> <id>" },
|
||||
|
@ -38,7 +38,6 @@ The most important options are:
|
||||
<ul>
|
||||
<li>CTDB_NODES
|
||||
<li>CTDB_RECOVERY_LOCK
|
||||
<li>CTDB_PUBLIC_INTERFACE
|
||||
<li>CTDB_PUBLIC_ADDRESSES
|
||||
</ul>
|
||||
|
||||
@ -82,34 +81,12 @@ Content of /etc/ctdb/nodes:
|
||||
The default for this file is /etc/ctdb/nodes.
|
||||
|
||||
|
||||
<h3>CTDB_PUBLIC_INTERFACE</h3>
|
||||
|
||||
This parameter is used to tell CTDB which network interface is used to
|
||||
hold the public ip addresses when CTDB is used to manage IP
|
||||
takeover.<p>
|
||||
|
||||
This can be the same network interface as is used for the private
|
||||
addresses in the CTDB_NODES list but it is recommended that you use a
|
||||
different interface.<p>
|
||||
|
||||
Example using eth0 for the public interface:
|
||||
<pre>
|
||||
CTDB_PUBLIC_INTERFACE=eth0
|
||||
</pre>
|
||||
|
||||
It is strongly recommended that you use CTDB with IP takeover.<p>
|
||||
|
||||
When you use this parameter you must also specify the
|
||||
CTDB_PUBLIC_ADDRESSES parameter.
|
||||
|
||||
<h3>CTDB_PUBLIC_ADDRESSES</h3>
|
||||
|
||||
In order to use IP takeover you must specify a file containing a list
|
||||
of public IP addresses. One IP address for each node.<p>
|
||||
This file specifies a list of public ip addresses which the cluster will
|
||||
serve. This file must be the same on all nodes.<p>
|
||||
|
||||
|
||||
This file contains a list of public cluster addresses.<p>
|
||||
|
||||
These are the addresses that the SMBD daemons and other services will
|
||||
bind to and which clients will use to connect to the cluster. This
|
||||
file must contain one address for each node, i.e. it must have the
|
||||
@ -122,10 +99,10 @@ Example 4 node cluster:
|
||||
</pre>
|
||||
Content of /etc/ctdb/public_addresses:
|
||||
<pre>
|
||||
192.168.1.1/24
|
||||
192.168.1.2/24
|
||||
192.168.2.1/24
|
||||
192.168.2.2/24
|
||||
192.168.1.1/24 eth0
|
||||
192.168.1.2/24 eth0
|
||||
192.168.2.1/24 eth1
|
||||
192.168.2.2/24 eth1
|
||||
</pre>
|
||||
|
||||
These are the IP addresses that you should configure in DNS for the
|
||||
@ -138,28 +115,19 @@ cluster.<p>
|
||||
|
||||
The CTDB cluster utilizes IP takeover techniques to ensure that as long as at least one node in the cluster is available, all the public IP addresses will always be available to clients.<p>
|
||||
|
||||
This means that if one physical node fails, the public address of that
|
||||
node will be taken over by a different node in the cluster. This
|
||||
This means that if one physical node fails, the public addresses that
|
||||
node was serving will be taken over by a different node in the cluster. This
|
||||
provides a guarantee that all ip addresses exposed to clients will
|
||||
always be reachable by clients even if a node has been powered off or
|
||||
has crashed.<p>
|
||||
|
||||
CTDB nodes will only take over IP addresses that are inside the same
|
||||
subnet as its own public IP address. In the example above, nodes 0 and
|
||||
1 would be able to take over each other's public IP addresses, and analogously for
|
||||
nodes 2 and 3, but node 0 and 1 would NOT be able to take over the IP
|
||||
addresses for nodes 2 or 3 since they are on a different
|
||||
subnet.<p>
|
||||
|
||||
Do not assign these addresses to any of the interfaces on the
|
||||
host. CTDB will add and remove these addresses automatically at
|
||||
runtime.<p>
|
||||
|
||||
This parameter is used when CTDB operates in IP takeover mode.<p>
|
||||
|
||||
The usual location for this file is /etc/ctdb/public_addresses. If you
|
||||
use this you <strong>must</strong> also specify the
|
||||
CTDB_PUBLIC_INTERFACE parameter.<p>
|
||||
The usual location for this file is /etc/ctdb/public_addresses.
|
||||
|
||||
<h2>Event scripts</h2>
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user