diff --git a/ctdb/doc/ctdb.1 b/ctdb/doc/ctdb.1 index 91c1b645e2f..56b235343bb 100644 --- a/ctdb/doc/ctdb.1 +++ b/ctdb/doc/ctdb.1 @@ -1,11 +1,11 @@ .\" Title: ctdb .\" Author: .\" Generator: DocBook XSL Stylesheets v1.73.2 -.\" Date: 03/24/2009 +.\" Date: 06/02/2009 .\" Manual: .\" Source: .\" -.TH "CTDB" "1" "03/24/2009" "" "" +.TH "CTDB" "1" "06/02/2009" "" "" .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) @@ -469,9 +469,9 @@ This command will kill the specified TCP connection by issuing a TCP RST to the This command will send out a gratious arp for the specified interface through the specified interface\. This command is mainly used by the ctdb eventscripts\. .SS "reloadnodes" .PP -This command is used when adding new nodes to an existing cluster and to reduce the disruption of this operation\. This command should never be used except when expanding an existing cluster\. This can only be used to expand a cluster\. To remove a node from the cluster you still need to shut down ctdb on all nodes, edit the nodes file and restart ctdb\. +This command is used when adding new nodes, or removing existing nodes from an existing cluster\. .PP -Procedure: +Procedure to add a node: .PP 1, To expand an existing cluster, first ensure with \'ctdb status\' that all nodes are up and running and that they are all healthy\. Do not try to expand a cluster unless it is completely healthy! .PP @@ -479,11 +479,24 @@ Procedure: .PP 3, Verify that all the nodes have identical /etc/ctdb/nodes files after you edited them and added the new node! .PP -4, Run \'ctdb reloadnodes\' to force all nodes to reaload the nodesfile\. +4, Run \'ctdb reloadnodes\' to force all nodes to reload the nodesfile\. .PP 5, Use \'ctdb status\' on all nodes and verify that they now show the additional node\. .PP 6, Install and configure the new node and bring it online\. 
+.PP +Procedure to remove a node: +.PP +1, To remove a node from an existing cluster, first ensure with \'ctdb status\' that all nodes, except the node to be deleted, are up and running and that they are all healthy\. Do not try to remove a node from a cluster unless the cluster is completely healthy! +.PP +2, Shutdown and power off the node to be removed\. +.PP +3, On all other nodes, edit the /etc/ctdb/nodes file and comment out the node to be removed\. Do not delete the line for that node, just comment it out by adding a \'#\' at the beginning of the line\. +.PP +4, Run \'ctdb reloadnodes\' to force all nodes to reload the nodesfile\. +.PP +5, Use \'ctdb status\' on all nodes and verify that the deleted node no longer shows up in the list\. +.PP .SS "tickle " .PP This command will will send a TCP tickle to the source host for the specified TCP connection\. A TCP tickle is a TCP ACK packet with an invalid sequence and acknowledge number and will when received by the source host result in it sending an immediate correct ACK back to the other end\. diff --git a/ctdb/doc/ctdb.1.html b/ctdb/doc/ctdb.1.html index f75d28ca72e..7b20c877eaf 100644 --- a/ctdb/doc/ctdb.1.html +++ b/ctdb/doc/ctdb.1.html @@ -367,14 +367,9 @@ CTDB version 1 through the specified interface. This command is mainly used by the ctdb eventscripts.

reloadnodes

- This command is used when adding new nodes to an existing cluster and - to reduce the disruption of this operation. This command should never - be used except when expanding an existing cluster. - This can only be used to expand a cluster. To remove a node from the - cluster you still need to shut down ctdb on all nodes, edit the nodes file - and restart ctdb. + This command is used when adding new nodes, or removing existing nodes from an existing cluster.

- Procedure: + Procedure to add a node:

1, To expand an existing cluster, first ensure with 'ctdb status' that all nodes are up and running and that they are all healthy. @@ -385,12 +380,27 @@ CTDB version 1

3, Verify that all the nodes have identical /etc/ctdb/nodes files after you edited them and added the new node!

- 4, Run 'ctdb reloadnodes' to force all nodes to reaload the nodesfile. + 4, Run 'ctdb reloadnodes' to force all nodes to reload the nodesfile.

5, Use 'ctdb status' on all nodes and verify that they now show the additional node.

6, Install and configure the new node and bring it online. -

tickle <srcip:port> <dstip:port>

+

+ Procedure to remove a node: +

+ 1, To remove a node from an existing cluster, first ensure with 'ctdb status' that + all nodes, except the node to be deleted, are up and running and that they are all healthy. + Do not try to remove a node from a cluster unless the cluster is completely healthy! +

+ 2, Shutdown and power off the node to be removed. +

+ 3, On all other nodes, edit the /etc/ctdb/nodes file and comment out the node to be removed. Do not delete the line for that node, just comment it out by adding a '#' at the beginning of the line. +

+ 4, Run 'ctdb reloadnodes' to force all nodes to reload the nodesfile. +

+ 5, Use 'ctdb status' on all nodes and verify that the deleted node no longer shows up in the list. +

+

tickle <srcip:port> <dstip:port>

This command will will send a TCP tickle to the source host for the specified TCP connection. A TCP tickle is a TCP ACK packet with an invalid sequence and @@ -402,10 +412,10 @@ CTDB version 1 TCP connection has been disrupted and that the client will need to reestablish. This greatly speeds up the time it takes for a client to detect and reestablish after an IP failover in the ctdb cluster. -

gettickles <ip>

+

gettickles <ip>

This command is used to show which TCP connections are registered with CTDB to be "tickled" if there is a failover. -

repack [max_freelist]

+

repack [max_freelist]

Over time, when records are created and deleted in a TDB, the TDB list of free space will become fragmented. This can lead to a slowdown in accessing TDB records. This command is used to defragment a TDB database and pruning the freelist.

@@ -420,7 +430,7 @@ CTDB version 1 Example: ctdb repack 1000

By default, this operation is issued from the 00.ctdb event script every 5 minutes. -

vacuum [max_records]

+

vacuum [max_records]

Over time CTDB databases will fill up with empty deleted records which will lead to a progressive slow down of CTDB database access. This command is used to prune all databases and delete all empty records from the cluster.

@@ -436,12 +446,12 @@ CTDB version 1 Example: ctdb vacuum

By default, this operation is issued from the 00.ctdb event script every 5 minutes. -

Debugging Commands

+

Debugging Commands

These commands are primarily used for CTDB development and testing and should not be used for normal administration. -

process-exists <pid>

+

process-exists <pid>

This command checks if a specific process exists on the CTDB host. This is mainly used by Samba to check if remote instances of samba are still running or not. -

getdbmap

+

getdbmap

This command lists all clustered TDB databases that the CTDB daemon has attached to. Some databases are flagged as PERSISTENT, this means that the database stores data persistently and the data will remain across reboots. One example of such a database is secrets.tdb where information about how the cluster was joined to the domain is stored.

Most databases are not persistent and only store the state information that the currently running samba daemons need. These databases are always wiped when ctdb/samba starts and when a node is rebooted. @@ -460,25 +470,25 @@ dbid:0x2672a57f name:idmap2.tdb path:/var/ctdb/persistent/idmap2.tdb.0 PERSISTEN dbid:0xb775fff6 name:secrets.tdb path:/var/ctdb/persistent/secrets.tdb.0 PERSISTENT dbid:0xe98e08b6 name:group_mapping.tdb path:/var/ctdb/persistent/group_mapping.tdb.0 PERSISTENT dbid:0x7bbbd26c name:passdb.tdb path:/var/ctdb/persistent/passdb.tdb.0 PERSISTENT -

catdb <dbname>

+

catdb <dbname>

This command will dump a clustered TDB database to the screen. This is a debugging command. -

getmonmode

+

getmonmode

This command returns the monutoring mode of a node. The monitoring mode is either ACTIVE or DISABLED. Normally a node will continously monitor that all other nodes that are expected are in fact connected and that they respond to commands.

ACTIVE - This is the normal mode. The node is actively monitoring all other nodes, both that the transport is connected and also that the node responds to commands. If a node becomes unavailable, it will be marked as DISCONNECTED and a recovery is initiated to restore the cluster.

DISABLED - This node is not monitoring that other nodes are available. In this mode a node failure will not be detected and no recovery will be performed. This mode is useful when for debugging purposes one wants to attach GDB to a ctdb process but wants to prevent the rest of the cluster from marking this node as DISCONNECTED and do a recovery. -

setmonmode <0|1>

+

setmonmode <0|1>

This command can be used to explicitely disable/enable monitoring mode on a node. The main purpose is if one wants to attach GDB to a running ctdb daemon but wants to prevent the other nodes from marking it as DISCONNECTED and issuing a recovery. To do this, set monitoring mode to 0 on all nodes before attaching with GDB. Remember to set monitoring mode back to 1 afterwards. -

attach <dbname>

+

attach <dbname>

This is a debugging command. This command will make the CTDB daemon create a new CTDB database and attach to it. -

dumpmemory

+

dumpmemory

This is a debugging command. This command will make the ctdb daemon to write a fill memory allocation map to standard output. -

rddumpmemory

+

rddumpmemory

This is a debugging command. This command will dump the talloc memory allocation tree for the recovery daemon to standard output. -

freeze

+

freeze

This command will lock all the local TDB databases causing clients that are accessing these TDBs such as samba3 to block until the databases are thawed. @@ -486,20 +496,20 @@ dbid:0x7bbbd26c name:passdb.tdb path:/var/ctdb/persistent/passdb.tdb.0 PERSISTEN This is primarily used by the recovery daemon to stop all samba daemons from accessing any databases while the database is recovered and rebuilt. -

thaw

+

thaw

Thaw a previously frozen node. -

eventscript <arguments>

+

eventscript <arguments>

This is a debugging command. This command can be used to manually invoke and run the eventscritps with arbitrary arguments. -

backupdb <database> <file>

+

backupdb <database> <file>

This command can be used to copy the entire content of a database out to a file. This file can later be read back into ctdb using the restoredb command. This is mainly useful for backing up persistent databases such as secrets.tdb and similar. -

restoredb <file>

+

restoredb <file>

This command restores a persistent database that was previously backed up using backupdb. -

SEE ALSO

+

SEE ALSO

ctdbd(1), onnode(1) http://ctdb.samba.org/ -

COPYRIGHT/LICENSE


+

COPYRIGHT/LICENSE


Copyright (C) Andrew Tridgell 2007
Copyright (C) Ronnie sahlberg 2007

diff --git a/ctdb/doc/ctdb.1.xml b/ctdb/doc/ctdb.1.xml index a88e8ad9ccb..7b327fbecd3 100644 --- a/ctdb/doc/ctdb.1.xml +++ b/ctdb/doc/ctdb.1.xml @@ -679,15 +679,10 @@ CTDB version 1 reloadnodes - This command is used when adding new nodes to an existing cluster and - to reduce the disruption of this operation. This command should never - be used except when expanding an existing cluster. - This can only be used to expand a cluster. To remove a node from the - cluster you still need to shut down ctdb on all nodes, edit the nodes file - and restart ctdb. + This command is used when adding new nodes, or removing existing nodes from an existing cluster. - Procedure: + Procedure to add a node: 1, To expand an existing cluster, first ensure with 'ctdb status' that @@ -702,7 +697,7 @@ CTDB version 1 3, Verify that all the nodes have identical /etc/ctdb/nodes files after you edited them and added the new node! - 4, Run 'ctdb reloadnodes' to force all nodes to reaload the nodesfile. + 4, Run 'ctdb reloadnodes' to force all nodes to reload the nodesfile. 5, Use 'ctdb status' on all nodes and verify that they now show the additional node. @@ -710,6 +705,29 @@ CTDB version 1 6, Install and configure the new node and bring it online. + + Procedure to remove a node: + + + 1, To remove a node from an existing cluster, first ensure with 'ctdb status' that + all nodes, except the node to be deleted, are up and running and that they are all healthy. + Do not try to remove a node from a cluster unless the cluster is completely healthy! + + + 2, Shutdown and power off the node to be removed. + + + 3, On all other nodes, edit the /etc/ctdb/nodes file and comment out the node to be removed. Do not delete the line for that node, just comment it out by adding a '#' at the beginning of the line. + + + 4, Run 'ctdb reloadnodes' to force all nodes to reload the nodesfile. + + + 5, Use 'ctdb status' on all nodes and verify that the deleted node no longer shows up in the list. 
+ + + + tickle <srcip:port> <dstip:port> diff --git a/ctdb/include/ctdb.h b/ctdb/include/ctdb.h index 866ba76e2a8..ea4bcae109e 100644 --- a/ctdb/include/ctdb.h +++ b/ctdb/include/ctdb.h @@ -101,6 +101,11 @@ struct ctdb_call_info { */ #define CTDB_SRVID_PUSH_NODE_FLAGS 0xF900000000000000LL +/* + a message ID to get the recovery daemon to reload the nodes file + */ +#define CTDB_SRVID_RELOAD_NODES 0xFA00000000000000LL + /* used on the domain socket, send a pdu to the local daemon */ diff --git a/ctdb/include/ctdb_private.h b/ctdb/include/ctdb_private.h index ff007a8ac61..98dab0785db 100644 --- a/ctdb/include/ctdb_private.h +++ b/ctdb/include/ctdb_private.h @@ -198,7 +198,8 @@ struct ctdb_node { #define NODE_FLAGS_PERMANENTLY_DISABLED 0x00000004 /* administrator has disabled node */ #define NODE_FLAGS_BANNED 0x00000008 /* recovery daemon has banned the node */ #define NODE_FLAGS_DISABLED (NODE_FLAGS_UNHEALTHY|NODE_FLAGS_PERMANENTLY_DISABLED) -#define NODE_FLAGS_INACTIVE (NODE_FLAGS_DISCONNECTED|NODE_FLAGS_BANNED) +#define NODE_FLAGS_DELETED 0x00000010 /* this node has been deleted */ +#define NODE_FLAGS_INACTIVE (NODE_FLAGS_DELETED|NODE_FLAGS_DISCONNECTED|NODE_FLAGS_BANNED) uint32_t flags; /* used by the dead node monitoring */ diff --git a/ctdb/packaging/RPM/ctdb.spec b/ctdb/packaging/RPM/ctdb.spec index 029d742e494..b2f7efa4873 100644 --- a/ctdb/packaging/RPM/ctdb.spec +++ b/ctdb/packaging/RPM/ctdb.spec @@ -4,7 +4,7 @@ Summary: Clustered TDB Vendor: Samba Team Packager: Samba Team Name: ctdb -Version: 1.0.82 +Version: 1.0.83 Release: 1 Epoch: 0 License: GNU GPL version 3 @@ -131,6 +131,30 @@ fi %{_libdir}/pkgconfig/ctdb.pc %changelog +* Tue Jun 2 2009 : Version 1.0.83 + - Document how to remove a node from a running cluster. + - Hide all deleted nodes from ctdb output. 
+ - Lower the loglevel on some eventscript related items + - Don't queue packets to deleted nodes + - When building initial vnnmap, ignore any nonexisting nodes + - Add a new nodestate : DELETED that is used when deleting a node from an + existing cluster. + - don't remove the ctdb socket when shutting down. This prevents a race in the + initscripts when restarting ctdb quickly after stopping it. + - TDB nesting reworked. + - Remove obsolete ipmux + - From Flavio Carmo Junior: Add eventscript and documentation for ClamAV antivirus engine + - From Sumit Bose: fix the regex in the test to handle the new ctdb + statistics output that was recently added. + - change the socket type we use for gratuitous arps from the obsolete + AF_INET/SOCK_PACKET to instead use PF_PACKET/SOCK_RAW. + - Check return codes for some functions, from Sumit Bose, based on codereview by Jim Meyering. + - Sumit Bose: Remove structure member node_list_file that is no longer used. + - Sumit Bose: fix configure warning for netfilter.h + - Updates to the webpages by Volker. + - Remove error messages about missing /var/log/log.ctdb file from ctdb_diagnostics.sh from Christian Ambach + - Additional error logs if the eventscript switching from daemon to client mode fails. + - track how long it takes for ctdbd and the recovery daemon to perform the rec-lock fcntl() lock attempt and show this in the ctdb statistics output. * Thu May 14 2009 : Version 1.0.82 - Update the "ctdb lvsmaster" command to return -1 on error. 
- Add a -Y flag to "ctdb lvsmaster" diff --git a/ctdb/server/ctdb_keepalive.c b/ctdb/server/ctdb_keepalive.c index 524feb16966..dfe7cfc6221 100644 --- a/ctdb/server/ctdb_keepalive.c +++ b/ctdb/server/ctdb_keepalive.c @@ -37,6 +37,11 @@ static void ctdb_check_for_dead_nodes(struct event_context *ev, struct timed_eve /* send a keepalive to all other nodes, unless */ for (i=0;inum_nodes;i++) { struct ctdb_node *node = ctdb->nodes[i]; + + if (node->flags & NODE_FLAGS_DELETED) { + continue; + } + if (node->pnn == ctdb->pnn) { continue; } diff --git a/ctdb/server/ctdb_recover.c b/ctdb/server/ctdb_recover.c index b4428fa4946..7953c6b4473 100644 --- a/ctdb/server/ctdb_recover.c +++ b/ctdb/server/ctdb_recover.c @@ -242,6 +242,10 @@ ctdb_reload_nodes_event(struct event_context *ev, struct timed_event *te, continue; } + if (ctdb->nodes[i]->flags & NODE_FLAGS_DELETED) { + continue; + } + /* any new or different nodes must be added */ if (ctdb->methods->add_node(ctdb->nodes[i]) != 0) { DEBUG(DEBUG_CRIT, (__location__ " methods->add_node failed at %d\n", i)); @@ -253,6 +257,9 @@ ctdb_reload_nodes_event(struct event_context *ev, struct timed_event *te, } } + /* tell the recovery daemon to reaload the nodes file too */ + ctdb_daemon_send_message(ctdb, ctdb->pnn, CTDB_SRVID_RELOAD_NODES, tdb_null); + talloc_free(tmp_ctx); return; } diff --git a/ctdb/server/ctdb_recoverd.c b/ctdb/server/ctdb_recoverd.c index 995284fbb22..6b2fb5e5556 100644 --- a/ctdb/server/ctdb_recoverd.c +++ b/ctdb/server/ctdb_recoverd.c @@ -1332,12 +1332,6 @@ static int do_recovery(struct ctdb_recoverd *rec, DEBUG(DEBUG_NOTICE, (__location__ " Starting do_recovery\n")); - if (ctdb->num_nodes != nodemap->num) { - DEBUG(DEBUG_ERR, (__location__ " ctdb->num_nodes (%d) != nodemap->num (%d) reloading nodes file\n", ctdb->num_nodes, nodemap->num)); - reload_nodes_file(ctdb); - return -1; - } - /* if recovery fails, force it again */ rec->need_recovery = true; @@ -1803,6 +1797,21 @@ DEBUG(DEBUG_ERR, ("recovery master 
memory dump\n")); talloc_free(tmp_ctx); } +/* + handler for reload_nodes +*/ +static void reload_nodes_handler(struct ctdb_context *ctdb, uint64_t srvid, + TDB_DATA data, void *private_data) +{ + struct ctdb_recoverd *rec = talloc_get_type(private_data, struct ctdb_recoverd); + + DEBUG(DEBUG_ERR, (__location__ " Reload nodes file from recovery daemon\n")); + + reload_nodes_file(rec->ctdb); +} + + + /* handler for recovery master elections */ @@ -2371,6 +2380,9 @@ static void monitor_cluster(struct ctdb_context *ctdb) /* register a message port for vacuum fetch */ ctdb_set_message_handler(ctdb, CTDB_SRVID_VACUUM_FETCH, vacuum_fetch_handler, rec); + /* register a message port for reloadnodes */ + ctdb_set_message_handler(ctdb, CTDB_SRVID_RELOAD_NODES, reload_nodes_handler, rec); + again: if (mem_ctx) { talloc_free(mem_ctx); @@ -2591,14 +2603,16 @@ again: goto again; } for (j=0; jnum; j++) { - if (nodemap->nodes[j].flags & NODE_FLAGS_INACTIVE) { - continue; - } /* release any existing data */ if (ctdb->nodes[j]->public_ips) { talloc_free(ctdb->nodes[j]->public_ips); ctdb->nodes[j]->public_ips = NULL; } + + if (nodemap->nodes[j].flags & NODE_FLAGS_INACTIVE) { + continue; + } + /* grab a new shiny list of public ips from the node */ if (ctdb_ctrl_get_public_ips(ctdb, CONTROL_TIMEOUT(), ctdb->nodes[j]->pnn, diff --git a/ctdb/server/ctdb_server.c b/ctdb/server/ctdb_server.c index bd1d7ed6215..59ed37c6008 100644 --- a/ctdb/server/ctdb_server.c +++ b/ctdb/server/ctdb_server.c @@ -45,6 +45,9 @@ int ctdb_ip_to_nodeid(struct ctdb_context *ctdb, const char *nodeip) int nodeid; for (nodeid=0;nodeidnum_nodes;nodeid++) { + if (ctdb->nodes[nodeid]->flags & NODE_FLAGS_DELETED) { + continue; + } if (!strcmp(ctdb->nodes[nodeid]->address.address, nodeip)) { return nodeid; } @@ -89,7 +92,7 @@ int ctdb_set_tdb_dir_persistent(struct ctdb_context *ctdb, const char *dir) } /* - add a node to the list of active nodes + add a node to the list of nodes */ static int ctdb_add_node(struct 
ctdb_context *ctdb, char *nstr) { @@ -136,6 +139,46 @@ static int ctdb_add_node(struct ctdb_context *ctdb, char *nstr) return 0; } +/* + add an entry for a "deleted" node to the list of nodes. + a "deleted" node is a node that is commented out from the nodes file. + this is used to prevent that subsequent nodes in the nodes list + change their pnn value if a node is "delete" by commenting it out and then + using "ctdb reloadnodes" at runtime. +*/ +static int ctdb_add_deleted_node(struct ctdb_context *ctdb) +{ + struct ctdb_node *node, **nodep; + + nodep = talloc_realloc(ctdb, ctdb->nodes, struct ctdb_node *, ctdb->num_nodes+1); + CTDB_NO_MEMORY(ctdb, nodep); + + ctdb->nodes = nodep; + nodep = &ctdb->nodes[ctdb->num_nodes]; + (*nodep) = talloc_zero(ctdb->nodes, struct ctdb_node); + CTDB_NO_MEMORY(ctdb, *nodep); + node = *nodep; + + if (ctdb_parse_address(ctdb, node, "0.0.0.0", &node->address) != 0) { + DEBUG(DEBUG_ERR,("Failed to setup deleted node %d\n", ctdb->num_nodes)); + return -1; + } + node->ctdb = ctdb; + node->name = talloc_strdup(node, "0.0.0.0:0"); + + /* this assumes that the nodes are kept in sorted order, and no gaps */ + node->pnn = ctdb->num_nodes; + + /* this node is permanently deleted/disconnected */ + node->flags = NODE_FLAGS_DELETED|NODE_FLAGS_DISCONNECTED; + + ctdb->num_nodes++; + node->dead_count = 0; + + return 0; +} + + /* setup the node list from a file */ @@ -143,7 +186,7 @@ int ctdb_set_nlist(struct ctdb_context *ctdb, const char *nlist) { char **lines; int nlines; - int i; + int i, j, num_present; talloc_free(ctdb->nodes); ctdb->nodes = NULL; @@ -158,7 +201,8 @@ int ctdb_set_nlist(struct ctdb_context *ctdb, const char *nlist) nlines--; } - for (i=0;ivnn_map = talloc(ctdb, struct ctdb_vnn_map); CTDB_NO_MEMORY(ctdb, ctdb->vnn_map); ctdb->vnn_map->generation = INVALID_GENERATION; - ctdb->vnn_map->size = ctdb->num_nodes; + ctdb->vnn_map->size = num_present; ctdb->vnn_map->map = talloc_array(ctdb->vnn_map, uint32_t, ctdb->vnn_map->size); 
CTDB_NO_MEMORY(ctdb, ctdb->vnn_map->map); - for(i=0;ivnn_map->size;i++) { - ctdb->vnn_map->map[i] = i; + for(i=0, j=0; i < ctdb->vnn_map->size; i++) { + if (ctdb->nodes[i]->flags & NODE_FLAGS_DELETED) { + continue; + } + ctdb->vnn_map->map[j] = i; + j++; } talloc_free(lines); @@ -219,9 +274,8 @@ uint32_t ctdb_get_num_active_nodes(struct ctdb_context *ctdb) { int i; uint32_t count=0; - for (i=0;ivnn_map->size;i++) { - struct ctdb_node *node = ctdb->nodes[ctdb->vnn_map->map[i]]; - if (!(node->flags & NODE_FLAGS_INACTIVE)) { + for (i=0; i < ctdb->num_nodes; i++) { + if (!(ctdb->nodes[i]->flags & NODE_FLAGS_INACTIVE)) { count++; } } @@ -437,7 +491,10 @@ static void ctdb_broadcast_packet_all(struct ctdb_context *ctdb, struct ctdb_req_header *hdr) { int i; - for (i=0;inum_nodes;i++) { + for (i=0; i < ctdb->num_nodes; i++) { + if (ctdb->nodes[i]->flags & NODE_FLAGS_DELETED) { + continue; + } hdr->destnode = ctdb->nodes[i]->pnn; ctdb_queue_packet(ctdb, hdr); } @@ -463,7 +520,10 @@ static void ctdb_broadcast_packet_connected(struct ctdb_context *ctdb, struct ctdb_req_header *hdr) { int i; - for (i=0;inum_nodes;i++) { + for (i=0; i < ctdb->num_nodes; i++) { + if (ctdb->nodes[i]->flags & NODE_FLAGS_DELETED) { + continue; + } if (!(ctdb->nodes[i]->flags & NODE_FLAGS_DISCONNECTED)) { hdr->destnode = ctdb->nodes[i]->pnn; ctdb_queue_packet(ctdb, hdr); @@ -500,7 +560,12 @@ void ctdb_queue_packet(struct ctdb_context *ctdb, struct ctdb_req_header *hdr) node = ctdb->nodes[hdr->destnode]; - if (hdr->destnode == ctdb->pnn) { + if (node->flags & NODE_FLAGS_DELETED) { + DEBUG(DEBUG_ERR, (__location__ " Can not queue packet to DELETED node %d\n", hdr->destnode)); + return; + } + + if (node->pnn == ctdb->pnn) { ctdb_defer_packet(ctdb, hdr); } else { if (ctdb->methods == NULL) { diff --git a/ctdb/server/ctdb_takeover.c b/ctdb/server/ctdb_takeover.c index 9eac660e8d9..21f7dc84899 100644 --- a/ctdb/server/ctdb_takeover.c +++ b/ctdb/server/ctdb_takeover.c @@ -673,6 +673,10 @@ 
create_merged_ip_list(struct ctdb_context *ctdb, TALLOC_CTX *tmp_ctx) for (i=0;inum_nodes;i++) { public_ips = ctdb->nodes[i]->public_ips; + if (ctdb->nodes[i]->flags & NODE_FLAGS_DELETED) { + continue; + } + /* there were no public ips for this node */ if (public_ips == NULL) { continue; diff --git a/ctdb/server/eventscript.c b/ctdb/server/eventscript.c index 14cd190693e..cc5a89fe7ec 100644 --- a/ctdb/server/eventscript.c +++ b/ctdb/server/eventscript.c @@ -405,7 +405,7 @@ static int ctdb_event_script_v(struct ctdb_context *ctdb, const char *options) continue; } if (!(st.st_mode & S_IXUSR)) { - DEBUG(DEBUG_ERR,("Event script %s is not executable. Ignoring this event script\n", str)); + DEBUG(DEBUG_INFO,("Event script %s is not executable. Ignoring this event script\n", str)); continue; } diff --git a/ctdb/tcp/tcp_connect.c b/ctdb/tcp/tcp_connect.c index 9d28d48a1f0..fc169e70b70 100644 --- a/ctdb/tcp/tcp_connect.c +++ b/ctdb/tcp/tcp_connect.c @@ -296,7 +296,11 @@ static int ctdb_tcp_listen_automatic(struct ctdb_context *ctdb) return -1; } - for (i=0;inum_nodes;i++) { + for (i=0; i < ctdb->num_nodes; i++) { + if (ctdb->nodes[i]->flags & NODE_FLAGS_DELETED) { + continue; + } + /* if node_ip is specified we will only try to bind to that ip. 
*/ diff --git a/ctdb/tcp/tcp_init.c b/ctdb/tcp/tcp_init.c index c0606f0ec54..58ed6c8a28b 100644 --- a/ctdb/tcp/tcp_init.c +++ b/ctdb/tcp/tcp_init.c @@ -69,7 +69,10 @@ static int ctdb_tcp_initialise(struct ctdb_context *ctdb) exit(1); } - for (i=0; inum_nodes; i++) { + for (i=0; i < ctdb->num_nodes; i++) { + if (ctdb->nodes[i]->flags & NODE_FLAGS_DELETED) { + continue; + } if (ctdb_tcp_add_node(ctdb->nodes[i]) != 0) { DEBUG(DEBUG_CRIT, ("methods->add_node failed at %d\n", i)); return -1; @@ -135,7 +138,10 @@ static int ctdb_tcp_start(struct ctdb_context *ctdb) { int i; - for (i=0; inum_nodes; i++) { + for (i=0; i < ctdb->num_nodes; i++) { + if (ctdb->nodes[i]->flags & NODE_FLAGS_DELETED) { + continue; + } ctdb_tcp_connect_node(ctdb->nodes[i]); } diff --git a/ctdb/tools/ctdb.c b/ctdb/tools/ctdb.c index 57e1a8dffe3..118b309d754 100644 --- a/ctdb/tools/ctdb.c +++ b/ctdb/tools/ctdb.c @@ -82,6 +82,10 @@ static void verify_node(struct ctdb_context *ctdb) DEBUG(DEBUG_ERR, ("Node %u does not exist\n", options.pnn)); exit(ERR_NONODE); } + if (nodemap->nodes[options.pnn].flags & NODE_FLAGS_DELETED) { + DEBUG(DEBUG_ERR, ("Node %u is DELETED\n", options.pnn)); + exit(ERR_DISNODE); + } if (nodemap->nodes[options.pnn].flags & NODE_FLAGS_DISCONNECTED) { DEBUG(DEBUG_ERR, ("Node %u is DISCONNECTED\n", options.pnn)); exit(ERR_DISNODE); @@ -472,6 +476,9 @@ static int control_status(struct ctdb_context *ctdb, int argc, const char **argv if(options.machinereadable){ printf(":Node:IP:Disconnected:Banned:Disabled:Unhealthy:\n"); for(i=0;inum;i++){ + if (nodemap->nodes[i].flags & NODE_FLAGS_DELETED) { + continue; + } printf(":%d:%s:%d:%d:%d:%d:\n", nodemap->nodes[i].pnn, ctdb_addr_to_str(&nodemap->nodes[i].addr), !!(nodemap->nodes[i].flags&NODE_FLAGS_DISCONNECTED), @@ -492,9 +499,14 @@ static int control_status(struct ctdb_context *ctdb, int argc, const char **argv { NODE_FLAGS_PERMANENTLY_DISABLED, "DISABLED" }, { NODE_FLAGS_BANNED, "BANNED" }, { NODE_FLAGS_UNHEALTHY, "UNHEALTHY" }, + { 
NODE_FLAGS_DELETED, "DELETED" }, }; char *flags_str = NULL; int j; + + if (nodemap->nodes[i].flags & NODE_FLAGS_DELETED) { + continue; + } for (j=0;jnodes[i].flags & flag_names[j].flag) { if (flags_str == NULL) { @@ -644,6 +656,9 @@ static int control_natgwlist(struct ctdb_context *ctdb, int argc, const char **a /* print the pruned list of nodes belonging to this natgw list */ for(i=0;inum;i++){ + if (nodemap->nodes[i].flags & NODE_FLAGS_DELETED) { + continue; + } printf(":%d:%s:%d:%d:%d:%d:\n", nodemap->nodes[i].pnn, ctdb_addr_to_str(&nodemap->nodes[i].addr), !!(nodemap->nodes[i].flags&NODE_FLAGS_DISCONNECTED), @@ -925,6 +940,9 @@ control_get_all_public_ips(struct ctdb_context *ctdb, TALLOC_CTX *tmp_ctx, struc ip_tree = trbt_create(tmp_ctx, 0); for(i=0;inum;i++){ + if (nodemap->nodes[i].flags & NODE_FLAGS_DELETED) { + continue; + } if (nodemap->nodes[i].flags & NODE_FLAGS_DISCONNECTED) { continue; } @@ -986,7 +1004,7 @@ find_other_host_for_public_ip(struct ctdb_context *ctdb, ctdb_sock_addr *addr) } for(i=0;inum;i++){ - if (nodemap->nodes[i].flags & NODE_FLAGS_DISCONNECTED) { + if (nodemap->nodes[i].flags & NODE_FLAGS_INACTIVE) { continue; } if (nodemap->nodes[i].pnn == options.pnn) { @@ -1103,7 +1121,7 @@ static int control_delip_all(struct ctdb_context *ctdb, int argc, const char **a /* remove it from the nodes that are not hosting the ip currently */ for(i=0;inum;i++){ - if (nodemap->nodes[i].flags & NODE_FLAGS_DISCONNECTED) { + if (nodemap->nodes[i].flags & NODE_FLAGS_INACTIVE) { continue; } if (ctdb_ctrl_get_public_ips(ctdb, TIMELIMIT(), nodemap->nodes[i].pnn, tmp_ctx, &ips) != 0) { @@ -1131,7 +1149,7 @@ static int control_delip_all(struct ctdb_context *ctdb, int argc, const char **a /* remove it from every node (also the one hosting it) */ for(i=0;inum;i++){ - if (nodemap->nodes[i].flags & NODE_FLAGS_DISCONNECTED) { + if (nodemap->nodes[i].flags & NODE_FLAGS_INACTIVE) { continue; } if (ctdb_ctrl_get_public_ips(ctdb, TIMELIMIT(), nodemap->nodes[i].pnn, 
tmp_ctx, &ips) != 0) { @@ -2782,6 +2800,9 @@ static int control_listnodes(struct ctdb_context *ctdb, int argc, const char **a } for(i=0;inum;i++){ + if (nodemap->nodes[i].flags & NODE_FLAGS_DELETED) { + continue; + } printf("%s\n", ctdb_addr_to_str(&nodemap->nodes[i].addr)); }