/*
* net/tipc/name_distr.c: TIPC name distribution code
*
* Copyright (c) 2000-2006, 2014, Ericsson AB
* Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "link.h"
#include "name_distr.h"
/**
 * struct publ_list - list of publications made by this node
 * @list: circular list of publications
 * @size: number of entries in list
 */
struct publ_list {
struct list_head list;
u32 size;
};
static struct publ_list publ_zone = {
.list = LIST_HEAD_INIT(publ_zone.list),
.size = 0,
};
static struct publ_list publ_cluster = {
.list = LIST_HEAD_INIT(publ_cluster.list),
.size = 0,
};
static struct publ_list publ_node = {
.list = LIST_HEAD_INIT(publ_node.list),
.size = 0,
};
static struct publ_list *publ_lists[] = {
NULL,
&publ_zone, /* publ_lists[TIPC_ZONE_SCOPE] */
&publ_cluster, /* publ_lists[TIPC_CLUSTER_SCOPE] */
&publ_node /* publ_lists[TIPC_NODE_SCOPE] */
};
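/* Retention time (ms) for a failed name table update on the backlog queue */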
int sysctl_tipc_named_timeout __read_mostly = 2000;
/**
 * tipc_dist_queue - resiliency queue holding deferred name table updates
 *
 * Name table updates are distributed asynchronously, so conflicting
 * (overlapping) publications from two nodes may only collide on a third
 * node. When insertion of an arriving publication fails, the update is
 * retained here for sysctl_tipc_named_timeout ms, in the expectation that
 * a subsequent update will clear the conflict; if none arrives in time,
 * the update is dropped.
 */
static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);
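/**
 * struct distr_queue_item - deferred name table update
 * @i: publication info, kept in network byte order as received
 * @dtype: message type (PUBLICATION or WITHDRAWAL)
 * @node: network address of the originating node
 * @expires: jiffies value after which the update is dropped
 * @next: chains the item into tipc_dist_queue
 */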
struct distr_queue_item {
struct distr_item i;
u32 dtype;
u32 node;
unsigned long expires;
struct list_head next;
};
/**
 * publ_to_item - copy publication info into a distribution item,
 * converting all fields to network byte order
 */
static void publ_to_item(struct distr_item *i, struct publication *p)
{
i->type = htonl(p->type);
i->lower = htonl(p->lower);
i->upper = htonl(p->upper);
i->ref = htonl(p->ref);
i->key = htonl(p->key);
}
/**
* named_prepare_buf - allocate & initialize a publication message
*/
static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
{
struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
struct tipc_msg *msg;
if (buf != NULL) {
msg = buf_msg(buf);
tipc_msg_init(msg, NAME_DISTRIBUTOR, type, INT_H_SIZE, dest);
msg_set_size(msg, INT_H_SIZE + size);
}
return buf;
}
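/**
 * named_cluster_distribute - distribute a name table message to all other
 * nodes in the cluster that have at least one active link
 */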
void named_cluster_distribute(struct sk_buff *buf)
{
struct sk_buff *obuf;
struct tipc_node *node;
u32 dnode;
rcu_read_lock();
list_for_each_entry_rcu(node, &tipc_node_list, list) {
dnode = node->addr;
if (in_own_node(dnode))
continue;
if (!tipc_node_active_links(node))
continue;
obuf = skb_copy(buf, GFP_ATOMIC);
if (!obuf)
break;
msg_set_destnode(buf_msg(obuf), dnode);
tipc_link_xmit(obuf, dnode, dnode);
}
rcu_read_unlock();
kfree_skb(buf);
}
/**
* tipc_named_publish - tell other nodes about a new publication by this node
*/
struct sk_buff *tipc_named_publish(struct publication *publ)
{
struct sk_buff *buf;
struct distr_item *item;
list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list);
publ_lists[publ->scope]->size++;
if (publ->scope == TIPC_NODE_SCOPE)
return NULL;
buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
if (!buf) {
pr_warn("Publication distribution failure\n");
return NULL;
}
item = (struct distr_item *)msg_data(buf_msg(buf));
publ_to_item(item, publ);
return buf;
}
/**
* tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
*/
struct sk_buff *tipc_named_withdraw(struct publication *publ)
{
struct sk_buff *buf;
struct distr_item *item;
list_del(&publ->local_list);
publ_lists[publ->scope]->size--;
if (publ->scope == TIPC_NODE_SCOPE)
return NULL;
buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
if (!buf) {
pr_warn("Withdrawal distribution failure\n");
return NULL;
}
item = (struct distr_item *)msg_data(buf_msg(buf));
publ_to_item(item, publ);
return buf;
}
/**
* named_distribute - prepare name info for bulk distribution to another node
* @msg_list: list of messages (buffers) to be returned from this function
* @dnode: node to be updated
* @pls: linked list of publication items to be packed into buffer chain
*/
static void named_distribute(struct list_head *msg_list, u32 dnode,
struct publ_list *pls)
{
struct publication *publ;
struct sk_buff *buf = NULL;
struct distr_item *item = NULL;
uint dsz = pls->size * ITEM_SIZE;
uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE;
uint rem = dsz;
uint msg_rem = 0;
list_for_each_entry(publ, &pls->list, local_list) {
/* Prepare next buffer: */
if (!buf) {
msg_rem = min_t(uint, rem, msg_dsz);
rem -= msg_rem;
buf = named_prepare_buf(PUBLICATION, msg_rem, dnode);
if (!buf) {
pr_warn("Bulk publication failure\n");
return;
}
item = (struct distr_item *)msg_data(buf_msg(buf));
}
/* Pack publication into message: */
publ_to_item(item, publ);
item++;
msg_rem -= ITEM_SIZE;
/* Append full buffer to list: */
if (!msg_rem) {
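/* A struct sk_buff starts with a next/prev pointer pair,
 * so the buffer can be linked in as if it were a list_head:
 */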
list_add_tail((struct list_head *)buf, msg_list);
buf = NULL;
}
}
}
/**
* tipc_named_node_up - tell specified node about all publications by this node
*/
void tipc_named_node_up(u32 dnode)
{
LIST_HEAD(msg_list);
struct sk_buff *buf_chain;
read_lock_bh(&tipc_nametbl_lock);
named_distribute(&msg_list, dnode, &publ_cluster);
named_distribute(&msg_list, dnode, &publ_zone);
read_unlock_bh(&tipc_nametbl_lock);
/* Convert circular list to linear list and send: */
buf_chain = (struct sk_buff *)msg_list.next;
((struct sk_buff *)msg_list.prev)->next = NULL;
tipc_link_xmit(buf_chain, dnode, dnode);
}
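/**
 * tipc_publ_subscribe - attach a remote publication to its issuing node,
 * so that it can be purged if that node fails
 */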
static void tipc_publ_subscribe(struct publication *publ, u32 addr)
{
struct tipc_node *node;
if (in_own_node(addr))
return;
node = tipc_node_find(addr);
if (!node) {
pr_warn("Node subscription rejected, unknown node 0x%x\n",
addr);
return;
}
tipc_node_lock(node);
list_add_tail(&publ->nodesub_list, &node->publ_list);
tipc_node_unlock(node);
}
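/**
 * tipc_publ_unsubscribe - detach a publication from its issuing node
 */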
static void tipc_publ_unsubscribe(struct publication *publ, u32 addr)
{
struct tipc_node *node;
node = tipc_node_find(addr);
if (!node)
return;
tipc_node_lock(node);
list_del_init(&publ->nodesub_list);
tipc_node_unlock(node);
}
/**
* tipc_publ_purge - remove publication associated with a failed node
*
* Invoked for each publication issued by a newly failed node.
* Removes publication structure from name table & deletes it.
*/
static void tipc_publ_purge(struct publication *publ, u32 addr)
{
struct publication *p;
write_lock_bh(&tipc_nametbl_lock);
p = tipc_nametbl_remove_publ(publ->type, publ->lower,
publ->node, publ->ref, publ->key);
if (p)
tipc_publ_unsubscribe(p, addr);
write_unlock_bh(&tipc_nametbl_lock);
if (p != publ) {
pr_err("Unable to remove publication from failed node\n"
" (type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
publ->type, publ->lower, publ->node, publ->ref,
publ->key);
}
kfree(p);
}
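/**
 * tipc_publ_notify - purge all publications issued by a failed node
 */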
void tipc_publ_notify(struct list_head *nsub_list, u32 addr)
{
struct publication *publ, *tmp;
list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
tipc_publ_purge(publ, addr);
}
/**
 * tipc_update_nametbl - try to process a name table update and notify
 * subscribers
 *
 * tipc_nametbl_lock must be held.
 * Returns true if the update was successfully processed, otherwise false.
 */
static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype)
{
struct publication *publ = NULL;
if (dtype == PUBLICATION) {
publ = tipc_nametbl_insert_publ(ntohl(i->type), ntohl(i->lower),
ntohl(i->upper),
TIPC_CLUSTER_SCOPE, node,
ntohl(i->ref), ntohl(i->key));
if (publ) {
tipc_publ_subscribe(publ, node);
return true;
}
} else if (dtype == WITHDRAWAL) {
publ = tipc_nametbl_remove_publ(ntohl(i->type), ntohl(i->lower),
node, ntohl(i->ref),
ntohl(i->key));
if (publ) {
tipc_publ_unsubscribe(publ, node);
kfree(publ);
return true;
}
} else {
pr_warn("Unrecognized name table message received\n");
}
return false;
}
/**
 * tipc_named_add_backlog - add a failed name table update to the backlog
 */
static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
{
struct distr_queue_item *e;
unsigned long now = get_jiffies_64();
e = kzalloc(sizeof(*e), GFP_ATOMIC);
if (!e)
return;
e->dtype = type;
e->node = node;
e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
memcpy(&e->i, i, sizeof(*i));
list_add_tail(&e->next, &tipc_dist_queue);
}
/**
* tipc_named_process_backlog - try to process any pending name table updates
* from the network.
*/
void tipc_named_process_backlog(void)
{
struct distr_queue_item *e, *tmp;
char addr[16];
unsigned long now = get_jiffies_64();
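/* Retry each deferred update until it either succeeds or expires: */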
list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
if (time_after(e->expires, now)) {
if (!tipc_update_nametbl(&e->i, e->node, e->dtype))
continue;
} else {
tipc_addr_string_fill(addr, e->node);
pr_warn_ratelimited("Dropping name table update (%d) of {%u, %u, %u} from %s key=%u\n",
e->dtype, ntohl(e->i.type),
ntohl(e->i.lower),
ntohl(e->i.upper),
addr, ntohl(e->i.key));
}
list_del(&e->next);
kfree(e);
}
}
/**
* tipc_named_rcv - process name table update message sent by another node
*/
void tipc_named_rcv(struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
struct distr_item *item = (struct distr_item *)msg_data(msg);
u32 count = msg_data_sz(msg) / ITEM_SIZE;
u32 node = msg_orignode(msg);
write_lock_bh(&tipc_nametbl_lock);
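/* Apply each item in the message; defer failed updates to the backlog */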
while (count--) {
if (!tipc_update_nametbl(item, node, msg_type(msg)))
tipc_named_add_backlog(item, msg_type(msg), node);
item++;
}
tipc_named_process_backlog();
write_unlock_bh(&tipc_nametbl_lock);
kfree_skb(buf);
}
/**
* tipc_named_reinit - re-initialize local publications
*
* This routine is called whenever TIPC networking is enabled.
* All name table entries published by this node are updated to reflect
* the node's new network address.
*/
void tipc_named_reinit(void)
{
struct publication *publ;
int scope;
write_lock_bh(&tipc_nametbl_lock);
for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
list_for_each_entry(publ, &publ_lists[scope]->list, local_list)
publ->node = tipc_own_addr;
write_unlock_bh(&tipc_nametbl_lock);
}