2006-01-02 21:04:38 +03:00
/*
* net / tipc / bcast . c : TIPC broadcast code
2007-02-09 17:25:21 +03:00
*
2006-01-11 21:14:19 +03:00
* Copyright ( c ) 2004 - 2006 , Ericsson AB
2006-01-02 21:04:38 +03:00
* Copyright ( c ) 2004 , Intel Corporation .
2011-01-07 21:00:11 +03:00
* Copyright ( c ) 2005 , 2010 - 2011 , Wind River Systems
2006-01-02 21:04:38 +03:00
* All rights reserved .
*
2006-01-11 15:30:43 +03:00
* Redistribution and use in source and binary forms , with or without
2006-01-02 21:04:38 +03:00
* modification , are permitted provided that the following conditions are met :
*
2006-01-11 15:30:43 +03:00
* 1. Redistributions of source code must retain the above copyright
* notice , this list of conditions and the following disclaimer .
* 2. Redistributions in binary form must reproduce the above copyright
* notice , this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution .
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission .
2006-01-02 21:04:38 +03:00
*
2006-01-11 15:30:43 +03:00
* Alternatively , this software may be distributed under the terms of the
* GNU General Public License ( " GPL " ) version 2 as published by the Free
* Software Foundation .
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS " AS IS "
* AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT LIMITED TO , THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , SPECIAL , EXEMPLARY , OR
* CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT LIMITED TO , PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , DATA , OR PROFITS ; OR BUSINESS
* INTERRUPTION ) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY , WHETHER IN
* CONTRACT , STRICT LIABILITY , OR TORT ( INCLUDING NEGLIGENCE OR OTHERWISE )
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE , EVEN IF ADVISED OF THE
2006-01-02 21:04:38 +03:00
* POSSIBILITY OF SUCH DAMAGE .
*/
# include "core.h"
# include "link.h"
# include "port.h"
# include "bcast.h"
# define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
# define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
/**
 * struct bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */
struct bcbearer_pair {
        struct tipc_bearer *primary;
        struct tipc_bearer *secondary;
};
/**
 * struct bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines.  Concurrent access is
 * prevented through use of the spinlock "bc_lock".
 */
struct bcbearer {
        struct tipc_bearer bearer;
        struct media media;
        struct bcbearer_pair bpairs[MAX_BEARERS];
        /* indexed by link priority, hence TIPC_MAX_LINK_PRI + 1 entries */
        struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
        struct tipc_node_map remains;
        struct tipc_node_map remains_new;
};
/**
 * struct bclink - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 * @retransmit_to: node that most recently requested a retransmit
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */
struct bclink {
        struct link link;
        struct tipc_node node;
        struct tipc_node *retransmit_to;
};
2010-12-31 21:59:34 +03:00
static struct bcbearer * bcbearer ;
static struct bclink * bclink ;
static struct link * bcl ;
2006-06-27 13:53:55 +04:00
static DEFINE_SPINLOCK ( bc_lock ) ;
2006-01-02 21:04:38 +03:00
2010-12-31 21:59:19 +03:00
/* broadcast-capable node map */
struct tipc_node_map tipc_bcast_nmap ;
2010-05-11 18:30:07 +04:00
const char tipc_bclink_name [ ] = " broadcast-link " ;
2006-01-02 21:04:38 +03:00
2010-10-13 17:20:35 +04:00
static void tipc_nmap_diff ( struct tipc_node_map * nm_a ,
struct tipc_node_map * nm_b ,
struct tipc_node_map * nm_diff ) ;
2006-01-02 21:04:38 +03:00
2006-03-21 09:37:04 +03:00
/* Return the sequence number carried in a buffer's TIPC message header. */
static u32 buf_seqno(struct sk_buff *buf)
{
        struct tipc_msg *hdr = buf_msg(buf);

        return msg_seqno(hdr);
}
2006-01-02 21:04:38 +03:00
2006-03-21 09:37:04 +03:00
static u32 bcbuf_acks ( struct sk_buff * buf )
2006-01-02 21:04:38 +03:00
{
2006-01-13 00:22:32 +03:00
return ( u32 ) ( unsigned long ) TIPC_SKB_CB ( buf ) - > handle ;
2006-01-02 21:04:38 +03:00
}
2006-03-21 09:37:04 +03:00
/* Store the outstanding-acknowledgement count in the skb control block. */
static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
        unsigned long stored = acks;

        TIPC_SKB_CB(buf)->handle = (void *)stored;
}
2006-03-21 09:37:04 +03:00
static void bcbuf_decr_acks ( struct sk_buff * buf )
2006-01-02 21:04:38 +03:00
{
bcbuf_set_acks ( buf , bcbuf_acks ( buf ) - 1 ) ;
}
2010-08-17 15:00:09 +04:00
static void bclink_set_last_sent ( void )
{
if ( bcl - > next_out )
bcl - > fsm_msg_cnt = mod ( buf_seqno ( bcl - > next_out ) - 1 ) ;
else
bcl - > fsm_msg_cnt = mod ( bcl - > next_out_no - 1 ) ;
}
/* Return the sequence number of the most recently sent broadcast packet. */
u32 tipc_bclink_get_last_sent(void)
{
        return bcl->fsm_msg_cnt;
}
2007-02-09 17:25:21 +03:00
/**
2006-01-02 21:04:38 +03:00
* bclink_set_gap - set gap according to contents of current deferred pkt queue
2007-02-09 17:25:21 +03:00
*
2006-01-02 21:04:38 +03:00
* Called with ' node ' locked , bc_lock unlocked
*/
2008-09-03 10:38:32 +04:00
static void bclink_set_gap ( struct tipc_node * n_ptr )
2006-01-02 21:04:38 +03:00
{
struct sk_buff * buf = n_ptr - > bclink . deferred_head ;
n_ptr - > bclink . gap_after = n_ptr - > bclink . gap_to =
mod ( n_ptr - > bclink . last_in ) ;
if ( unlikely ( buf ! = NULL ) )
n_ptr - > bclink . gap_to = mod ( buf_seqno ( buf ) - 1 ) ;
}
2007-02-09 17:25:21 +03:00
/**
2006-01-02 21:04:38 +03:00
* bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
2007-02-09 17:25:21 +03:00
*
2006-01-02 21:04:38 +03:00
* This mechanism endeavours to prevent all nodes in network from trying
* to ACK or NACK at the same time .
2007-02-09 17:25:21 +03:00
*
2006-01-02 21:04:38 +03:00
* Note : TIPC uses a different trigger to distribute ACKs than it does to
2007-02-09 17:25:21 +03:00
* distribute NACKs , but tries to use the same spacing ( divide by 16 ) .
2006-01-02 21:04:38 +03:00
*/
2006-03-21 09:37:04 +03:00
static int bclink_ack_allowed ( u32 n )
2006-01-02 21:04:38 +03:00
{
2010-09-23 00:43:57 +04:00
return ( n % TIPC_MIN_LINK_WIN ) = = tipc_own_tag ;
2006-01-02 21:04:38 +03:00
}
2011-01-18 21:53:16 +03:00
/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bc_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(void)
{
        return bclink->retransmit_to;
}
2007-02-09 17:25:21 +03:00
/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bc_lock locked
 */
static void bclink_retransmit_pkt(u32 after, u32 to)
{
        struct sk_buff *skb;

        /* Locate first packet beyond 'after' in the outbound queue */
        for (skb = bcl->first_out;
             skb && less_eq(buf_seqno(skb), after);
             skb = skb->next)
                ;

        tipc_link_retransmit(bcl, skb, mod(to - after));
}
2007-02-09 17:25:21 +03:00
/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bc_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
        struct sk_buff *crs;
        struct sk_buff *next;
        unsigned int released = 0;

        /* Ignore stale/duplicate acknowledgement info */
        if (less_eq(acked, n_ptr->bclink.acked))
                return;

        spin_lock_bh(&bc_lock);

        /* Skip over packets that node has previously acknowledged */
        crs = bcl->first_out;
        while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
                crs = crs->next;

        /* Update packets that node is now acknowledging */
        while (crs && less_eq(buf_seqno(crs), acked)) {
                next = crs->next;
                bcbuf_decr_acks(crs);
                /* Free the packet once every node has acknowledged it.
                 * NOTE(review): this assumes fully-acked buffers form a
                 * contiguous run at the head of the queue, so first_out
                 * can simply be advanced past each freed buffer. */
                if (bcbuf_acks(crs) == 0) {
                        bcl->first_out = next;
                        bcl->out_queue_size--;
                        buf_discard(crs);
                        released = 1;
                }
                crs = next;
        }
        n_ptr->bclink.acked = acked;

        /* Try resolving broadcast link congestion, if necessary */
        if (unlikely(bcl->next_out)) {
                tipc_link_push_queue(bcl);
                bclink_set_last_sent();
        }
        /* Wake ports blocked on the (previously full) send queue */
        if (unlikely(released && !list_empty(&bcl->waiting_ports)))
                tipc_link_wakeup_ports(bcl, 0);
        spin_unlock_bh(&bc_lock);
}
2007-02-09 17:25:21 +03:00
/**
2006-01-02 21:04:38 +03:00
* bclink_send_ack - unicast an ACK msg
2007-02-09 17:25:21 +03:00
*
2006-01-18 02:38:21 +03:00
* tipc_net_lock and node lock set
2006-01-02 21:04:38 +03:00
*/
2008-09-03 10:38:32 +04:00
static void bclink_send_ack ( struct tipc_node * n_ptr )
2006-01-02 21:04:38 +03:00
{
struct link * l_ptr = n_ptr - > active_links [ n_ptr - > addr & 1 ] ;
if ( l_ptr ! = NULL )
2006-01-18 02:38:21 +03:00
tipc_link_send_proto_msg ( l_ptr , STATE_MSG , 0 , 0 , 0 , 0 , 0 ) ;
2006-01-02 21:04:38 +03:00
}
2007-02-09 17:25:21 +03:00
/**
2006-01-02 21:04:38 +03:00
* bclink_send_nack - broadcast a NACK msg
2007-02-09 17:25:21 +03:00
*
2006-01-18 02:38:21 +03:00
* tipc_net_lock and node lock set
2006-01-02 21:04:38 +03:00
*/
2008-09-03 10:38:32 +04:00
static void bclink_send_nack ( struct tipc_node * n_ptr )
2006-01-02 21:04:38 +03:00
{
struct sk_buff * buf ;
struct tipc_msg * msg ;
if ( ! less ( n_ptr - > bclink . gap_after , n_ptr - > bclink . gap_to ) )
return ;
2010-10-13 17:20:35 +04:00
buf = tipc_buf_acquire ( INT_H_SIZE ) ;
2006-01-02 21:04:38 +03:00
if ( buf ) {
msg = buf_msg ( buf ) ;
2010-05-11 18:30:12 +04:00
tipc_msg_init ( msg , BCAST_PROTOCOL , STATE_MSG ,
2008-06-05 04:37:34 +04:00
INT_H_SIZE , n_ptr - > addr ) ;
2011-01-26 00:12:39 +03:00
msg_set_non_seq ( msg , 1 ) ;
2006-01-02 21:04:38 +03:00
msg_set_mc_netid ( msg , tipc_net_id ) ;
2007-02-09 17:25:21 +03:00
msg_set_bcast_ack ( msg , mod ( n_ptr - > bclink . last_in ) ) ;
2006-01-02 21:04:38 +03:00
msg_set_bcgap_after ( msg , n_ptr - > bclink . gap_after ) ;
msg_set_bcgap_to ( msg , n_ptr - > bclink . gap_to ) ;
msg_set_bcast_tag ( msg , tipc_own_tag ) ;
2006-03-21 09:36:47 +03:00
if ( tipc_bearer_send ( & bcbearer - > bearer , buf , NULL ) ) {
2006-01-02 21:04:38 +03:00
bcl - > stats . sent_nacks + + ;
buf_discard ( buf ) ;
} else {
2006-01-18 02:38:21 +03:00
tipc_bearer_schedule ( bcl - > b_ptr , bcl ) ;
2006-01-02 21:04:38 +03:00
bcl - > proto_msg_queue = buf ;
bcl - > stats . bearer_congs + + ;
}
2007-02-09 17:25:21 +03:00
/*
2006-01-02 21:04:38 +03:00
* Ensure we doesn ' t send another NACK msg to the node
* until 16 more deferred messages arrive from it
* ( i . e . helps prevent all nodes from NACK ' ing at same time )
*/
2007-02-09 17:25:21 +03:00
2006-01-02 21:04:38 +03:00
n_ptr - > bclink . nack_sync = tipc_own_tag ;
}
}
2007-02-09 17:25:21 +03:00
/**
2006-01-18 02:38:21 +03:00
* tipc_bclink_check_gap - send a NACK if a sequence gap exists
2006-01-02 21:04:38 +03:00
*
2006-01-18 02:38:21 +03:00
* tipc_net_lock and node lock set
2006-01-02 21:04:38 +03:00
*/
2008-09-03 10:38:32 +04:00
void tipc_bclink_check_gap ( struct tipc_node * n_ptr , u32 last_sent )
2006-01-02 21:04:38 +03:00
{
if ( ! n_ptr - > bclink . supported | |
less_eq ( last_sent , mod ( n_ptr - > bclink . last_in ) ) )
return ;
bclink_set_gap ( n_ptr ) ;
if ( n_ptr - > bclink . gap_after = = n_ptr - > bclink . gap_to )
n_ptr - > bclink . gap_to = last_sent ;
bclink_send_nack ( n_ptr ) ;
}
2007-02-09 17:25:21 +03:00
/**
 * tipc_bclink_peek_nack - process a NACK msg meant for another node
 *
 * Only tipc_net_lock set.
 *
 * Overhearing another node's NACK lets this node shrink (or grow) its own
 * recorded gap so it doesn't duplicate a request that is already in flight.
 */
static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
{
        struct tipc_node *n_ptr = tipc_node_find(dest);
        u32 my_after, my_to;

        if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
                return;
        tipc_node_lock(n_ptr);
        /*
         * Modify gap to suppress unnecessary NACKs from this node
         */
        my_after = n_ptr->bclink.gap_after;
        my_to = n_ptr->bclink.gap_to;

        if (less_eq(gap_after, my_after)) {
                /* Overheard gap starts at/before ours: trim our gap to the
                 * portion not already covered by the other node's request */
                if (less(my_after, gap_to) && less(gap_to, my_to))
                        n_ptr->bclink.gap_after = gap_to;
                else if (less_eq(my_to, gap_to))
                        n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
        } else if (less_eq(gap_after, my_to)) {
                /* Overheard gap starts inside ours: keep only the prefix */
                if (less_eq(my_to, gap_to))
                        n_ptr->bclink.gap_to = gap_after;
        } else {
                /*
                 * Expand gap if missing bufs not in deferred queue:
                 */
                struct sk_buff *buf = n_ptr->bclink.deferred_head;
                u32 prev = n_ptr->bclink.gap_to;

                /* Walk the deferred queue while it stays contiguous;
                 * buf left non-NULL means gap_after is already deferred */
                for (; buf; buf = buf->next) {
                        u32 seqno = buf_seqno(buf);

                        if (mod(seqno - prev) != 1) {
                                buf = NULL;
                                break;
                        }
                        if (seqno == gap_after)
                                break;
                        prev = seqno;
                }
                if (buf == NULL)
                        n_ptr->bclink.gap_to = gap_after;
        }
        /*
         * Some nodes may send a complementary NACK now:
         */
        if (bclink_ack_allowed(sender_tag + 1)) {
                if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
                        bclink_send_nack(n_ptr);
                        bclink_set_gap(n_ptr);
                }
        }
        tipc_node_unlock(n_ptr);
}
/**
2006-01-18 02:38:21 +03:00
* tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
2006-01-02 21:04:38 +03:00
*/
2006-01-18 02:38:21 +03:00
int tipc_bclink_send_msg ( struct sk_buff * buf )
2006-01-02 21:04:38 +03:00
{
int res ;
spin_lock_bh ( & bc_lock ) ;
2006-01-18 02:38:21 +03:00
res = tipc_link_send_buf ( bcl , buf ) ;
2011-04-19 18:17:58 +04:00
if ( likely ( res > 0 ) )
2010-08-17 15:00:09 +04:00
bclink_set_last_sent ( ) ;
2006-01-02 21:04:38 +03:00
bcl - > stats . queue_sz_counts + + ;
bcl - > stats . accu_queue_sz + = bcl - > out_queue_size ;
spin_unlock_bh ( & bc_lock ) ;
return res ;
}
/**
 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
 *
 * tipc_net_lock is read_locked, no other locks set
 */
void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
        struct tipc_msg *msg = buf_msg(buf);
        struct tipc_node *node;
        u32 next_in;
        u32 seqno;
        struct sk_buff *deferred;

        /* Screen out unwanted broadcast messages */

        if (msg_mc_netid(msg) != tipc_net_id)
                goto exit;

        node = tipc_node_find(msg_prevnode(msg));
        if (unlikely(!node))
                goto exit;

        tipc_node_lock(node);
        if (unlikely(!node->bclink.supported))
                goto unlock;

        /* Broadcast protocol (NACK) messages are handled specially:
         * either retransmit (if addressed to us) or just peek at the
         * overheard NACK to suppress our own duplicate requests */
        if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
                if (msg_destnode(msg) == tipc_own_addr) {
                        tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
                        tipc_node_unlock(node);
                        spin_lock_bh(&bc_lock);
                        bcl->stats.recv_nacks++;
                        bclink->retransmit_to = node;
                        bclink_retransmit_pkt(msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                        spin_unlock_bh(&bc_lock);
                } else {
                        tipc_node_unlock(node);
                        tipc_bclink_peek_nack(msg_destnode(msg),
                                              msg_bcast_tag(msg),
                                              msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                }
                goto exit;
        }

        /* Handle in-sequence broadcast message */

receive:
        next_in = mod(node->bclink.last_in + 1);
        seqno = msg_seqno(msg);

        if (likely(seqno == next_in)) {
                bcl->stats.recv_info++;
                node->bclink.last_in++;
                bclink_set_gap(node);
                if (unlikely(bclink_ack_allowed(seqno))) {
                        bclink_send_ack(node);
                        bcl->stats.sent_acks++;
                }
                /* Deliver upwards; each branch drops the node lock before
                 * calling out, and hands ownership of 'buf' to the callee */
                if (likely(msg_isdata(msg))) {
                        tipc_node_unlock(node);
                        tipc_port_recv_mcast(buf, NULL);
                } else if (msg_user(msg) == MSG_BUNDLER) {
                        bcl->stats.recv_bundles++;
                        bcl->stats.recv_bundled += msg_msgcnt(msg);
                        tipc_node_unlock(node);
                        tipc_link_recv_bundle(buf);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
                        bcl->stats.recv_fragments++;
                        if (tipc_link_recv_fragment(&node->bclink.defragm,
                                                    &buf, &msg))
                                bcl->stats.recv_fragmented++;
                        tipc_node_unlock(node);
                        tipc_net_route_msg(buf);
                } else {
                        tipc_node_unlock(node);
                        tipc_net_route_msg(buf);
                }
                /* buf was consumed above; clear it so exit: won't free it */
                buf = NULL;
                tipc_node_lock(node);
                /* Re-read deferred head after reacquiring the node lock;
                 * if the next packet is already queued, loop back */
                deferred = node->bclink.deferred_head;
                if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
                        buf = deferred;
                        msg = buf_msg(buf);
                        node->bclink.deferred_head = deferred->next;
                        goto receive;
                }
        } else if (less(next_in, seqno)) {
                /* Out-of-sequence packet: defer it and widen recorded gap */
                u32 gap_after = node->bclink.gap_after;
                u32 gap_to = node->bclink.gap_to;

                if (tipc_link_defer_pkt(&node->bclink.deferred_head,
                                        &node->bclink.deferred_tail,
                                        buf)) {
                        node->bclink.nack_sync++;
                        bcl->stats.deferred_recv++;
                        if (seqno == mod(gap_after + 1))
                                node->bclink.gap_after = seqno;
                        else if (less(gap_after, seqno) && less(seqno, gap_to))
                                node->bclink.gap_to = seqno;
                }
                /* deferred queue now owns buf (or it was a duplicate) */
                buf = NULL;
                if (bclink_ack_allowed(node->bclink.nack_sync)) {
                        if (gap_to != gap_after)
                                bclink_send_nack(node);
                        bclink_set_gap(node);
                }
        } else {
                /* seqno < next_in: already seen this packet */
                bcl->stats.duplicates++;
        }
unlock:
        tipc_node_unlock(node);
exit:
        buf_discard(buf);
}
2008-09-03 10:38:32 +04:00
u32 tipc_bclink_acks_missing ( struct tipc_node * n_ptr )
2006-01-02 21:04:38 +03:00
{
return ( n_ptr - > bclink . supported & &
2006-01-18 02:38:21 +03:00
( tipc_bclink_get_last_sent ( ) ! = n_ptr - > bclink . acked ) ) ;
2006-01-02 21:04:38 +03:00
}
/**
2006-01-18 02:38:21 +03:00
* tipc_bcbearer_send - send a packet through the broadcast pseudo - bearer
2007-02-09 17:25:21 +03:00
*
2011-04-07 18:44:54 +04:00
* Send packet over as many bearers as necessary to reach all nodes
* that have joined the broadcast link .
2007-02-09 17:25:21 +03:00
*
2011-04-07 18:44:54 +04:00
* Returns 0 ( packet sent successfully ) under all circumstances ,
* since the broadcast link ' s pseudo - bearer never blocks
2006-01-02 21:04:38 +03:00
*/
2006-03-21 09:37:52 +03:00
static int tipc_bcbearer_send ( struct sk_buff * buf ,
struct tipc_bearer * unused1 ,
struct tipc_media_addr * unused2 )
2006-01-02 21:04:38 +03:00
{
int bp_index ;
2011-04-07 18:44:54 +04:00
/*
* Prepare broadcast link message for reliable transmission ,
* if first time trying to send it ;
* preparation is skipped for broadcast link protocol messages
* since they are sent in an unreliable manner and don ' t need it
*/
2006-01-02 21:04:38 +03:00
if ( likely ( ! msg_non_seq ( buf_msg ( buf ) ) ) ) {
struct tipc_msg * msg ;
2010-12-31 21:59:19 +03:00
bcbuf_set_acks ( buf , tipc_bcast_nmap . count ) ;
2006-01-02 21:04:38 +03:00
msg = buf_msg ( buf ) ;
2008-06-05 04:54:48 +04:00
msg_set_non_seq ( msg , 1 ) ;
2006-01-02 21:04:38 +03:00
msg_set_mc_netid ( msg , tipc_net_id ) ;
2010-08-17 15:00:10 +04:00
bcl - > stats . sent_info + + ;
2011-05-23 21:14:18 +04:00
if ( WARN_ON ( ! tipc_bcast_nmap . count ) ) {
dump_stack ( ) ;
return 0 ;
}
2006-01-02 21:04:38 +03:00
}
/* Send buffer over bearers until all targets reached */
2007-02-09 17:25:21 +03:00
2010-12-31 21:59:19 +03:00
bcbearer - > remains = tipc_bcast_nmap ;
2006-01-02 21:04:38 +03:00
for ( bp_index = 0 ; bp_index < MAX_BEARERS ; bp_index + + ) {
2011-01-07 21:00:11 +03:00
struct tipc_bearer * p = bcbearer - > bpairs [ bp_index ] . primary ;
struct tipc_bearer * s = bcbearer - > bpairs [ bp_index ] . secondary ;
2006-01-02 21:04:38 +03:00
if ( ! p )
break ; /* no more bearers to try */
2006-06-26 10:53:20 +04:00
tipc_nmap_diff ( & bcbearer - > remains , & p - > nodes , & bcbearer - > remains_new ) ;
if ( bcbearer - > remains_new . count = = bcbearer - > remains . count )
2006-01-02 21:04:38 +03:00
continue ; /* bearer pair doesn't add anything */
2011-01-07 21:00:11 +03:00
if ( p - > blocked | |
p - > media - > send_msg ( buf , p , & p - > media - > bcast_addr ) ) {
2010-03-15 11:02:24 +03:00
/* unable to send on primary bearer */
2011-01-07 21:00:11 +03:00
if ( ! s | | s - > blocked | |
s - > media - > send_msg ( buf , s ,
2010-03-15 11:02:24 +03:00
& s - > media - > bcast_addr ) ) {
/* unable to send on either bearer */
continue ;
}
}
if ( s ) {
bcbearer - > bpairs [ bp_index ] . primary = s ;
bcbearer - > bpairs [ bp_index ] . secondary = p ;
2006-01-02 21:04:38 +03:00
}
2006-06-26 10:53:20 +04:00
if ( bcbearer - > remains_new . count = = 0 )
2011-04-07 18:44:54 +04:00
break ; /* all targets reached */
2006-01-02 21:04:38 +03:00
2006-06-26 10:53:20 +04:00
bcbearer - > remains = bcbearer - > remains_new ;
2006-01-02 21:04:38 +03:00
}
2007-02-09 17:25:21 +03:00
2011-04-07 18:44:54 +04:00
return 0 ;
2006-01-02 21:04:38 +03:00
}
/**
2006-01-18 02:38:21 +03:00
* tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
2006-01-02 21:04:38 +03:00
*/
2006-01-18 02:38:21 +03:00
void tipc_bcbearer_sort ( void )
2006-01-02 21:04:38 +03:00
{
struct bcbearer_pair * bp_temp = bcbearer - > bpairs_temp ;
struct bcbearer_pair * bp_curr ;
int b_index ;
int pri ;
spin_lock_bh ( & bc_lock ) ;
/* Group bearers by priority (can assume max of two per priority) */
memset ( bp_temp , 0 , sizeof ( bcbearer - > bpairs_temp ) ) ;
for ( b_index = 0 ; b_index < MAX_BEARERS ; b_index + + ) {
2011-01-07 21:00:11 +03:00
struct tipc_bearer * b = & tipc_bearers [ b_index ] ;
2006-01-02 21:04:38 +03:00
if ( ! b - > active | | ! b - > nodes . count )
continue ;
if ( ! bp_temp [ b - > priority ] . primary )
bp_temp [ b - > priority ] . primary = b ;
else
bp_temp [ b - > priority ] . secondary = b ;
}
/* Create array of bearer pairs for broadcasting */
bp_curr = bcbearer - > bpairs ;
memset ( bcbearer - > bpairs , 0 , sizeof ( bcbearer - > bpairs ) ) ;
2006-01-14 00:22:22 +03:00
for ( pri = TIPC_MAX_LINK_PRI ; pri > = 0 ; pri - - ) {
2006-01-02 21:04:38 +03:00
if ( ! bp_temp [ pri ] . primary )
continue ;
bp_curr - > primary = bp_temp [ pri ] . primary ;
if ( bp_temp [ pri ] . secondary ) {
2006-01-18 02:38:21 +03:00
if ( tipc_nmap_equal ( & bp_temp [ pri ] . primary - > nodes ,
& bp_temp [ pri ] . secondary - > nodes ) ) {
2006-01-02 21:04:38 +03:00
bp_curr - > secondary = bp_temp [ pri ] . secondary ;
} else {
bp_curr + + ;
bp_curr - > primary = bp_temp [ pri ] . secondary ;
}
}
bp_curr + + ;
}
spin_unlock_bh ( & bc_lock ) ;
}
2006-01-18 02:38:21 +03:00
int tipc_bclink_stats ( char * buf , const u32 buf_size )
2006-01-02 21:04:38 +03:00
{
struct print_buf pb ;
if ( ! bcl )
return 0 ;
2006-01-18 02:38:21 +03:00
tipc_printbuf_init ( & pb , buf , buf_size ) ;
2006-01-02 21:04:38 +03:00
spin_lock_bh ( & bc_lock ) ;
tipc_printf ( & pb , " Link <%s> \n "
2007-02-09 17:25:21 +03:00
" Window:%u packets \n " ,
2006-01-02 21:04:38 +03:00
bcl - > name , bcl - > queue_limit [ 0 ] ) ;
2007-02-09 17:25:21 +03:00
tipc_printf ( & pb , " RX packets:%u fragments:%u/%u bundles:%u/%u \n " ,
2006-01-02 21:04:38 +03:00
bcl - > stats . recv_info ,
bcl - > stats . recv_fragments ,
bcl - > stats . recv_fragmented ,
bcl - > stats . recv_bundles ,
bcl - > stats . recv_bundled ) ;
2007-02-09 17:25:21 +03:00
tipc_printf ( & pb , " TX packets:%u fragments:%u/%u bundles:%u/%u \n " ,
2006-01-02 21:04:38 +03:00
bcl - > stats . sent_info ,
bcl - > stats . sent_fragments ,
2007-02-09 17:25:21 +03:00
bcl - > stats . sent_fragmented ,
2006-01-02 21:04:38 +03:00
bcl - > stats . sent_bundles ,
bcl - > stats . sent_bundled ) ;
2007-02-09 17:25:21 +03:00
tipc_printf ( & pb , " RX naks:%u defs:%u dups:%u \n " ,
2006-01-02 21:04:38 +03:00
bcl - > stats . recv_nacks ,
2007-02-09 17:25:21 +03:00
bcl - > stats . deferred_recv ,
2006-01-02 21:04:38 +03:00
bcl - > stats . duplicates ) ;
2007-02-09 17:25:21 +03:00
tipc_printf ( & pb , " TX naks:%u acks:%u dups:%u \n " ,
bcl - > stats . sent_nacks ,
bcl - > stats . sent_acks ,
2006-01-02 21:04:38 +03:00
bcl - > stats . retransmitted ) ;
tipc_printf ( & pb , " Congestion bearer:%u link:%u Send queue max:%u avg:%u \n " ,
bcl - > stats . bearer_congs ,
bcl - > stats . link_congs ,
bcl - > stats . max_queue_sz ,
bcl - > stats . queue_sz_counts
? ( bcl - > stats . accu_queue_sz / bcl - > stats . queue_sz_counts )
: 0 ) ;
spin_unlock_bh ( & bc_lock ) ;
2006-01-18 02:38:21 +03:00
return tipc_printbuf_validate ( & pb ) ;
2006-01-02 21:04:38 +03:00
}
2006-01-18 02:38:21 +03:00
int tipc_bclink_reset_stats ( void )
2006-01-02 21:04:38 +03:00
{
if ( ! bcl )
return - ENOPROTOOPT ;
spin_lock_bh ( & bc_lock ) ;
memset ( & bcl - > stats , 0 , sizeof ( bcl - > stats ) ) ;
spin_unlock_bh ( & bc_lock ) ;
2008-07-15 09:44:01 +04:00
return 0 ;
2006-01-02 21:04:38 +03:00
}
2006-01-18 02:38:21 +03:00
/*
 * Set the broadcast link's send-window limit.
 * Returns -ENOPROTOOPT if the link doesn't exist, -EINVAL if the
 * requested limit falls outside [TIPC_MIN_LINK_WIN, TIPC_MAX_LINK_WIN].
 */
int tipc_bclink_set_queue_limits(u32 limit)
{
        if (!bcl)
                return -ENOPROTOOPT;
        if (limit < TIPC_MIN_LINK_WIN)
                return -EINVAL;
        if (limit > TIPC_MAX_LINK_WIN)
                return -EINVAL;

        spin_lock_bh(&bc_lock);
        tipc_link_set_queue_limits(bcl, limit);
        spin_unlock_bh(&bc_lock);

        return 0;
}
2006-01-18 02:38:21 +03:00
int tipc_bclink_init ( void )
2006-01-02 21:04:38 +03:00
{
2006-11-21 06:22:12 +03:00
bcbearer = kzalloc ( sizeof ( * bcbearer ) , GFP_ATOMIC ) ;
bclink = kzalloc ( sizeof ( * bclink ) , GFP_ATOMIC ) ;
2006-01-02 21:04:38 +03:00
if ( ! bcbearer | | ! bclink ) {
2011-04-07 18:22:31 +04:00
warn ( " Broadcast link creation failed, no memory \n " ) ;
2006-01-02 21:04:38 +03:00
kfree ( bcbearer ) ;
bcbearer = NULL ;
kfree ( bclink ) ;
bclink = NULL ;
return - ENOMEM ;
}
INIT_LIST_HEAD ( & bcbearer - > bearer . cong_links ) ;
bcbearer - > bearer . media = & bcbearer - > media ;
2006-01-18 02:38:21 +03:00
bcbearer - > media . send_msg = tipc_bcbearer_send ;
2011-04-07 18:22:31 +04:00
sprintf ( bcbearer - > media . name , " tipc-broadcast " ) ;
2006-01-02 21:04:38 +03:00
bcl = & bclink - > link ;
INIT_LIST_HEAD ( & bcl - > waiting_ports ) ;
bcl - > next_out_no = 1 ;
2006-06-27 13:53:55 +04:00
spin_lock_init ( & bclink - > node . lock ) ;
2006-01-02 21:04:38 +03:00
bcl - > owner = & bclink - > node ;
2007-02-09 17:25:21 +03:00
bcl - > max_pkt = MAX_PKT_DEFAULT_MCAST ;
2006-01-18 02:38:21 +03:00
tipc_link_set_queue_limits ( bcl , BCLINK_WIN_DEFAULT ) ;
2006-01-02 21:04:38 +03:00
bcl - > b_ptr = & bcbearer - > bearer ;
bcl - > state = WORKING_WORKING ;
2009-03-19 05:11:29 +03:00
strlcpy ( bcl - > name , tipc_bclink_name , TIPC_MAX_LINK_NAME ) ;
2006-01-02 21:04:38 +03:00
2008-07-15 09:44:01 +04:00
return 0 ;
2006-01-02 21:04:38 +03:00
}
2006-01-18 02:38:21 +03:00
void tipc_bclink_stop ( void )
2006-01-02 21:04:38 +03:00
{
spin_lock_bh ( & bc_lock ) ;
if ( bcbearer ) {
2006-01-18 02:38:21 +03:00
tipc_link_stop ( bcl ) ;
2006-01-02 21:04:38 +03:00
bcl = NULL ;
kfree ( bclink ) ;
bclink = NULL ;
kfree ( bcbearer ) ;
bcbearer = NULL ;
}
spin_unlock_bh ( & bc_lock ) ;
}
2010-05-11 18:30:14 +04:00
/**
 * tipc_nmap_add - add a node to a node map
 */
void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
        int n = tipc_node(node);
        int word = n / WSIZE;
        u32 bit = 1 << (n % WSIZE);

        /* Only bump the count if the node wasn't already present */
        if (nm_ptr->map[word] & bit)
                return;

        nm_ptr->count++;
        nm_ptr->map[word] |= bit;
}
/**
 * tipc_nmap_remove - remove a node from a node map
 */
void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
        int n = tipc_node(node);
        int word = n / WSIZE;
        u32 bit = 1 << (n % WSIZE);

        /* Only decrement the count if the node was actually present */
        if (!(nm_ptr->map[word] & bit))
                return;

        nm_ptr->map[word] &= ~bit;
        nm_ptr->count--;
}
/**
* tipc_nmap_diff - find differences between node maps
* @ nm_a : input node map A
* @ nm_b : input node map B
* @ nm_diff : output node map A - B ( i . e . nodes of A that are not in B )
*/
2010-10-13 17:20:35 +04:00
static void tipc_nmap_diff ( struct tipc_node_map * nm_a ,
struct tipc_node_map * nm_b ,
struct tipc_node_map * nm_diff )
2010-05-11 18:30:14 +04:00
{
int stop = ARRAY_SIZE ( nm_a - > map ) ;
int w ;
int b ;
u32 map ;
memset ( nm_diff , 0 , sizeof ( * nm_diff ) ) ;
for ( w = 0 ; w < stop ; w + + ) {
map = nm_a - > map [ w ] ^ ( nm_a - > map [ w ] & nm_b - > map [ w ] ) ;
nm_diff - > map [ w ] = map ;
if ( map ! = 0 ) {
for ( b = 0 ; b < WSIZE ; b + + ) {
if ( map & ( 1 < < b ) )
nm_diff - > count + + ;
}
}
}
}
2010-05-11 18:30:15 +04:00
/**
 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
 */
void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
{
        struct port_list *item = pl_ptr;
        int i;
        int item_sz = PLSIZE;
        int cnt = pl_ptr->count;

        /* Walk the chain of fixed-size chunks, PLSIZE ports per chunk */
        for (;; cnt -= item_sz, item = item->next) {
                if (cnt < PLSIZE)
                        item_sz = cnt;  /* last chunk is partially filled */

                /* Reject duplicates within this chunk */
                for (i = 0; i < item_sz; i++) {
                        if (item->ports[i] == port)
                                return;
                }

                /* Room left in this chunk: store the port here */
                if (i < PLSIZE) {
                        item->ports[i] = port;
                        pl_ptr->count++;
                        return;
                }

                /* Chunk full: extend the chain if necessary */
                if (!item->next) {
                        item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
                        if (!item->next) {
                                warn("Incomplete multicast delivery, no memory\n");
                                return;
                        }
                        item->next->next = NULL;
                }
        }
}
/**
* tipc_port_list_free - free dynamically created entries in port_list chain
*
*/
void tipc_port_list_free ( struct port_list * pl_ptr )
{
struct port_list * item ;
struct port_list * next ;
for ( item = pl_ptr - > next ; item ; item = next ) {
next = item - > next ;
kfree ( item ) ;
}
}