/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#include "main.h"
#include "distributed-arp-table.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "unicast.h"
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"
2012-11-10 14:00:32 +04:00
/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key ;
2012-05-12 20:34:00 +04:00
static void batadv_purge_orig ( struct work_struct * work ) ;
2010-12-13 14:19:28 +03:00
2011-06-15 17:08:59 +04:00
/* returns 1 if they are the same originator */
2012-05-12 20:34:00 +04:00
static int batadv_compare_orig ( const struct hlist_node * node , const void * data2 )
2011-06-15 17:08:59 +04:00
{
2012-06-06 00:31:31 +04:00
const void * data1 = container_of ( node , struct batadv_orig_node ,
hash_entry ) ;
2011-06-15 17:08:59 +04:00
return ( memcmp ( data1 , data2 , ETH_ALEN ) = = 0 ? 1 : 0 ) ;
}
2012-06-06 00:31:31 +04:00
int batadv_originator_init ( struct batadv_priv * bat_priv )
2010-12-13 14:19:28 +03:00
{
if ( bat_priv - > orig_hash )
2012-05-05 15:27:28 +04:00
return 0 ;
2010-12-13 14:19:28 +03:00
2012-05-12 04:09:32 +04:00
bat_priv - > orig_hash = batadv_hash_new ( 1024 ) ;
2010-12-13 14:19:28 +03:00
if ( ! bat_priv - > orig_hash )
goto err ;
2012-11-10 14:00:32 +04:00
batadv_hash_set_lock_class ( bat_priv - > orig_hash ,
& batadv_orig_hash_lock_class_key ) ;
2012-12-25 16:14:37 +04:00
INIT_DELAYED_WORK ( & bat_priv - > orig_work , batadv_purge_orig ) ;
queue_delayed_work ( batadv_event_workqueue ,
& bat_priv - > orig_work ,
msecs_to_jiffies ( BATADV_ORIG_WORK_PERIOD ) ) ;
2012-05-05 15:27:28 +04:00
return 0 ;
2010-12-13 14:19:28 +03:00
err :
2012-05-05 15:27:28 +04:00
return - ENOMEM ;
2010-12-13 14:19:28 +03:00
}
2012-06-06 00:31:31 +04:00
void batadv_neigh_node_free_ref ( struct batadv_neigh_node * neigh_node )
2011-01-19 23:01:43 +03:00
{
2011-02-10 17:33:53 +03:00
if ( atomic_dec_and_test ( & neigh_node - > refcount ) )
2011-05-02 10:27:50 +04:00
kfree_rcu ( neigh_node , rcu ) ;
2011-01-19 23:01:43 +03:00
}
2011-03-15 01:43:37 +03:00
/* increases the refcounter of a found router */
2012-06-06 00:31:31 +04:00
struct batadv_neigh_node *
batadv_orig_node_get_router ( struct batadv_orig_node * orig_node )
2011-03-15 01:43:37 +03:00
{
2012-06-06 00:31:31 +04:00
struct batadv_neigh_node * router ;
2011-03-15 01:43:37 +03:00
rcu_read_lock ( ) ;
router = rcu_dereference ( orig_node - > router ) ;
if ( router & & ! atomic_inc_not_zero ( & router - > refcount ) )
router = NULL ;
rcu_read_unlock ( ) ;
return router ;
}
2012-06-06 00:31:31 +04:00
struct batadv_neigh_node *
batadv_neigh_node_new ( struct batadv_hard_iface * hard_iface ,
const uint8_t * neigh_addr , uint32_t seqno )
2010-12-13 14:19:28 +03:00
{
2012-06-06 00:31:31 +04:00
struct batadv_priv * bat_priv = netdev_priv ( hard_iface - > soft_iface ) ;
struct batadv_neigh_node * neigh_node ;
2010-12-13 14:19:28 +03:00
2011-05-15 01:14:54 +04:00
neigh_node = kzalloc ( sizeof ( * neigh_node ) , GFP_ATOMIC ) ;
2010-12-13 14:19:28 +03:00
if ( ! neigh_node )
2012-03-01 11:35:21 +04:00
goto out ;
2010-12-13 14:19:28 +03:00
2010-12-13 00:57:11 +03:00
INIT_HLIST_NODE ( & neigh_node - > list ) ;
2010-12-13 14:19:28 +03:00
2012-03-01 11:35:21 +04:00
memcpy ( neigh_node - > addr , neigh_addr , ETH_ALEN ) ;
2012-03-17 11:28:32 +04:00
spin_lock_init ( & neigh_node - > lq_update_lock ) ;
2011-02-18 15:28:11 +03:00
/* extra reference for return */
atomic_set ( & neigh_node - > refcount , 2 ) ;
2010-12-13 14:19:28 +03:00
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_BATMAN , bat_priv ,
2012-05-12 15:48:58 +04:00
" Creating new neighbor %pM, initial seqno %d \n " ,
neigh_addr , seqno ) ;
2012-03-01 11:35:21 +04:00
out :
2010-12-13 14:19:28 +03:00
return neigh_node ;
}
2012-05-12 20:34:00 +04:00
static void batadv_orig_node_free_rcu ( struct rcu_head * rcu )
2010-12-13 14:19:28 +03:00
{
2010-12-13 00:57:11 +03:00
struct hlist_node * node , * node_tmp ;
2012-06-06 00:31:31 +04:00
struct batadv_neigh_node * neigh_node , * tmp_neigh_node ;
struct batadv_orig_node * orig_node ;
2011-01-19 23:01:42 +03:00
2012-06-06 00:31:31 +04:00
orig_node = container_of ( rcu , struct batadv_orig_node , rcu ) ;
2010-12-13 14:19:28 +03:00
2010-12-13 00:57:12 +03:00
spin_lock_bh ( & orig_node - > neigh_list_lock ) ;
2011-01-19 23:01:43 +03:00
/* for all bonding members ... */
list_for_each_entry_safe ( neigh_node , tmp_neigh_node ,
& orig_node - > bond_list , bonding_list ) {
list_del_rcu ( & neigh_node - > bonding_list ) ;
2012-05-12 04:09:34 +04:00
batadv_neigh_node_free_ref ( neigh_node ) ;
2011-01-19 23:01:43 +03:00
}
2010-12-13 14:19:28 +03:00
/* for all neighbors towards this originator ... */
2010-12-13 00:57:11 +03:00
hlist_for_each_entry_safe ( neigh_node , node , node_tmp ,
& orig_node - > neigh_list , list ) {
2010-12-13 00:57:12 +03:00
hlist_del_rcu ( & neigh_node - > list ) ;
2012-05-12 04:09:34 +04:00
batadv_neigh_node_free_ref ( neigh_node ) ;
2010-12-13 14:19:28 +03:00
}
2010-12-13 00:57:12 +03:00
spin_unlock_bh ( & orig_node - > neigh_list_lock ) ;
2012-05-12 04:09:40 +04:00
batadv_frag_list_free ( & orig_node - > frag_list ) ;
2012-05-12 04:09:39 +04:00
batadv_tt_global_del_orig ( orig_node - > bat_priv , orig_node ,
" originator timed out " ) ;
2010-12-13 14:19:28 +03:00
2011-04-27 16:27:44 +04:00
kfree ( orig_node - > tt_buff ) ;
2010-12-13 14:19:28 +03:00
kfree ( orig_node - > bcast_own ) ;
kfree ( orig_node - > bcast_own_sum ) ;
kfree ( orig_node ) ;
}
2012-06-06 00:31:31 +04:00
void batadv_orig_node_free_ref ( struct batadv_orig_node * orig_node )
2011-02-18 15:28:10 +03:00
{
if ( atomic_dec_and_test ( & orig_node - > refcount ) )
2012-05-12 20:34:00 +04:00
call_rcu ( & orig_node - > rcu , batadv_orig_node_free_rcu ) ;
2011-02-18 15:28:10 +03:00
}
2012-06-06 00:31:31 +04:00
void batadv_originator_free ( struct batadv_priv * bat_priv )
2010-12-13 14:19:28 +03:00
{
2012-06-06 00:31:28 +04:00
struct batadv_hashtable * hash = bat_priv - > orig_hash ;
2011-02-18 15:28:09 +03:00
struct hlist_node * node , * node_tmp ;
2011-01-19 23:01:42 +03:00
struct hlist_head * head ;
spinlock_t * list_lock ; /* spinlock to protect write access */
2012-06-06 00:31:31 +04:00
struct batadv_orig_node * orig_node ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
2011-01-19 23:01:42 +03:00
if ( ! hash )
2010-12-13 14:19:28 +03:00
return ;
cancel_delayed_work_sync ( & bat_priv - > orig_work ) ;
bat_priv - > orig_hash = NULL ;
2011-01-19 23:01:42 +03:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
list_lock = & hash - > list_locks [ i ] ;
spin_lock_bh ( list_lock ) ;
2011-02-18 15:28:09 +03:00
hlist_for_each_entry_safe ( orig_node , node , node_tmp ,
head , hash_entry ) {
hlist_del_rcu ( node ) ;
2012-05-12 04:09:34 +04:00
batadv_orig_node_free_ref ( orig_node ) ;
2011-01-19 23:01:42 +03:00
}
spin_unlock_bh ( list_lock ) ;
}
2012-05-12 04:09:32 +04:00
batadv_hash_destroy ( hash ) ;
2010-12-13 14:19:28 +03:00
}
/* this function finds or creates an originator entry for the given
2012-05-12 04:09:43 +04:00
* address if it does not exits
*/
2012-06-06 00:31:31 +04:00
struct batadv_orig_node * batadv_get_orig_node ( struct batadv_priv * bat_priv ,
const uint8_t * addr )
2010-12-13 14:19:28 +03:00
{
2012-06-06 00:31:31 +04:00
struct batadv_orig_node * orig_node ;
2010-12-13 14:19:28 +03:00
int size ;
int hash_added ;
2012-06-04 00:19:17 +04:00
unsigned long reset_time ;
2010-12-13 14:19:28 +03:00
2012-05-12 15:48:56 +04:00
orig_node = batadv_orig_hash_find ( bat_priv , addr ) ;
2011-02-18 15:28:09 +03:00
if ( orig_node )
2010-12-13 14:19:28 +03:00
return orig_node ;
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_BATMAN , bat_priv ,
" Creating new originator: %pM \n " , addr ) ;
2010-12-13 14:19:28 +03:00
2011-05-15 01:14:54 +04:00
orig_node = kzalloc ( sizeof ( * orig_node ) , GFP_ATOMIC ) ;
2010-12-13 14:19:28 +03:00
if ( ! orig_node )
return NULL ;
2010-12-13 00:57:11 +03:00
INIT_HLIST_HEAD ( & orig_node - > neigh_list ) ;
2011-01-19 23:01:43 +03:00
INIT_LIST_HEAD ( & orig_node - > bond_list ) ;
2011-01-19 23:01:42 +03:00
spin_lock_init ( & orig_node - > ogm_cnt_lock ) ;
2011-01-26 00:52:11 +03:00
spin_lock_init ( & orig_node - > bcast_seqno_lock ) ;
2010-12-13 00:57:12 +03:00
spin_lock_init ( & orig_node - > neigh_list_lock ) ;
2011-04-27 16:27:44 +04:00
spin_lock_init ( & orig_node - > tt_buff_lock ) ;
2011-02-18 15:28:10 +03:00
/* extra reference for return */
atomic_set ( & orig_node - > refcount , 2 ) ;
2010-12-13 14:19:28 +03:00
2011-11-07 19:36:40 +04:00
orig_node - > tt_initialised = false ;
2011-01-19 23:01:42 +03:00
orig_node - > bat_priv = bat_priv ;
2010-12-13 14:19:28 +03:00
memcpy ( orig_node - > orig , addr , ETH_ALEN ) ;
2011-11-23 14:35:44 +04:00
batadv_dat_init_orig_node_addr ( orig_node ) ;
2010-12-13 14:19:28 +03:00
orig_node - > router = NULL ;
2011-07-07 03:40:57 +04:00
orig_node - > tt_crc = 0 ;
atomic_set ( & orig_node - > last_ttvn , 0 ) ;
2011-05-05 10:42:45 +04:00
orig_node - > tt_buff = NULL ;
2011-04-27 16:27:44 +04:00
orig_node - > tt_buff_len = 0 ;
atomic_set ( & orig_node - > tt_size , 0 ) ;
2012-06-04 00:19:17 +04:00
reset_time = jiffies - 1 - msecs_to_jiffies ( BATADV_RESET_PROTECTION_MS ) ;
orig_node - > bcast_seqno_reset = reset_time ;
orig_node - > batman_seqno_reset = reset_time ;
2010-12-13 14:19:28 +03:00
2011-01-19 23:01:43 +03:00
atomic_set ( & orig_node - > bond_candidates , 0 ) ;
2012-06-04 00:19:17 +04:00
size = bat_priv - > num_ifaces * sizeof ( unsigned long ) * BATADV_NUM_WORDS ;
2010-12-13 14:19:28 +03:00
orig_node - > bcast_own = kzalloc ( size , GFP_ATOMIC ) ;
if ( ! orig_node - > bcast_own )
goto free_orig_node ;
size = bat_priv - > num_ifaces * sizeof ( uint8_t ) ;
orig_node - > bcast_own_sum = kzalloc ( size , GFP_ATOMIC ) ;
INIT_LIST_HEAD ( & orig_node - > frag_list ) ;
orig_node - > last_frag_packet = 0 ;
if ( ! orig_node - > bcast_own_sum )
goto free_bcast_own ;
2012-05-12 20:34:00 +04:00
hash_added = batadv_hash_add ( bat_priv - > orig_hash , batadv_compare_orig ,
2012-05-12 15:48:56 +04:00
batadv_choose_orig , orig_node ,
2012-05-12 15:48:55 +04:00
& orig_node - > hash_entry ) ;
2011-07-10 02:36:36 +04:00
if ( hash_added ! = 0 )
2010-12-13 14:19:28 +03:00
goto free_bcast_own_sum ;
return orig_node ;
free_bcast_own_sum :
kfree ( orig_node - > bcast_own_sum ) ;
free_bcast_own :
kfree ( orig_node - > bcast_own ) ;
free_orig_node :
kfree ( orig_node ) ;
return NULL ;
}
2012-06-06 00:31:31 +04:00
static bool
batadv_purge_orig_neighbors ( struct batadv_priv * bat_priv ,
struct batadv_orig_node * orig_node ,
struct batadv_neigh_node * * best_neigh_node )
2010-12-13 14:19:28 +03:00
{
2010-12-13 00:57:11 +03:00
struct hlist_node * node , * node_tmp ;
2012-06-06 00:31:31 +04:00
struct batadv_neigh_node * neigh_node ;
2010-12-13 14:19:28 +03:00
bool neigh_purged = false ;
2012-03-01 11:35:20 +04:00
unsigned long last_seen ;
2012-06-06 00:31:31 +04:00
struct batadv_hard_iface * if_incoming ;
2010-12-13 14:19:28 +03:00
* best_neigh_node = NULL ;
2010-12-13 00:57:12 +03:00
spin_lock_bh ( & orig_node - > neigh_list_lock ) ;
2010-12-13 14:19:28 +03:00
/* for all neighbors towards this originator ... */
2010-12-13 00:57:11 +03:00
hlist_for_each_entry_safe ( neigh_node , node , node_tmp ,
& orig_node - > neigh_list , list ) {
2012-05-12 15:48:58 +04:00
last_seen = neigh_node - > last_seen ;
if_incoming = neigh_node - > if_incoming ;
2012-06-04 00:19:17 +04:00
if ( ( batadv_has_timed_out ( last_seen , BATADV_PURGE_TIMEOUT ) ) | |
2012-06-04 00:19:19 +04:00
( if_incoming - > if_status = = BATADV_IF_INACTIVE ) | |
( if_incoming - > if_status = = BATADV_IF_NOT_IN_USE ) | |
( if_incoming - > if_status = = BATADV_IF_TO_BE_REMOVED ) ) {
if ( ( if_incoming - > if_status = = BATADV_IF_INACTIVE ) | |
( if_incoming - > if_status = = BATADV_IF_NOT_IN_USE ) | |
( if_incoming - > if_status = = BATADV_IF_TO_BE_REMOVED ) )
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_BATMAN , bat_priv ,
2012-05-12 15:48:58 +04:00
" neighbor purge: originator %pM, neighbor: %pM, iface: %s \n " ,
orig_node - > orig , neigh_node - > addr ,
if_incoming - > net_dev - > name ) ;
2010-12-13 14:19:28 +03:00
else
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_BATMAN , bat_priv ,
2012-05-12 15:48:58 +04:00
" neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u \n " ,
orig_node - > orig , neigh_node - > addr ,
jiffies_to_msecs ( last_seen ) ) ;
2010-12-13 14:19:28 +03:00
neigh_purged = true ;
2010-12-13 00:57:11 +03:00
2010-12-13 00:57:12 +03:00
hlist_del_rcu ( & neigh_node - > list ) ;
2012-05-12 04:09:36 +04:00
batadv_bonding_candidate_del ( orig_node , neigh_node ) ;
2012-05-12 04:09:34 +04:00
batadv_neigh_node_free_ref ( neigh_node ) ;
2010-12-13 14:19:28 +03:00
} else {
if ( ( ! * best_neigh_node ) | |
( neigh_node - > tq_avg > ( * best_neigh_node ) - > tq_avg ) )
* best_neigh_node = neigh_node ;
}
}
2010-12-13 00:57:12 +03:00
spin_unlock_bh ( & orig_node - > neigh_list_lock ) ;
2010-12-13 14:19:28 +03:00
return neigh_purged ;
}
2012-06-06 00:31:31 +04:00
static bool batadv_purge_orig_node ( struct batadv_priv * bat_priv ,
struct batadv_orig_node * orig_node )
2010-12-13 14:19:28 +03:00
{
2012-06-06 00:31:31 +04:00
struct batadv_neigh_node * best_neigh_node ;
2010-12-13 14:19:28 +03:00
2012-06-04 00:19:17 +04:00
if ( batadv_has_timed_out ( orig_node - > last_seen ,
2 * BATADV_PURGE_TIMEOUT ) ) {
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_BATMAN , bat_priv ,
2012-05-12 15:48:58 +04:00
" Originator timeout: originator %pM, last_seen %u \n " ,
orig_node - > orig ,
jiffies_to_msecs ( orig_node - > last_seen ) ) ;
2010-12-13 14:19:28 +03:00
return true ;
} else {
2012-05-12 20:34:00 +04:00
if ( batadv_purge_orig_neighbors ( bat_priv , orig_node ,
& best_neigh_node ) )
2012-05-12 04:09:36 +04:00
batadv_update_route ( bat_priv , orig_node ,
best_neigh_node ) ;
2010-12-13 14:19:28 +03:00
}
return false ;
}
2012-06-06 00:31:31 +04:00
static void _batadv_purge_orig ( struct batadv_priv * bat_priv )
2010-12-13 14:19:28 +03:00
{
2012-06-06 00:31:28 +04:00
struct batadv_hashtable * hash = bat_priv - > orig_hash ;
2011-02-18 15:28:09 +03:00
struct hlist_node * node , * node_tmp ;
2010-12-13 14:19:28 +03:00
struct hlist_head * head ;
2011-01-19 23:01:40 +03:00
spinlock_t * list_lock ; /* spinlock to protect write access */
2012-06-06 00:31:31 +04:00
struct batadv_orig_node * orig_node ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
2010-12-13 14:19:28 +03:00
if ( ! hash )
return ;
/* for all origins... */
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-01-19 23:01:40 +03:00
list_lock = & hash - > list_locks [ i ] ;
2010-12-13 14:19:28 +03:00
2011-01-19 23:01:40 +03:00
spin_lock_bh ( list_lock ) ;
2011-02-18 15:28:09 +03:00
hlist_for_each_entry_safe ( orig_node , node , node_tmp ,
head , hash_entry ) {
2012-05-12 20:34:00 +04:00
if ( batadv_purge_orig_node ( bat_priv , orig_node ) ) {
2010-12-13 14:19:28 +03:00
if ( orig_node - > gw_flags )
2012-05-12 04:09:29 +04:00
batadv_gw_node_delete ( bat_priv ,
orig_node ) ;
2011-02-18 15:28:09 +03:00
hlist_del_rcu ( node ) ;
2012-05-12 04:09:34 +04:00
batadv_orig_node_free_ref ( orig_node ) ;
2011-01-19 23:01:40 +03:00
continue ;
2010-12-13 14:19:28 +03:00
}
2012-05-12 15:48:58 +04:00
if ( batadv_has_timed_out ( orig_node - > last_frag_packet ,
2012-06-04 00:19:15 +04:00
BATADV_FRAG_TIMEOUT ) )
2012-05-12 04:09:40 +04:00
batadv_frag_list_free ( & orig_node - > frag_list ) ;
2010-12-13 14:19:28 +03:00
}
2011-01-19 23:01:40 +03:00
spin_unlock_bh ( list_lock ) ;
2010-12-13 14:19:28 +03:00
}
2012-05-12 04:09:29 +04:00
batadv_gw_node_purge ( bat_priv ) ;
batadv_gw_election ( bat_priv ) ;
2010-12-13 14:19:28 +03:00
}
2012-05-12 20:34:00 +04:00
static void batadv_purge_orig ( struct work_struct * work )
2010-12-13 14:19:28 +03:00
{
2012-06-06 00:31:31 +04:00
struct delayed_work * delayed_work ;
struct batadv_priv * bat_priv ;
2010-12-13 14:19:28 +03:00
2012-06-06 00:31:31 +04:00
delayed_work = container_of ( work , struct delayed_work , work ) ;
bat_priv = container_of ( delayed_work , struct batadv_priv , orig_work ) ;
2012-05-12 20:34:00 +04:00
_batadv_purge_orig ( bat_priv ) ;
2012-12-25 16:14:37 +04:00
queue_delayed_work ( batadv_event_workqueue ,
& bat_priv - > orig_work ,
msecs_to_jiffies ( BATADV_ORIG_WORK_PERIOD ) ) ;
2010-12-13 14:19:28 +03:00
}
2012-06-06 00:31:31 +04:00
void batadv_purge_orig_ref ( struct batadv_priv * bat_priv )
2010-12-13 14:19:28 +03:00
{
2012-05-12 20:34:00 +04:00
_batadv_purge_orig ( bat_priv ) ;
2010-12-13 14:19:28 +03:00
}
2012-05-12 04:09:34 +04:00
int batadv_orig_seq_print_text ( struct seq_file * seq , void * offset )
2010-12-13 14:19:28 +03:00
{
struct net_device * net_dev = ( struct net_device * ) seq - > private ;
2012-06-06 00:31:31 +04:00
struct batadv_priv * bat_priv = netdev_priv ( net_dev ) ;
2012-06-06 00:31:28 +04:00
struct batadv_hashtable * hash = bat_priv - > orig_hash ;
2011-02-18 15:28:09 +03:00
struct hlist_node * node , * node_tmp ;
2010-12-13 14:19:28 +03:00
struct hlist_head * head ;
2012-06-06 00:31:31 +04:00
struct batadv_hard_iface * primary_if ;
struct batadv_orig_node * orig_node ;
struct batadv_neigh_node * neigh_node , * neigh_node_tmp ;
2010-12-13 14:19:28 +03:00
int batman_count = 0 ;
int last_seen_secs ;
int last_seen_msecs ;
2012-06-19 22:26:30 +04:00
unsigned long last_seen_jiffies ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
2011-04-20 17:40:58 +04:00
2012-08-03 19:15:46 +04:00
primary_if = batadv_seq_print_text_primary_if_get ( seq ) ;
if ( ! primary_if )
2011-04-20 17:40:58 +04:00
goto out ;
2010-12-13 14:19:28 +03:00
2011-07-05 12:42:51 +04:00
seq_printf ( seq , " [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)] \n " ,
2012-06-04 00:19:17 +04:00
BATADV_SOURCE_VERSION , primary_if - > net_dev - > name ,
2011-04-20 17:40:58 +04:00
primary_if - > net_dev - > dev_addr , net_dev - > name ) ;
2010-12-13 14:19:28 +03:00
seq_printf ( seq , " %-15s %s (%s/%i) %17s [%10s]: %20s ... \n " ,
2012-06-04 00:19:17 +04:00
" Originator " , " last-seen " , " # " , BATADV_TQ_MAX_VALUE ,
" Nexthop " , " outgoingIF " , " Potential nexthops " ) ;
2010-12-13 14:19:28 +03:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-01-19 23:01:40 +03:00
rcu_read_lock ( ) ;
2011-02-18 15:28:09 +03:00
hlist_for_each_entry_rcu ( orig_node , node , head , hash_entry ) {
2012-05-12 04:09:34 +04:00
neigh_node = batadv_orig_node_get_router ( orig_node ) ;
2011-03-15 01:43:37 +03:00
if ( ! neigh_node )
2010-12-13 14:19:28 +03:00
continue ;
2011-03-15 01:43:37 +03:00
if ( neigh_node - > tq_avg = = 0 )
goto next ;
2010-12-13 14:19:28 +03:00
2012-06-19 22:26:30 +04:00
last_seen_jiffies = jiffies - orig_node - > last_seen ;
last_seen_msecs = jiffies_to_msecs ( last_seen_jiffies ) ;
last_seen_secs = last_seen_msecs / 1000 ;
last_seen_msecs = last_seen_msecs % 1000 ;
2010-12-13 14:19:28 +03:00
seq_printf ( seq , " %pM %4i.%03is (%3i) %pM [%10s]: " ,
orig_node - > orig , last_seen_secs ,
last_seen_msecs , neigh_node - > tq_avg ,
neigh_node - > addr ,
neigh_node - > if_incoming - > net_dev - > name ) ;
2011-03-15 01:43:37 +03:00
hlist_for_each_entry_rcu ( neigh_node_tmp , node_tmp ,
2010-12-13 00:57:12 +03:00
& orig_node - > neigh_list , list ) {
2011-03-15 01:43:37 +03:00
seq_printf ( seq , " %pM (%3i) " ,
neigh_node_tmp - > addr ,
neigh_node_tmp - > tq_avg ) ;
2010-12-13 14:19:28 +03:00
}
seq_printf ( seq , " \n " ) ;
batman_count + + ;
2011-03-15 01:43:37 +03:00
next :
2012-05-12 04:09:34 +04:00
batadv_neigh_node_free_ref ( neigh_node ) ;
2010-12-13 14:19:28 +03:00
}
2011-01-19 23:01:40 +03:00
rcu_read_unlock ( ) ;
2010-12-13 14:19:28 +03:00
}
2011-03-15 01:43:37 +03:00
if ( batman_count = = 0 )
2010-12-13 14:19:28 +03:00
seq_printf ( seq , " No batman nodes in range ... \n " ) ;
2011-04-20 17:40:58 +04:00
out :
if ( primary_if )
2012-05-12 15:48:54 +04:00
batadv_hardif_free_ref ( primary_if ) ;
2012-08-03 19:15:46 +04:00
return 0 ;
2010-12-13 14:19:28 +03:00
}
2012-06-06 00:31:31 +04:00
static int batadv_orig_node_add_if ( struct batadv_orig_node * orig_node ,
int max_if_num )
2010-12-13 14:19:28 +03:00
{
void * data_ptr ;
2012-06-04 00:19:17 +04:00
size_t data_size , old_size ;
2010-12-13 14:19:28 +03:00
2012-06-04 00:19:17 +04:00
data_size = max_if_num * sizeof ( unsigned long ) * BATADV_NUM_WORDS ;
old_size = ( max_if_num - 1 ) * sizeof ( unsigned long ) * BATADV_NUM_WORDS ;
data_ptr = kmalloc ( data_size , GFP_ATOMIC ) ;
2011-08-30 01:17:24 +04:00
if ( ! data_ptr )
2012-05-05 15:27:28 +04:00
return - ENOMEM ;
2010-12-13 14:19:28 +03:00
2012-06-04 00:19:17 +04:00
memcpy ( data_ptr , orig_node - > bcast_own , old_size ) ;
2010-12-13 14:19:28 +03:00
kfree ( orig_node - > bcast_own ) ;
orig_node - > bcast_own = data_ptr ;
data_ptr = kmalloc ( max_if_num * sizeof ( uint8_t ) , GFP_ATOMIC ) ;
2011-08-30 01:17:24 +04:00
if ( ! data_ptr )
2012-05-05 15:27:28 +04:00
return - ENOMEM ;
2010-12-13 14:19:28 +03:00
memcpy ( data_ptr , orig_node - > bcast_own_sum ,
( max_if_num - 1 ) * sizeof ( uint8_t ) ) ;
kfree ( orig_node - > bcast_own_sum ) ;
orig_node - > bcast_own_sum = data_ptr ;
return 0 ;
}
2012-06-06 00:31:31 +04:00
int batadv_orig_hash_add_if ( struct batadv_hard_iface * hard_iface ,
int max_if_num )
2010-12-13 14:19:28 +03:00
{
2012-06-06 00:31:31 +04:00
struct batadv_priv * bat_priv = netdev_priv ( hard_iface - > soft_iface ) ;
2012-06-06 00:31:28 +04:00
struct batadv_hashtable * hash = bat_priv - > orig_hash ;
2011-02-18 15:28:09 +03:00
struct hlist_node * node ;
2010-12-13 14:19:28 +03:00
struct hlist_head * head ;
2012-06-06 00:31:31 +04:00
struct batadv_orig_node * orig_node ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
int ret ;
2010-12-13 14:19:28 +03:00
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
2012-05-12 04:09:43 +04:00
* if_num
*/
2010-12-13 14:19:28 +03:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-01-19 23:01:40 +03:00
rcu_read_lock ( ) ;
2011-02-18 15:28:09 +03:00
hlist_for_each_entry_rcu ( orig_node , node , head , hash_entry ) {
2011-01-19 23:01:42 +03:00
spin_lock_bh ( & orig_node - > ogm_cnt_lock ) ;
2012-05-12 20:34:00 +04:00
ret = batadv_orig_node_add_if ( orig_node , max_if_num ) ;
2011-01-19 23:01:42 +03:00
spin_unlock_bh ( & orig_node - > ogm_cnt_lock ) ;
2012-05-05 15:27:28 +04:00
if ( ret = = - ENOMEM )
2010-12-13 14:19:28 +03:00
goto err ;
}
2011-01-19 23:01:40 +03:00
rcu_read_unlock ( ) ;
2010-12-13 14:19:28 +03:00
}
return 0 ;
err :
2011-01-19 23:01:40 +03:00
rcu_read_unlock ( ) ;
2010-12-13 14:19:28 +03:00
return - ENOMEM ;
}
2012-06-06 00:31:31 +04:00
static int batadv_orig_node_del_if ( struct batadv_orig_node * orig_node ,
2012-05-12 20:34:00 +04:00
int max_if_num , int del_if_num )
2010-12-13 14:19:28 +03:00
{
void * data_ptr = NULL ;
int chunk_size ;
/* last interface was removed */
if ( max_if_num = = 0 )
goto free_bcast_own ;
2012-06-04 00:19:17 +04:00
chunk_size = sizeof ( unsigned long ) * BATADV_NUM_WORDS ;
2010-12-13 14:19:28 +03:00
data_ptr = kmalloc ( max_if_num * chunk_size , GFP_ATOMIC ) ;
2011-08-30 01:17:24 +04:00
if ( ! data_ptr )
2012-05-05 15:27:28 +04:00
return - ENOMEM ;
2010-12-13 14:19:28 +03:00
/* copy first part */
memcpy ( data_ptr , orig_node - > bcast_own , del_if_num * chunk_size ) ;
/* copy second part */
2011-05-15 01:14:49 +04:00
memcpy ( ( char * ) data_ptr + del_if_num * chunk_size ,
2010-12-13 14:19:28 +03:00
orig_node - > bcast_own + ( ( del_if_num + 1 ) * chunk_size ) ,
( max_if_num - del_if_num ) * chunk_size ) ;
free_bcast_own :
kfree ( orig_node - > bcast_own ) ;
orig_node - > bcast_own = data_ptr ;
if ( max_if_num = = 0 )
goto free_own_sum ;
data_ptr = kmalloc ( max_if_num * sizeof ( uint8_t ) , GFP_ATOMIC ) ;
2011-08-30 01:17:24 +04:00
if ( ! data_ptr )
2012-05-05 15:27:28 +04:00
return - ENOMEM ;
2010-12-13 14:19:28 +03:00
memcpy ( data_ptr , orig_node - > bcast_own_sum ,
del_if_num * sizeof ( uint8_t ) ) ;
2011-05-15 01:14:49 +04:00
memcpy ( ( char * ) data_ptr + del_if_num * sizeof ( uint8_t ) ,
2010-12-13 14:19:28 +03:00
orig_node - > bcast_own_sum + ( ( del_if_num + 1 ) * sizeof ( uint8_t ) ) ,
( max_if_num - del_if_num ) * sizeof ( uint8_t ) ) ;
free_own_sum :
kfree ( orig_node - > bcast_own_sum ) ;
orig_node - > bcast_own_sum = data_ptr ;
return 0 ;
}
2012-06-06 00:31:31 +04:00
int batadv_orig_hash_del_if ( struct batadv_hard_iface * hard_iface ,
int max_if_num )
2010-12-13 14:19:28 +03:00
{
2012-06-06 00:31:31 +04:00
struct batadv_priv * bat_priv = netdev_priv ( hard_iface - > soft_iface ) ;
2012-06-06 00:31:28 +04:00
struct batadv_hashtable * hash = bat_priv - > orig_hash ;
2011-02-18 15:28:09 +03:00
struct hlist_node * node ;
2010-12-13 14:19:28 +03:00
struct hlist_head * head ;
2012-06-06 00:31:31 +04:00
struct batadv_hard_iface * hard_iface_tmp ;
struct batadv_orig_node * orig_node ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
int ret ;
2010-12-13 14:19:28 +03:00
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
2012-05-12 04:09:43 +04:00
* if_num
*/
2010-12-13 14:19:28 +03:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-01-19 23:01:40 +03:00
rcu_read_lock ( ) ;
2011-02-18 15:28:09 +03:00
hlist_for_each_entry_rcu ( orig_node , node , head , hash_entry ) {
2011-01-19 23:01:42 +03:00
spin_lock_bh ( & orig_node - > ogm_cnt_lock ) ;
2012-05-12 20:34:00 +04:00
ret = batadv_orig_node_del_if ( orig_node , max_if_num ,
hard_iface - > if_num ) ;
2011-01-19 23:01:42 +03:00
spin_unlock_bh ( & orig_node - > ogm_cnt_lock ) ;
2010-12-13 14:19:28 +03:00
2012-05-05 15:27:28 +04:00
if ( ret = = - ENOMEM )
2010-12-13 14:19:28 +03:00
goto err ;
}
2011-01-19 23:01:40 +03:00
rcu_read_unlock ( ) ;
2010-12-13 14:19:28 +03:00
}
/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
rcu_read_lock ( ) ;
2012-05-12 04:09:42 +04:00
list_for_each_entry_rcu ( hard_iface_tmp , & batadv_hardif_list , list ) {
2012-06-04 00:19:19 +04:00
if ( hard_iface_tmp - > if_status = = BATADV_IF_NOT_IN_USE )
2010-12-13 14:19:28 +03:00
continue ;
2011-02-18 15:33:20 +03:00
if ( hard_iface = = hard_iface_tmp )
2010-12-13 14:19:28 +03:00
continue ;
2011-02-18 15:33:20 +03:00
if ( hard_iface - > soft_iface ! = hard_iface_tmp - > soft_iface )
2010-12-13 14:19:28 +03:00
continue ;
2011-02-18 15:33:20 +03:00
if ( hard_iface_tmp - > if_num > hard_iface - > if_num )
hard_iface_tmp - > if_num - - ;
2010-12-13 14:19:28 +03:00
}
rcu_read_unlock ( ) ;
2011-02-18 15:33:20 +03:00
hard_iface - > if_num = - 1 ;
2010-12-13 14:19:28 +03:00
return 0 ;
err :
2011-01-19 23:01:40 +03:00
rcu_read_unlock ( ) ;
2010-12-13 14:19:28 +03:00
return - ENOMEM ;
}