/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "send.h"
#include "hash.h"
#include "originator.h"
#include "routing.h"
#include "bridge_loop_avoidance.h"

#include <linux/crc16.h>

static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
				 struct batadv_orig_node *orig_node);
static void batadv_tt_purge(struct work_struct *work);
static void
batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
static void batadv_tt_global_del(struct batadv_priv *bat_priv,
				 struct batadv_orig_node *orig_node,
				 const unsigned char *addr,
				 const char *message, bool roaming);
/* returns 1 if they are the same mac addr */
static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_tt_common_entry,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
	queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
			   msecs_to_jiffies(5000));
}
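
/* batadv_tt_hash_find - look up a client in a translation table hash
 *
 * Searches the given hash (local or global table) for the entry matching
 * the mac address pointed to by data. If found, the refcount of the entry
 * is increased and its common part is returned, otherwise NULL.
 */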
static struct batadv_tt_common_entry *
batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
	uint32_t index;

	if (!hash)
		return NULL;

	index = batadv_choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
		if (!batadv_compare_eth(tt_common_entry, data))
			continue;

		if (!atomic_inc_not_zero(&tt_common_entry->refcount))
			continue;

		tt_common_entry_tmp = tt_common_entry;
		break;
	}
	rcu_read_unlock();

	return tt_common_entry_tmp;
}

static struct batadv_tt_local_entry *
batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
{
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_tt_local_entry *tt_local_entry = NULL;

	tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
	if (tt_common_entry)
		tt_local_entry = container_of(tt_common_entry,
					      struct batadv_tt_local_entry,
					      common);
	return tt_local_entry;
}

static struct batadv_tt_global_entry *
batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
{
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_tt_global_entry *tt_global_entry = NULL;

	tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
	if (tt_common_entry)
		tt_global_entry = container_of(tt_common_entry,
					       struct batadv_tt_global_entry,
					       common);
	return tt_global_entry;
}
static void
batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry *tt_local_entry)
{
	if (atomic_dec_and_test(&tt_local_entry->common.refcount))
		kfree_rcu(tt_local_entry, common.rcu);
}

static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
{
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_tt_global_entry *tt_global_entry;

	tt_common_entry = container_of(rcu, struct batadv_tt_common_entry, rcu);
	tt_global_entry = container_of(tt_common_entry,
				       struct batadv_tt_global_entry, common);

	kfree(tt_global_entry);
}

static void
batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
{
	if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
		batadv_tt_global_del_orig_list(tt_global_entry);
		call_rcu(&tt_global_entry->common.rcu,
			 batadv_tt_global_entry_free_rcu);
	}
}

static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
{
	struct batadv_tt_orig_list_entry *orig_entry;

	orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
	batadv_orig_node_free_ref(orig_entry->orig_node);
	kfree(orig_entry);
}

static void
batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
{
	if (!atomic_dec_and_test(&orig_entry->refcount))
		return;
	/* to avoid race conditions, immediately decrease the tt counter */
	atomic_dec(&orig_entry->orig_node->tt_size);
	call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
}
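
/* batadv_tt_local_event - queue a local TT change for the next OGM
 *
 * Allocates a tt_change_node describing the addition or deletion of a
 * local client and appends it to bat_priv->tt.changes_list. Opposite
 * events (ADD+DEL or DEL+ADD) for the same client within one OGM interval
 * cancel each other out and are dropped instead of being queued.
 */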
static void batadv_tt_local_event(struct batadv_priv *bat_priv,
				  const uint8_t *addr, uint8_t flags)
{
	struct batadv_tt_change_node *tt_change_node, *entry, *safe;
	bool event_removed = false;
	bool del_op_requested, del_op_entry;

	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
	if (!tt_change_node)
		return;

	tt_change_node->change.flags = flags;
	memcpy(tt_change_node->change.addr, addr, ETH_ALEN);

	del_op_requested = flags & BATADV_TT_CLIENT_DEL;

	/* check for ADD+DEL or DEL+ADD events */
	spin_lock_bh(&bat_priv->tt.changes_list_lock);
	list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
				 list) {
		if (!batadv_compare_eth(entry->change.addr, addr))
			continue;

		/* DEL+ADD in the same orig interval have no effect and can be
		 * removed to avoid silly behaviour on the receiver side. The
		 * other way around (ADD+DEL) can happen in case of roaming of
		 * a client still in the NEW state. Roaming of NEW clients is
		 * now possible due to automatic recognition of "temporary"
		 * clients
		 */
		del_op_entry = entry->change.flags & BATADV_TT_CLIENT_DEL;
		if (!del_op_requested && del_op_entry)
			goto del;
		if (del_op_requested && !del_op_entry)
			goto del;
		continue;
del:
		list_del(&entry->list);
		kfree(entry);
		kfree(tt_change_node);
		event_removed = true;
		goto unlock;
	}

	/* track the change in the OGM interval list */
	list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);

unlock:
	spin_unlock_bh(&bat_priv->tt.changes_list_lock);

	if (event_removed)
		atomic_dec(&bat_priv->tt.local_changes);
	else
		atomic_inc(&bat_priv->tt.local_changes);
}
int batadv_tt_len(int changes_num)
{
	return changes_num * sizeof(struct batadv_tt_change);
}

static int batadv_tt_local_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->tt.local_hash)
		return 0;

	bat_priv->tt.local_hash = batadv_hash_new(1024);

	if (!bat_priv->tt.local_hash)
		return -ENOMEM;

	return 0;
}
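
/* batadv_tt_local_add - add a client to the local translation table
 *
 * Creates (or refreshes) the local TT entry for addr on the given soft
 * interface. New entries are flagged BATADV_TT_CLIENT_NEW until the next
 * ttvn increment. If the client was previously announced by other
 * originators, a roaming advertisement is sent to each of them and the
 * corresponding global entry is marked BATADV_TT_CLIENT_ROAM.
 */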
void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
			 int ifindex)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct batadv_tt_local_entry *tt_local_entry = NULL;
	struct batadv_tt_global_entry *tt_global_entry = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	struct batadv_tt_orig_list_entry *orig_entry;
	int hash_added;

	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);

	if (tt_local_entry) {
		tt_local_entry->last_seen = jiffies;
		/* possibly unset the BATADV_TT_CLIENT_PENDING flag */
		tt_local_entry->common.flags &= ~BATADV_TT_CLIENT_PENDING;
		goto out;
	}

	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		goto out;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
		   (uint8_t)atomic_read(&bat_priv->tt.vn));

	memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
	tt_local_entry->common.flags = BATADV_NO_FLAGS;
	if (batadv_is_wifi_iface(ifindex))
		tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
	atomic_set(&tt_local_entry->common.refcount, 2);
	tt_local_entry->last_seen = jiffies;
	tt_local_entry->common.added_at = tt_local_entry->last_seen;

	/* the batman interface mac address should never be purged */
	if (batadv_compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->common.flags |= BATADV_TT_CLIENT_NOPURGE;

	/* The local entry has to be marked as NEW to avoid sending it in
	 * a full table response going out before the next ttvn increment
	 * (consistency check)
	 */
	tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;

	hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
				     batadv_choose_orig,
				     &tt_local_entry->common,
				     &tt_local_entry->common.hash_entry);

	if (unlikely(hash_added != 0)) {
		/* remove the reference for the hash */
		batadv_tt_local_entry_free_ref(tt_local_entry);
		goto out;
	}

	batadv_tt_local_event(bat_priv, addr, tt_local_entry->common.flags);

	/* remove address from global hash if present */
	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);

	/* Check whether this is a roaming client! */
	if (tt_global_entry) {
		/* These nodes are probably going to update their tt table */
		head = &tt_global_entry->orig_list;
		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_entry, node, head, list) {
			orig_entry->orig_node->tt_poss_change = true;

			batadv_send_roam_adv(bat_priv,
					     tt_global_entry->common.addr,
					     orig_entry->orig_node);
		}
		rcu_read_unlock();
		/* The global entry has to be marked as ROAMING and
		 * has to be kept for consistency purposes
		 */
		tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
		tt_global_entry->roam_at = jiffies;
	}
out:
	if (tt_local_entry)
		batadv_tt_local_entry_free_ref(tt_local_entry);
	if (tt_global_entry)
		batadv_tt_global_entry_free_ref(tt_global_entry);
}
static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
					  int *packet_buff_len,
					  int min_packet_len,
					  int new_packet_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(new_packet_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, *packet_buff, min_packet_len);
		kfree(*packet_buff);
		*packet_buff = new_buff;
		*packet_buff_len = new_packet_len;
	}
}

static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
					  unsigned char **packet_buff,
					  int *packet_buff_len,
					  int min_packet_len)
{
	struct batadv_hard_iface *primary_if;
	int req_len;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	req_len = min_packet_len;
	req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));

	/* if we have too many changes for one packet don't send any
	 * and wait for the tt table request which will be fragmented
	 */
	if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
		req_len = min_packet_len;

	batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
				      min_packet_len, req_len);

	if (primary_if)
		batadv_hardif_free_ref(primary_if);
}
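
/* batadv_tt_changes_fill_buff - append pending local TT changes to an OGM
 *
 * Resizes packet_buff so it can carry the queued TT changes (if they fit
 * into the primary interface MTU), copies them after min_packet_len and
 * stores a copy in tt.last_changeset so that later TT requests can be
 * answered with the same diff. Returns the number of changes copied.
 */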
static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
				       unsigned char **packet_buff,
				       int *packet_buff_len,
				       int min_packet_len)
{
	struct batadv_tt_change_node *entry, *safe;
	int count = 0, tot_changes = 0, new_len;
	unsigned char *tt_buff;

	batadv_tt_prepare_packet_buff(bat_priv, packet_buff,
				      packet_buff_len, min_packet_len);

	new_len = *packet_buff_len - min_packet_len;
	tt_buff = *packet_buff + min_packet_len;

	if (new_len > 0)
		tot_changes = new_len / batadv_tt_len(1);

	spin_lock_bh(&bat_priv->tt.changes_list_lock);
	atomic_set(&bat_priv->tt.local_changes, 0);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
				 list) {
		if (count < tot_changes) {
			memcpy(tt_buff + batadv_tt_len(count),
			       &entry->change, sizeof(struct batadv_tt_change));
			count++;
		}
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->tt.changes_list_lock);

	/* Keep the buffer for possible tt_request */
	spin_lock_bh(&bat_priv->tt.last_changeset_lock);
	kfree(bat_priv->tt.last_changeset);
	bat_priv->tt.last_changeset_len = 0;
	bat_priv->tt.last_changeset = NULL;
	/* check whether this new OGM has no changes due to size problems */
	if (new_len > 0) {
		/* if kmalloc() fails we will reply with the full table
		 * instead of providing the diff
		 */
		bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
		if (bat_priv->tt.last_changeset) {
			memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
			bat_priv->tt.last_changeset_len = new_len;
		}
	}
	spin_unlock_bh(&bat_priv->tt.last_changeset_lock);

	return count;
}
int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	seq_printf(seq,
		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
		   net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
				   tt_common_entry->addr,
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_NOPURGE ? 'P' : '.'),
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_NEW ? 'N' : '.'),
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_PENDING ? 'X' : '.'),
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return 0;
}
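
/* batadv_tt_local_set_pending - mark a local client as pending removal
 *
 * Queues a deletion event carrying the given flags and sets
 * BATADV_TT_CLIENT_PENDING on the entry: the client stays in the local
 * table until the next ttvn increment so that full table responses issued
 * in the meantime remain consistent.
 */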
static void
batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
			    struct batadv_tt_local_entry *tt_local_entry,
			    uint16_t flags, const char *message)
{
	batadv_tt_local_event(bat_priv, tt_local_entry->common.addr,
			      tt_local_entry->common.flags | flags);

	/* The local client has to be marked as "pending to be removed" but has
	 * to be kept in the table in order to send it in a full table
	 * response issued before the next ttvn increment (consistency check)
	 */
	tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Local tt entry (%pM) pending to be removed: %s\n",
		   tt_local_entry->common.addr, message);
}

void batadv_tt_local_remove(struct batadv_priv *bat_priv, const uint8_t *addr,
			    const char *message, bool roaming)
{
	struct batadv_tt_local_entry *tt_local_entry = NULL;
	uint16_t flags;

	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;

	flags = BATADV_TT_CLIENT_DEL;
	if (roaming)
		flags |= BATADV_TT_CLIENT_ROAM;

	batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags, message);
out:
	if (tt_local_entry)
		batadv_tt_local_entry_free_ref(tt_local_entry);
}

static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
				       struct hlist_head *head)
{
	struct batadv_tt_local_entry *tt_local_entry;
	struct batadv_tt_common_entry *tt_common_entry;
	struct hlist_node *node, *node_tmp;

	hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
				  hash_entry) {
		tt_local_entry = container_of(tt_common_entry,
					      struct batadv_tt_local_entry,
					      common);
		if (tt_local_entry->common.flags & BATADV_TT_CLIENT_NOPURGE)
			continue;

		/* entry already marked for deletion */
		if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
			continue;

		if (!batadv_has_timed_out(tt_local_entry->last_seen,
					  BATADV_TT_LOCAL_TIMEOUT))
			continue;

		batadv_tt_local_set_pending(bat_priv, tt_local_entry,
					    BATADV_TT_CLIENT_DEL, "timed out");
	}
}
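
/* batadv_tt_local_purge - mark timed out local clients as pending removal
 *
 * Walks every bucket of the local hash and flags clients that have not
 * been seen for BATADV_TT_LOCAL_TIMEOUT, skipping entries marked NOPURGE
 * or already pending deletion.
 */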
static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		batadv_tt_local_purge_list(bat_priv, head);
		spin_unlock_bh(list_lock);
	}
}

static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_tt_local_entry *tt_local;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt.local_hash)
		return;

	hash = bat_priv->tt.local_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_local = container_of(tt_common_entry,
						struct batadv_tt_local_entry,
						common);
			batadv_tt_local_entry_free_ref(tt_local);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);

	bat_priv->tt.local_hash = NULL;
}
static int batadv_tt_global_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->tt.global_hash)
		return 0;

	bat_priv->tt.global_hash = batadv_hash_new(1024);

	if (!bat_priv->tt.global_hash)
		return -ENOMEM;

	return 0;
}

static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
{
	struct batadv_tt_change_node *entry, *safe;

	spin_lock_bh(&bat_priv->tt.changes_list_lock);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
				 list) {
		list_del(&entry->list);
		kfree(entry);
	}

	atomic_set(&bat_priv->tt.local_changes, 0);
	spin_unlock_bh(&bat_priv->tt.changes_list_lock);
}

/* retrieves the orig_tt_list_entry belonging to orig_node from the
 * batadv_tt_global_entry list
 *
 * returns it with an increased refcounter, NULL if not found
 */
static struct batadv_tt_orig_list_entry *
batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
				 const struct batadv_orig_node *orig_node)
{
	struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
	const struct hlist_head *head;
	struct hlist_node *node;

	rcu_read_lock();
	head = &entry->orig_list;
	hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
		if (tmp_orig_entry->orig_node != orig_node)
			continue;
		if (!atomic_inc_not_zero(&tmp_orig_entry->refcount))
			continue;

		orig_entry = tmp_orig_entry;
		break;
	}
	rcu_read_unlock();

	return orig_entry;
}

/* find out if an orig_node is already in the list of a tt_global_entry.
 * returns true if found, false otherwise
 */
static bool
batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
				const struct batadv_orig_node *orig_node)
{
	struct batadv_tt_orig_list_entry *orig_entry;
	bool found = false;

	orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
	if (orig_entry) {
		found = true;
		batadv_tt_orig_list_entry_free_ref(orig_entry);
	}

	return found;
}
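
/* batadv_tt_global_orig_entry_add - attach an originator to a global entry
 *
 * If orig_node is already listed for this global client only its ttvn is
 * refreshed, otherwise a new orig_list_entry referencing orig_node is
 * allocated and added to the entry's originator list.
 */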
static void
batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
				struct batadv_orig_node *orig_node, int ttvn)
{
	struct batadv_tt_orig_list_entry *orig_entry;

	orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
	if (orig_entry) {
		/* refresh the ttvn: the current value could be a bogus one that
		 * was added during a "temporary client detection"
		 */
		orig_entry->ttvn = ttvn;
		goto out;
	}

	orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
	if (!orig_entry)
		goto out;

	INIT_HLIST_NODE(&orig_entry->list);
	atomic_inc(&orig_node->refcount);
	atomic_inc(&orig_node->tt_size);
	orig_entry->orig_node = orig_node;
	orig_entry->ttvn = ttvn;
	atomic_set(&orig_entry->refcount, 2);

	spin_lock_bh(&tt_global->list_lock);
	hlist_add_head_rcu(&orig_entry->list,
			   &tt_global->orig_list);
	spin_unlock_bh(&tt_global->list_lock);
out:
	if (orig_entry)
		batadv_tt_orig_list_entry_free_ref(orig_entry);
}
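
/* batadv_tt_global_add - add a global TT entry or a new announcing node
 *
 * Creates the global entry for tt_addr if it does not exist yet and links
 * orig_node to it. Temporary clients never override already known client
 * state, and a previously set ROAM flag is cleared once a regular
 * announcement arrives. Returns 1 if the client was added or updated,
 * 0 otherwise.
 */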
/* caller must hold orig_node refcount */
int batadv_tt_global_add(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node,
			 const unsigned char *tt_addr, uint8_t flags,
			 uint8_t ttvn)
{
	struct batadv_tt_global_entry *tt_global_entry = NULL;
	int ret = 0;
	int hash_added;
	struct batadv_tt_common_entry *common;

	tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);

	if (!tt_global_entry) {
		tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC);
		if (!tt_global_entry)
			goto out;

		common = &tt_global_entry->common;
		memcpy(common->addr, tt_addr, ETH_ALEN);

		common->flags = flags;
		tt_global_entry->roam_at = 0;
		atomic_set(&common->refcount, 2);
		common->added_at = jiffies;

		INIT_HLIST_HEAD(&tt_global_entry->orig_list);
		spin_lock_init(&tt_global_entry->list_lock);

		hash_added = batadv_hash_add(bat_priv->tt.global_hash,
					     batadv_compare_tt,
					     batadv_choose_orig, common,
					     &common->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* remove the reference for the hash */
			batadv_tt_global_entry_free_ref(tt_global_entry);
			goto out_remove;
		}
	} else {
		/* If there is already a global entry, we can use this one for
		 * our processing.
		 * But if we are trying to add a temporary client we can exit
		 * directly because the temporary information should never
		 * override any already known client state (whatever it is)
		 */
		if (flags & BATADV_TT_CLIENT_TEMP)
			goto out;

		/* if the client was temporarily added before receiving the
		 * first OGM announcing it, we have to clear the TEMP flag
		 */
		tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP;

		/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
		 * one originator left in the list and we previously received a
		 * delete + roaming change for this originator.
		 *
		 * We should first delete the old originator before adding the
		 * new one.
		 */
		if (tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM) {
			batadv_tt_global_del_orig_list(tt_global_entry);
			tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
			tt_global_entry->roam_at = 0;
		}
	}
	/* add the new orig_entry (if needed) or update it */
	batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Creating new global tt entry: %pM (via %pM)\n",
		   tt_global_entry->common.addr, orig_node->orig);
	ret = 1;

out_remove:
	/* remove address from local hash if present */
	batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
			       "global tt received",
			       flags & BATADV_TT_CLIENT_ROAM);
out:
	if (tt_global_entry)
		batadv_tt_global_entry_free_ref(tt_global_entry);
	return ret;
}
/* print all orig nodes that announce the address for this global entry.
 * it is assumed that the caller holds rcu_read_lock();
 */
static void
batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
			     struct seq_file *seq)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct batadv_tt_orig_list_entry *orig_entry;
	struct batadv_tt_common_entry *tt_common_entry;
	uint16_t flags;
	uint8_t last_ttvn;

	tt_common_entry = &tt_global_entry->common;

	head = &tt_global_entry->orig_list;

	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		flags = tt_common_entry->flags;
		last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
		seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c%c]\n",
			   tt_global_entry->common.addr, orig_entry->ttvn,
			   orig_entry->orig_node->orig, last_ttvn,
			   (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
			   (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
			   (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
	}
}
int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_tt_global_entry *tt_global;
	struct batadv_hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);
	seq_printf(seq, " %-13s %s %-15s %s %s\n",
		   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global = container_of(tt_common_entry,
						 struct batadv_tt_global_entry,
						 common);
			batadv_tt_global_print_entry(tt_global, seq);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return 0;
}
/* deletes the orig list of a tt_global_entry */
static void
batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
{
	struct hlist_head *head;
	struct hlist_node *node, *safe;
	struct batadv_tt_orig_list_entry *orig_entry;

	spin_lock_bh(&tt_global_entry->list_lock);
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
		hlist_del_rcu(node);
		batadv_tt_orig_list_entry_free_ref(orig_entry);
	}
	spin_unlock_bh(&tt_global_entry->list_lock);
}

static void
batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
				struct batadv_tt_global_entry *tt_global_entry,
				struct batadv_orig_node *orig_node,
				const char *message)
{
	struct hlist_head *head;
	struct hlist_node *node, *safe;
	struct batadv_tt_orig_list_entry *orig_entry;

	spin_lock_bh(&tt_global_entry->list_lock);
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
		if (orig_entry->orig_node == orig_node) {
			batadv_dbg(BATADV_DBG_TT, bat_priv,
				   "Deleting %pM from global tt entry %pM: %s\n",
				   orig_node->orig,
				   tt_global_entry->common.addr, message);
			hlist_del_rcu(node);
			batadv_tt_orig_list_entry_free_ref(orig_entry);
		}
	}
	spin_unlock_bh(&tt_global_entry->list_lock);
}

static void
batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
			    struct batadv_tt_global_entry *tt_global_entry,
			    const char *message)
{
	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Deleting global tt entry %pM: %s\n",
		   tt_global_entry->common.addr, message);

	batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
			   batadv_choose_orig, tt_global_entry->common.addr);
	batadv_tt_global_entry_free_ref(tt_global_entry);
}
/* If the client is to be deleted, we check if it is the last originator entry
 * within the tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag
 * and the timer, otherwise we simply remove the originator scheduled for
 * deletion.
 */
static void
batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
			     struct batadv_tt_global_entry *tt_global_entry,
			     struct batadv_orig_node *orig_node,
			     const char *message)
{
	bool last_entry = true;
	struct hlist_head *head;
	struct hlist_node *node;
	struct batadv_tt_orig_list_entry *orig_entry;

	/* no local entry exists, case 1:
	 * Check if this is the last one or if other entries exist.
	 */
	rcu_read_lock();
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		if (orig_entry->orig_node != orig_node) {
			last_entry = false;
			break;
		}
	}
	rcu_read_unlock();

	if (last_entry) {
		/* it's the last one, mark for roaming. */
		tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
		tt_global_entry->roam_at = jiffies;
	} else
		/* there is another entry, we can simply delete this
		 * one and can still use the other one.
		 */
		batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
						orig_node, message);
}
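
/* batadv_tt_global_del - remove an originator from a global TT entry
 *
 * Detaches orig_node from the global entry for addr. If the removal was
 * caused by a roaming event, the entry is either kept with the ROAM flag
 * set (waiting for the new originator to claim the client) or deleted
 * right away when the client has roamed to us. Entries left without any
 * originator are removed from the global table.
 */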
static void batadv_tt_global_del(struct batadv_priv *bat_priv,
				 struct batadv_orig_node *orig_node,
				 const unsigned char *addr,
				 const char *message, bool roaming)
{
	struct batadv_tt_global_entry *tt_global_entry = NULL;
	struct batadv_tt_local_entry *local_entry = NULL;

	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	if (!roaming) {
		batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
						orig_node, message);

		if (hlist_empty(&tt_global_entry->orig_list))
			batadv_tt_global_del_struct(bat_priv, tt_global_entry,
						    message);

		goto out;
	}

	/* if we are deleting a global entry due to a roam
	 * event, there are two possibilities:
	 * 1) the client roamed from node A to node B => if there
	 *    is only one originator left for this client, we mark
	 *    it with BATADV_TT_CLIENT_ROAM, we start a timer and we
	 *    wait for node B to claim it. In case of timeout
	 *    the entry is purged.
	 *
	 *    If there are other originators left, we directly delete
	 *    the originator.
	 * 2) the client roamed to us => we can directly delete
	 *    the global entry, since it is useless now.
	 */
	local_entry = batadv_tt_local_hash_find(bat_priv,
						tt_global_entry->common.addr);
	if (local_entry) {
		/* local entry exists, case 2: client roamed to us. */
		batadv_tt_global_del_orig_list(tt_global_entry);
		batadv_tt_global_del_struct(bat_priv, tt_global_entry, message);
	} else
		/* no local entry exists, case 1: check for roaming */
		batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
					     orig_node, message);

out:
	if (tt_global_entry)
		batadv_tt_global_entry_free_ref(tt_global_entry);
	if (local_entry)
		batadv_tt_local_entry_free_ref(local_entry);
}
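
/* batadv_tt_global_del_orig - purge all global entries of an originator
 *
 * Removes orig_node from every global TT entry, deletes entries whose
 * originator list becomes empty and marks the originator's translation
 * table as uninitialised.
 */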
void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
			       struct batadv_orig_node *orig_node,
			       const char *message)
{
	struct batadv_tt_global_entry *tt_global;
	struct batadv_tt_common_entry *tt_common_entry;
	uint32_t i;
	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
	struct hlist_node *node, *safe;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, safe,
					  head, hash_entry) {
			tt_global = container_of(tt_common_entry,
						 struct batadv_tt_global_entry,
						 common);

			batadv_tt_global_del_orig_entry(bat_priv, tt_global,
							orig_node, message);

			if (hlist_empty(&tt_global->orig_list)) {
				batadv_dbg(BATADV_DBG_TT, bat_priv,
					   "Deleting global tt entry %pM: %s\n",
					   tt_global->common.addr, message);
				hlist_del_rcu(node);
				batadv_tt_global_entry_free_ref(tt_global);
			}
		}
		spin_unlock_bh(list_lock);
	}
	orig_node->tt_initialised = false;
}
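
/* batadv_tt_global_to_purge - check whether a global entry has expired
 *
 * Returns true (and sets msg accordingly) if the entry is flagged ROAM and
 * its roaming timer ran out, or if it is a TEMP client that was never
 * confirmed within the temporary client timeout.
 */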
static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
				      char **msg)
{
	bool purge = false;
	unsigned long roam_timeout = BATADV_TT_CLIENT_ROAM_TIMEOUT;
	unsigned long temp_timeout = BATADV_TT_CLIENT_TEMP_TIMEOUT;

	if ((tt_global->common.flags & BATADV_TT_CLIENT_ROAM) &&
	    batadv_has_timed_out(tt_global->roam_at, roam_timeout)) {
		purge = true;
		*msg = "Roaming timeout\n";
	}

	if ((tt_global->common.flags & BATADV_TT_CLIENT_TEMP) &&
	    batadv_has_timed_out(tt_global->common.added_at, temp_timeout)) {
		purge = true;
		*msg = "Temporary client timeout\n";
	}

	return purge;
}

static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
	struct hlist_head *head;
	struct hlist_node *node, *node_tmp;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;
	char *msg = NULL;
	struct batadv_tt_common_entry *tt_common;
	struct batadv_tt_global_entry *tt_global;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
					  hash_entry) {
			tt_global = container_of(tt_common,
						 struct batadv_tt_global_entry,
						 common);

			if (!batadv_tt_global_to_purge(tt_global, &msg))
				continue;

			batadv_dbg(BATADV_DBG_TT, bat_priv,
				   "Deleting global tt entry (%pM): %s\n",
				   tt_global->common.addr, msg);

			hlist_del_rcu(node);

			batadv_tt_global_entry_free_ref(tt_global);
		}
		spin_unlock_bh(list_lock);
	}
}
static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_tt_global_entry *tt_global;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt.global_hash)
		return;

	hash = bat_priv->tt.global_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_global = container_of(tt_common_entry,
						 struct batadv_tt_global_entry,
						 common);
			batadv_tt_global_entry_free_ref(tt_global);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);

	bat_priv->tt.global_hash = NULL;
}

static bool
_batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
		       struct batadv_tt_global_entry *tt_global_entry)
{
	bool ret = false;

	if (tt_local_entry->common.flags & BATADV_TT_CLIENT_WIFI &&
	    tt_global_entry->common.flags & BATADV_TT_CLIENT_WIFI)
		ret = true;

	return ret;
}
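
/* batadv_transtable_search - find the originator serving a client address
 *
 * Looks up addr in the global translation table and returns the announcing
 * originator reachable via the best (highest TQ) router, with its refcount
 * increased. If src is given and AP isolation is enabled, NULL is returned
 * for wifi-to-wifi client pairs.
 */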
struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
						  const uint8_t *src,
						  const uint8_t *addr)
{
	struct batadv_tt_local_entry *tt_local_entry = NULL;
	struct batadv_tt_global_entry *tt_global_entry = NULL;
	struct batadv_orig_node *orig_node = NULL;
	struct batadv_neigh_node *router = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	struct batadv_tt_orig_list_entry *orig_entry;
	int best_tq;

	if (src && atomic_read(&bat_priv->ap_isolation)) {
		tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
		if (!tt_local_entry)
			goto out;
	}

	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	/* check whether the clients should not communicate due to AP
	 * isolation
	 */
	if (tt_local_entry &&
	    _batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	best_tq = 0;

	rcu_read_lock();
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		router = batadv_orig_node_get_router(orig_entry->orig_node);
		if (!router)
			continue;

		if (router->tq_avg > best_tq) {
			orig_node = orig_entry->orig_node;
			best_tq = router->tq_avg;
		}
		batadv_neigh_node_free_ref(router);
	}
	/* found anything? */
	if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
		orig_node = NULL;
	rcu_read_unlock();
out:
	if (tt_global_entry)
		batadv_tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		batadv_tt_local_entry_free_ref(tt_local_entry);

	return orig_node;
}
2011-04-27 16:27:44 +04:00
/* Calculates the checksum of the local table of a given orig_node */
2012-06-06 00:31:31 +04:00
static uint16_t batadv_tt_global_crc ( struct batadv_priv * bat_priv ,
struct batadv_orig_node * orig_node )
2011-04-27 16:27:44 +04:00
{
uint16_t total = 0 , total_one ;
2012-07-16 00:26:51 +04:00
struct batadv_hashtable * hash = bat_priv - > tt . global_hash ;
2012-06-06 00:31:31 +04:00
struct batadv_tt_common_entry * tt_common ;
struct batadv_tt_global_entry * tt_global ;
2011-04-27 16:27:44 +04:00
struct hlist_node * node ;
struct hlist_head * head ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
int j ;
2011-04-27 16:27:44 +04:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
rcu_read_lock ( ) ;
2012-06-04 00:19:21 +04:00
hlist_for_each_entry_rcu ( tt_common , node , head , hash_entry ) {
2012-06-06 00:31:31 +04:00
tt_global = container_of ( tt_common ,
struct batadv_tt_global_entry ,
common ) ;
2011-10-22 22:12:51 +04:00
/* Roaming clients are in the global table for
* consistency only . They don ' t have to be
* taken into account while computing the
* global crc
*/
2012-06-04 00:19:21 +04:00
if ( tt_common - > flags & BATADV_TT_CLIENT_ROAM )
2011-10-22 22:12:51 +04:00
continue ;
2012-07-06 01:38:29 +04:00
/* Temporary clients have not been announced yet, so
* they have to be skipped while computing the global
* crc
*/
if ( tt_common - > flags & BATADV_TT_CLIENT_TEMP )
continue ;
2011-10-22 22:12:51 +04:00
/* find out if this global entry is announced by this
* originator
*/
2012-06-06 00:31:31 +04:00
if ( ! batadv_tt_global_entry_has_orig ( tt_global ,
2012-05-16 22:23:16 +04:00
orig_node ) )
2011-10-22 22:12:51 +04:00
continue ;
total_one = 0 ;
for ( j = 0 ; j < ETH_ALEN ; j + + )
total_one = crc16_byte ( total_one ,
2012-06-04 00:19:21 +04:00
tt_common - > addr [ j ] ) ;
2011-10-22 22:12:51 +04:00
total ^ = total_one ;
2011-04-27 16:27:44 +04:00
}
rcu_read_unlock ( ) ;
}
return total ;
}
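/* The checksum above is intentionally order independent: every announced
 * client contributes a CRC16 of its own address and the per-client values
 * are combined with XOR, so two nodes that walk their hash buckets in a
 * different order still compute the same table CRC. As a rough
 * illustration (hypothetical values, not taken from a real table):
 *
 *	crc16(clientA) = 0x1a2b
 *	crc16(clientB) = 0x0f0f
 *	table crc      = 0x1a2b ^ 0x0f0f = 0x1524
 *
 * Roaming and temporary clients are skipped, as noted in the loop above,
 * because they are not part of what the originator has committed via its
 * ttvn.
 */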
/* Calculates the checksum of the local table */
2012-06-06 00:31:31 +04:00
static uint16_t batadv_tt_local_crc ( struct batadv_priv * bat_priv )
2011-04-27 16:27:44 +04:00
{
uint16_t total = 0 , total_one ;
2012-07-16 00:26:51 +04:00
struct batadv_hashtable * hash = bat_priv - > tt . local_hash ;
2012-06-06 00:31:31 +04:00
struct batadv_tt_common_entry * tt_common ;
2011-04-27 16:27:44 +04:00
struct hlist_node * node ;
struct hlist_head * head ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
int j ;
2011-04-27 16:27:44 +04:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
rcu_read_lock ( ) ;
2012-06-04 00:19:21 +04:00
hlist_for_each_entry_rcu ( tt_common , node , head , hash_entry ) {
2011-07-07 03:40:58 +04:00
/* not yet committed clients must not be taken into
 * account while computing the CRC
 */
2012-06-04 00:19:21 +04:00
if ( tt_common - > flags & BATADV_TT_CLIENT_NEW )
2011-07-07 03:40:58 +04:00
continue ;
2011-04-27 16:27:44 +04:00
total_one = 0 ;
for ( j = 0 ; j < ETH_ALEN ; j + + )
total_one = crc16_byte ( total_one ,
2012-06-04 00:19:21 +04:00
tt_common - > addr [ j ] ) ;
2011-04-27 16:27:44 +04:00
total ^ = total_one ;
}
rcu_read_unlock ( ) ;
}
return total ;
}
2012-06-06 00:31:31 +04:00
static void batadv_tt_req_list_free ( struct batadv_priv * bat_priv )
2011-04-27 16:27:44 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_tt_req_node * node , * safe ;
2011-04-27 16:27:44 +04:00
2012-07-16 00:26:51 +04:00
spin_lock_bh ( & bat_priv - > tt . req_list_lock ) ;
2011-04-27 16:27:44 +04:00
2012-07-16 00:26:51 +04:00
list_for_each_entry_safe ( node , safe , & bat_priv - > tt . req_list , list ) {
2011-04-27 16:27:44 +04:00
list_del ( & node - > list ) ;
kfree ( node ) ;
}
2012-07-16 00:26:51 +04:00
spin_unlock_bh ( & bat_priv - > tt . req_list_lock ) ;
2011-04-27 16:27:44 +04:00
}
2012-06-06 00:31:31 +04:00
static void batadv_tt_save_orig_buffer ( struct batadv_priv * bat_priv ,
struct batadv_orig_node * orig_node ,
2012-05-16 22:23:16 +04:00
const unsigned char * tt_buff ,
uint8_t tt_num_changes )
2011-04-27 16:27:44 +04:00
{
2012-05-12 04:09:39 +04:00
uint16_t tt_buff_len = batadv_tt_len ( tt_num_changes ) ;
2011-04-27 16:27:44 +04:00
/* Replace the old buffer only if I received something in the
 * last OGM (the OGM could carry no changes)
 */
2011-04-27 16:27:44 +04:00
spin_lock_bh ( & orig_node - > tt_buff_lock ) ;
if ( tt_buff_len > 0 ) {
kfree ( orig_node - > tt_buff ) ;
orig_node - > tt_buff_len = 0 ;
orig_node - > tt_buff = kmalloc ( tt_buff_len , GFP_ATOMIC ) ;
if ( orig_node - > tt_buff ) {
memcpy ( orig_node - > tt_buff , tt_buff , tt_buff_len ) ;
orig_node - > tt_buff_len = tt_buff_len ;
}
}
spin_unlock_bh ( & orig_node - > tt_buff_lock ) ;
}
2012-06-06 00:31:31 +04:00
static void batadv_tt_req_purge ( struct batadv_priv * bat_priv )
2011-04-27 16:27:44 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_tt_req_node * node , * safe ;
2011-04-27 16:27:44 +04:00
2012-07-16 00:26:51 +04:00
spin_lock_bh ( & bat_priv - > tt . req_list_lock ) ;
list_for_each_entry_safe ( node , safe , & bat_priv - > tt . req_list , list ) {
2012-06-04 00:19:17 +04:00
if ( batadv_has_timed_out ( node - > issued_at ,
BATADV_TT_REQUEST_TIMEOUT ) ) {
2011-04-27 16:27:44 +04:00
list_del ( & node - > list ) ;
kfree ( node ) ;
}
}
2012-07-16 00:26:51 +04:00
spin_unlock_bh ( & bat_priv - > tt . req_list_lock ) ;
2011-04-27 16:27:44 +04:00
}
/* returns the pointer to the new tt_req_node struct if no request
 * has already been issued for this orig_node, NULL otherwise
 */
2012-06-06 00:31:31 +04:00
static struct batadv_tt_req_node *
batadv_new_tt_req_node ( struct batadv_priv * bat_priv ,
struct batadv_orig_node * orig_node )
2011-04-27 16:27:44 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_tt_req_node * tt_req_node_tmp , * tt_req_node = NULL ;
2011-04-27 16:27:44 +04:00
2012-07-16 00:26:51 +04:00
spin_lock_bh ( & bat_priv - > tt . req_list_lock ) ;
list_for_each_entry ( tt_req_node_tmp , & bat_priv - > tt . req_list , list ) {
2012-05-12 15:48:58 +04:00
if ( batadv_compare_eth ( tt_req_node_tmp , orig_node ) & &
! batadv_has_timed_out ( tt_req_node_tmp - > issued_at ,
2012-06-04 00:19:17 +04:00
BATADV_TT_REQUEST_TIMEOUT ) )
2011-04-27 16:27:44 +04:00
goto unlock ;
}
tt_req_node = kmalloc ( sizeof ( * tt_req_node ) , GFP_ATOMIC ) ;
if ( ! tt_req_node )
goto unlock ;
memcpy ( tt_req_node - > addr , orig_node - > orig , ETH_ALEN ) ;
tt_req_node - > issued_at = jiffies ;
2012-07-16 00:26:51 +04:00
list_add ( & tt_req_node - > list , & bat_priv - > tt . req_list ) ;
2011-04-27 16:27:44 +04:00
unlock :
2012-07-16 00:26:51 +04:00
spin_unlock_bh ( & bat_priv - > tt . req_list_lock ) ;
2011-04-27 16:27:44 +04:00
return tt_req_node ;
}
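/* One tt_req_node per destination acts as a simple rate limiter: as long as
 * a request issued towards the same originator has not expired
 * (BATADV_TT_REQUEST_TIMEOUT), batadv_new_tt_req_node() returns NULL and no
 * further TT_REQUEST is generated. The node is removed either by the purge
 * timer, by a matching TT_RESPONSE or, on transmit failure, by the sender
 * itself.
 */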
2011-07-07 03:40:58 +04:00
/* data_ptr is useless here, but has to be kept to respect the prototype */
2012-05-16 22:23:16 +04:00
static int batadv_tt_local_valid_entry ( const void * entry_ptr ,
const void * data_ptr )
2011-07-07 03:40:58 +04:00
{
2012-06-06 00:31:31 +04:00
const struct batadv_tt_common_entry * tt_common_entry = entry_ptr ;
2011-07-07 03:40:58 +04:00
2012-06-04 00:19:21 +04:00
if ( tt_common_entry - > flags & BATADV_TT_CLIENT_NEW )
2011-07-07 03:40:58 +04:00
return 0 ;
return 1 ;
}
2012-05-16 22:23:16 +04:00
static int batadv_tt_global_valid ( const void * entry_ptr ,
const void * data_ptr )
2011-04-27 16:27:44 +04:00
{
2012-06-06 00:31:31 +04:00
const struct batadv_tt_common_entry * tt_common_entry = entry_ptr ;
const struct batadv_tt_global_entry * tt_global_entry ;
const struct batadv_orig_node * orig_node = data_ptr ;
2011-04-27 16:27:44 +04:00
2012-07-06 01:38:29 +04:00
if ( tt_common_entry - > flags & BATADV_TT_CLIENT_ROAM | |
tt_common_entry - > flags & BATADV_TT_CLIENT_TEMP )
2011-04-27 16:27:57 +04:00
return 0 ;
2012-06-06 00:31:31 +04:00
tt_global_entry = container_of ( tt_common_entry ,
struct batadv_tt_global_entry ,
2011-10-30 15:17:33 +04:00
common ) ;
2012-05-16 22:23:16 +04:00
return batadv_tt_global_entry_has_orig ( tt_global_entry , orig_node ) ;
2011-04-27 16:27:44 +04:00
}
2012-05-16 22:23:16 +04:00
static struct sk_buff *
batadv_tt_response_fill_table ( uint16_t tt_len , uint8_t ttvn ,
2012-06-06 00:31:28 +04:00
struct batadv_hashtable * hash ,
2012-06-06 00:31:31 +04:00
struct batadv_hard_iface * primary_if ,
2012-05-16 22:23:16 +04:00
int ( * valid_cb ) ( const void * , const void * ) ,
void * cb_data )
2011-04-27 16:27:44 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_tt_common_entry * tt_common_entry ;
2012-06-06 00:31:30 +04:00
struct batadv_tt_query_packet * tt_response ;
struct batadv_tt_change * tt_change ;
2011-04-27 16:27:44 +04:00
struct hlist_node * node ;
struct hlist_head * head ;
struct sk_buff * skb = NULL ;
uint16_t tt_tot , tt_count ;
2012-06-06 00:31:30 +04:00
ssize_t tt_query_size = sizeof ( struct batadv_tt_query_packet ) ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
2012-06-06 00:31:30 +04:00
size_t len ;
2011-04-27 16:27:44 +04:00
if ( tt_query_size + tt_len > primary_if - > soft_iface - > mtu ) {
tt_len = primary_if - > soft_iface - > mtu - tt_query_size ;
2012-06-06 00:31:30 +04:00
tt_len - = tt_len % sizeof ( struct batadv_tt_change ) ;
2011-04-27 16:27:44 +04:00
}
2012-06-06 00:31:30 +04:00
tt_tot = tt_len / sizeof ( struct batadv_tt_change ) ;
2011-04-27 16:27:44 +04:00
2012-06-06 00:31:30 +04:00
len = tt_query_size + tt_len ;
skb = dev_alloc_skb ( len + ETH_HLEN ) ;
2011-04-27 16:27:44 +04:00
if ( ! skb )
goto out ;
skb_reserve ( skb , ETH_HLEN ) ;
2012-06-06 00:31:30 +04:00
tt_response = ( struct batadv_tt_query_packet * ) skb_put ( skb , len ) ;
2011-04-27 16:27:44 +04:00
tt_response - > ttvn = ttvn ;
2012-06-06 00:31:30 +04:00
tt_change = ( struct batadv_tt_change * ) ( skb - > data + tt_query_size ) ;
2011-04-27 16:27:44 +04:00
tt_count = 0 ;
rcu_read_lock ( ) ;
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-10-30 15:17:33 +04:00
hlist_for_each_entry_rcu ( tt_common_entry , node ,
2011-04-27 16:27:44 +04:00
head , hash_entry ) {
if ( tt_count = = tt_tot )
break ;
2011-10-30 15:17:33 +04:00
if ( ( valid_cb ) & & ( ! valid_cb ( tt_common_entry , cb_data ) ) )
2011-04-27 16:27:44 +04:00
continue ;
2011-10-30 15:17:33 +04:00
memcpy ( tt_change - > addr , tt_common_entry - > addr ,
ETH_ALEN ) ;
2012-06-04 00:19:17 +04:00
tt_change - > flags = BATADV_NO_FLAGS ;
2011-04-27 16:27:44 +04:00
tt_count + + ;
tt_change + + ;
}
}
rcu_read_unlock ( ) ;
2011-10-17 16:25:13 +04:00
/* store in the message the number of entries we have successfully
 * copied
 */
2011-10-17 16:25:13 +04:00
tt_response - > tt_data = htons ( tt_count ) ;
2011-04-27 16:27:44 +04:00
out :
return skb ;
}
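/* The amount of copied entries is bounded by the MTU of the soft interface.
 * Roughly, the clamping above amounts to:
 *
 *	tt_len = min(tt_len, mtu - sizeof(struct batadv_tt_query_packet));
 *	tt_len -= tt_len % sizeof(struct batadv_tt_change);
 *
 * so a response always fits into a single unfragmented frame.
 * tt_response->tt_data is then filled with the number of entries that were
 * really copied, which may be lower than the clamped maximum if valid_cb()
 * filtered some of them out.
 */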
2012-06-06 00:31:31 +04:00
static int batadv_send_tt_request ( struct batadv_priv * bat_priv ,
struct batadv_orig_node * dst_orig_node ,
2012-05-16 22:23:16 +04:00
uint8_t ttvn , uint16_t tt_crc ,
bool full_table )
2011-04-27 16:27:44 +04:00
{
struct sk_buff * skb = NULL ;
2012-06-06 00:31:30 +04:00
struct batadv_tt_query_packet * tt_request ;
2012-06-06 00:31:31 +04:00
struct batadv_neigh_node * neigh_node = NULL ;
struct batadv_hard_iface * primary_if ;
struct batadv_tt_req_node * tt_req_node = NULL ;
2011-04-27 16:27:44 +04:00
int ret = 1 ;
2012-06-06 00:31:30 +04:00
size_t tt_req_len ;
2011-04-27 16:27:44 +04:00
2012-05-12 15:48:54 +04:00
primary_if = batadv_primary_if_get_selected ( bat_priv ) ;
2011-04-27 16:27:44 +04:00
if ( ! primary_if )
goto out ;
/* The new tt_req will be issued only if I'm not waiting for a
 * reply from the same orig_node yet
 */
2012-05-16 22:23:16 +04:00
tt_req_node = batadv_new_tt_req_node ( bat_priv , dst_orig_node ) ;
2011-04-27 16:27:44 +04:00
if ( ! tt_req_node )
goto out ;
2012-06-06 00:31:30 +04:00
skb = dev_alloc_skb ( sizeof ( * tt_request ) + ETH_HLEN ) ;
2011-04-27 16:27:44 +04:00
if ( ! skb )
goto out ;
skb_reserve ( skb , ETH_HLEN ) ;
2012-06-06 00:31:30 +04:00
tt_req_len = sizeof ( * tt_request ) ;
tt_request = ( struct batadv_tt_query_packet * ) skb_put ( skb , tt_req_len ) ;
2011-04-27 16:27:44 +04:00
2012-06-04 00:19:21 +04:00
tt_request - > header . packet_type = BATADV_TT_QUERY ;
2012-06-04 00:19:13 +04:00
tt_request - > header . version = BATADV_COMPAT_VERSION ;
2011-04-27 16:27:44 +04:00
memcpy ( tt_request - > src , primary_if - > net_dev - > dev_addr , ETH_ALEN ) ;
memcpy ( tt_request - > dst , dst_orig_node - > orig , ETH_ALEN ) ;
2012-06-04 00:19:17 +04:00
tt_request - > header . ttl = BATADV_TTL ;
2011-04-27 16:27:44 +04:00
tt_request - > ttvn = ttvn ;
2012-04-14 15:15:27 +04:00
tt_request - > tt_data = htons ( tt_crc ) ;
2012-06-04 00:19:21 +04:00
tt_request - > flags = BATADV_TT_REQUEST ;
2011-04-27 16:27:44 +04:00
if ( full_table )
2012-06-04 00:19:21 +04:00
tt_request - > flags | = BATADV_TT_FULL_TABLE ;
2011-04-27 16:27:44 +04:00
2012-05-12 04:09:34 +04:00
neigh_node = batadv_orig_node_get_router ( dst_orig_node ) ;
2011-04-27 16:27:44 +04:00
if ( ! neigh_node )
goto out ;
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_TT , bat_priv ,
2012-05-12 15:48:58 +04:00
" Sending TT_REQUEST to %pM via %pM [%c] \n " ,
dst_orig_node - > orig , neigh_node - > addr ,
( full_table ? ' F ' : ' . ' ) ) ;
2011-04-27 16:27:44 +04:00
2012-06-04 00:19:20 +04:00
batadv_inc_counter ( bat_priv , BATADV_CNT_TT_REQUEST_TX ) ;
2012-04-20 19:02:45 +04:00
2012-05-12 04:09:37 +04:00
batadv_send_skb_packet ( skb , neigh_node - > if_incoming , neigh_node - > addr ) ;
2011-04-27 16:27:44 +04:00
ret = 0 ;
out :
if ( neigh_node )
2012-05-12 04:09:34 +04:00
batadv_neigh_node_free_ref ( neigh_node ) ;
2011-04-27 16:27:44 +04:00
if ( primary_if )
2012-05-12 15:48:54 +04:00
batadv_hardif_free_ref ( primary_if ) ;
2011-04-27 16:27:44 +04:00
if ( ret )
kfree_skb ( skb ) ;
if ( ret & & tt_req_node ) {
2012-07-16 00:26:51 +04:00
spin_lock_bh ( & bat_priv - > tt . req_list_lock ) ;
2011-04-27 16:27:44 +04:00
list_del ( & tt_req_node - > list ) ;
2012-07-16 00:26:51 +04:00
spin_unlock_bh ( & bat_priv - > tt . req_list_lock ) ;
2011-04-27 16:27:44 +04:00
kfree ( tt_req_node ) ;
}
return ret ;
}
2012-06-06 00:31:30 +04:00
static bool
2012-06-06 00:31:31 +04:00
batadv_send_other_tt_response ( struct batadv_priv * bat_priv ,
2012-06-06 00:31:30 +04:00
struct batadv_tt_query_packet * tt_request )
2011-04-27 16:27:44 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_orig_node * req_dst_orig_node = NULL ;
struct batadv_orig_node * res_dst_orig_node = NULL ;
struct batadv_neigh_node * neigh_node = NULL ;
struct batadv_hard_iface * primary_if = NULL ;
2011-04-27 16:27:44 +04:00
uint8_t orig_ttvn , req_ttvn , ttvn ;
int ret = false ;
unsigned char * tt_buff ;
bool full_table ;
uint16_t tt_len , tt_tot ;
struct sk_buff * skb = NULL ;
2012-06-06 00:31:30 +04:00
struct batadv_tt_query_packet * tt_response ;
2012-07-08 20:33:51 +04:00
uint8_t * packet_pos ;
2012-06-06 00:31:30 +04:00
size_t len ;
2011-04-27 16:27:44 +04:00
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_TT , bat_priv ,
2012-05-12 15:48:58 +04:00
" Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c] \n " ,
tt_request - > src , tt_request - > ttvn , tt_request - > dst ,
2012-06-04 00:19:21 +04:00
( tt_request - > flags & BATADV_TT_FULL_TABLE ? ' F ' : ' . ' ) ) ;
2011-04-27 16:27:44 +04:00
/* Let's get the orig node of the REAL destination */
2012-05-12 15:48:56 +04:00
req_dst_orig_node = batadv_orig_hash_find ( bat_priv , tt_request - > dst ) ;
2011-04-27 16:27:44 +04:00
if ( ! req_dst_orig_node )
goto out ;
2012-05-12 15:48:56 +04:00
res_dst_orig_node = batadv_orig_hash_find ( bat_priv , tt_request - > src ) ;
2011-04-27 16:27:44 +04:00
if ( ! res_dst_orig_node )
goto out ;
2012-05-12 04:09:34 +04:00
neigh_node = batadv_orig_node_get_router ( res_dst_orig_node ) ;
2011-04-27 16:27:44 +04:00
if ( ! neigh_node )
goto out ;
2012-05-12 15:48:54 +04:00
primary_if = batadv_primary_if_get_selected ( bat_priv ) ;
2011-04-27 16:27:44 +04:00
if ( ! primary_if )
goto out ;
orig_ttvn = ( uint8_t ) atomic_read ( & req_dst_orig_node - > last_ttvn ) ;
req_ttvn = tt_request - > ttvn ;
2011-07-09 19:52:13 +04:00
/* I don't have the requested data */
2011-04-27 16:27:44 +04:00
if ( orig_ttvn ! = req_ttvn | |
2012-04-22 10:44:27 +04:00
tt_request - > tt_data ! = htons ( req_dst_orig_node - > tt_crc ) )
2011-04-27 16:27:44 +04:00
goto out ;
2011-07-09 19:52:13 +04:00
/* If the full table has been explicitly requested */
2012-06-04 00:19:21 +04:00
if ( tt_request - > flags & BATADV_TT_FULL_TABLE | |
2011-04-27 16:27:44 +04:00
! req_dst_orig_node - > tt_buff )
full_table = true ;
else
full_table = false ;
/* In this version, fragmentation is not implemented, so
 * I'll send only one packet with as many TT entries as I can
 */
2011-04-27 16:27:44 +04:00
if ( ! full_table ) {
spin_lock_bh ( & req_dst_orig_node - > tt_buff_lock ) ;
tt_len = req_dst_orig_node - > tt_buff_len ;
2012-06-06 00:31:30 +04:00
tt_tot = tt_len / sizeof ( struct batadv_tt_change ) ;
2011-04-27 16:27:44 +04:00
2012-06-06 00:31:30 +04:00
len = sizeof ( * tt_response ) + tt_len ;
skb = dev_alloc_skb ( len + ETH_HLEN ) ;
2011-04-27 16:27:44 +04:00
if ( ! skb )
goto unlock ;
skb_reserve ( skb , ETH_HLEN ) ;
2012-07-08 20:33:51 +04:00
packet_pos = skb_put ( skb , len ) ;
tt_response = ( struct batadv_tt_query_packet * ) packet_pos ;
2011-04-27 16:27:44 +04:00
tt_response - > ttvn = req_ttvn ;
tt_response - > tt_data = htons ( tt_tot ) ;
2012-06-06 00:31:30 +04:00
tt_buff = skb - > data + sizeof ( * tt_response ) ;
2011-04-27 16:27:44 +04:00
/* Copy the last orig_node's OGM buffer */
memcpy ( tt_buff , req_dst_orig_node - > tt_buff ,
req_dst_orig_node - > tt_buff_len ) ;
spin_unlock_bh ( & req_dst_orig_node - > tt_buff_lock ) ;
} else {
2012-06-06 00:31:30 +04:00
tt_len = ( uint16_t ) atomic_read ( & req_dst_orig_node - > tt_size ) ;
tt_len * = sizeof ( struct batadv_tt_change ) ;
2011-04-27 16:27:44 +04:00
ttvn = ( uint8_t ) atomic_read ( & req_dst_orig_node - > last_ttvn ) ;
2012-05-16 22:23:16 +04:00
skb = batadv_tt_response_fill_table ( tt_len , ttvn ,
2012-07-16 00:26:51 +04:00
bat_priv - > tt . global_hash ,
2012-05-16 22:23:16 +04:00
primary_if ,
batadv_tt_global_valid ,
req_dst_orig_node ) ;
2011-04-27 16:27:44 +04:00
if ( ! skb )
goto out ;
2012-06-06 00:31:30 +04:00
tt_response = ( struct batadv_tt_query_packet * ) skb - > data ;
2011-04-27 16:27:44 +04:00
}
2012-06-04 00:19:21 +04:00
tt_response - > header . packet_type = BATADV_TT_QUERY ;
2012-06-04 00:19:13 +04:00
tt_response - > header . version = BATADV_COMPAT_VERSION ;
2012-06-04 00:19:17 +04:00
tt_response - > header . ttl = BATADV_TTL ;
2011-04-27 16:27:44 +04:00
memcpy ( tt_response - > src , req_dst_orig_node - > orig , ETH_ALEN ) ;
memcpy ( tt_response - > dst , tt_request - > src , ETH_ALEN ) ;
2012-06-04 00:19:21 +04:00
tt_response - > flags = BATADV_TT_RESPONSE ;
2011-04-27 16:27:44 +04:00
if ( full_table )
2012-06-04 00:19:21 +04:00
tt_response - > flags | = BATADV_TT_FULL_TABLE ;
2011-04-27 16:27:44 +04:00
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_TT , bat_priv ,
2012-05-12 15:48:58 +04:00
" Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u) \n " ,
res_dst_orig_node - > orig , neigh_node - > addr ,
req_dst_orig_node - > orig , req_ttvn ) ;
2011-04-27 16:27:44 +04:00
2012-06-04 00:19:20 +04:00
batadv_inc_counter ( bat_priv , BATADV_CNT_TT_RESPONSE_TX ) ;
2012-04-20 19:02:45 +04:00
2012-05-12 04:09:37 +04:00
batadv_send_skb_packet ( skb , neigh_node - > if_incoming , neigh_node - > addr ) ;
2011-04-27 16:27:44 +04:00
ret = true ;
goto out ;
unlock :
spin_unlock_bh ( & req_dst_orig_node - > tt_buff_lock ) ;
out :
if ( res_dst_orig_node )
2012-05-12 04:09:34 +04:00
batadv_orig_node_free_ref ( res_dst_orig_node ) ;
2011-04-27 16:27:44 +04:00
if ( req_dst_orig_node )
2012-05-12 04:09:34 +04:00
batadv_orig_node_free_ref ( req_dst_orig_node ) ;
2011-04-27 16:27:44 +04:00
if ( neigh_node )
2012-05-12 04:09:34 +04:00
batadv_neigh_node_free_ref ( neigh_node ) ;
2011-04-27 16:27:44 +04:00
if ( primary_if )
2012-05-12 15:48:54 +04:00
batadv_hardif_free_ref ( primary_if ) ;
2011-04-27 16:27:44 +04:00
if ( ! ret )
kfree_skb ( skb ) ;
return ret ;
}
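/* Answering on behalf of another originator only happens if the local copy
 * of that originator's table is provably in sync with what the requester
 * asked for: the requested ttvn and CRC must match the locally stored
 * last_ttvn/tt_crc, otherwise no response is built and the caller can let
 * the request travel further towards the real destination (see the
 * re-routing note in batadv_send_my_tt_response()).
 */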
2012-06-06 00:31:30 +04:00
static bool
2012-06-06 00:31:31 +04:00
batadv_send_my_tt_response ( struct batadv_priv * bat_priv ,
2012-06-06 00:31:30 +04:00
struct batadv_tt_query_packet * tt_request )
2011-04-27 16:27:44 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_orig_node * orig_node = NULL ;
struct batadv_neigh_node * neigh_node = NULL ;
struct batadv_hard_iface * primary_if = NULL ;
2011-04-27 16:27:44 +04:00
uint8_t my_ttvn , req_ttvn , ttvn ;
int ret = false ;
unsigned char * tt_buff ;
bool full_table ;
uint16_t tt_len , tt_tot ;
struct sk_buff * skb = NULL ;
2012-06-06 00:31:30 +04:00
struct batadv_tt_query_packet * tt_response ;
2012-07-08 20:33:51 +04:00
uint8_t * packet_pos ;
2012-06-06 00:31:30 +04:00
size_t len ;
2011-04-27 16:27:44 +04:00
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_TT , bat_priv ,
2012-05-12 15:48:58 +04:00
" Received TT_REQUEST from %pM for ttvn: %u (me) [%c] \n " ,
tt_request - > src , tt_request - > ttvn ,
2012-06-04 00:19:21 +04:00
( tt_request - > flags & BATADV_TT_FULL_TABLE ? ' F ' : ' . ' ) ) ;
2011-04-27 16:27:44 +04:00
2012-07-16 00:26:51 +04:00
my_ttvn = ( uint8_t ) atomic_read ( & bat_priv - > tt . vn ) ;
2011-04-27 16:27:44 +04:00
req_ttvn = tt_request - > ttvn ;
2012-05-12 15:48:56 +04:00
orig_node = batadv_orig_hash_find ( bat_priv , tt_request - > src ) ;
2011-04-27 16:27:44 +04:00
if ( ! orig_node )
goto out ;
2012-05-12 04:09:34 +04:00
neigh_node = batadv_orig_node_get_router ( orig_node ) ;
2011-04-27 16:27:44 +04:00
if ( ! neigh_node )
goto out ;
2012-05-12 15:48:54 +04:00
primary_if = batadv_primary_if_get_selected ( bat_priv ) ;
2011-04-27 16:27:44 +04:00
if ( ! primary_if )
goto out ;
/* If the full table has been explicitly requested or the gap
 * is too big, send the whole local translation table
 */
2012-06-04 00:19:21 +04:00
if ( tt_request - > flags & BATADV_TT_FULL_TABLE | | my_ttvn ! = req_ttvn | |
2012-07-16 00:26:51 +04:00
! bat_priv - > tt . last_changeset )
2011-04-27 16:27:44 +04:00
full_table = true ;
else
full_table = false ;
/* In this version, fragmentation is not implemented, so
 * I'll send only one packet with as many TT entries as I can
 */
2011-04-27 16:27:44 +04:00
if ( ! full_table ) {
2012-07-16 00:26:51 +04:00
spin_lock_bh ( & bat_priv - > tt . last_changeset_lock ) ;
tt_len = bat_priv - > tt . last_changeset_len ;
2012-06-06 00:31:30 +04:00
tt_tot = tt_len / sizeof ( struct batadv_tt_change ) ;
2011-04-27 16:27:44 +04:00
2012-06-06 00:31:30 +04:00
len = sizeof ( * tt_response ) + tt_len ;
skb = dev_alloc_skb ( len + ETH_HLEN ) ;
2011-04-27 16:27:44 +04:00
if ( ! skb )
goto unlock ;
skb_reserve ( skb , ETH_HLEN ) ;
2012-07-08 20:33:51 +04:00
packet_pos = skb_put ( skb , len ) ;
tt_response = ( struct batadv_tt_query_packet * ) packet_pos ;
2011-04-27 16:27:44 +04:00
tt_response - > ttvn = req_ttvn ;
tt_response - > tt_data = htons ( tt_tot ) ;
2012-06-06 00:31:30 +04:00
tt_buff = skb - > data + sizeof ( * tt_response ) ;
2012-07-16 00:26:51 +04:00
memcpy ( tt_buff , bat_priv - > tt . last_changeset ,
bat_priv - > tt . last_changeset_len ) ;
spin_unlock_bh ( & bat_priv - > tt . last_changeset_lock ) ;
2011-04-27 16:27:44 +04:00
} else {
2012-07-16 00:26:51 +04:00
tt_len = ( uint16_t ) atomic_read ( & bat_priv - > tt . local_entry_num ) ;
2012-06-06 00:31:30 +04:00
tt_len * = sizeof ( struct batadv_tt_change ) ;
2012-07-16 00:26:51 +04:00
ttvn = ( uint8_t ) atomic_read ( & bat_priv - > tt . vn ) ;
2011-04-27 16:27:44 +04:00
2012-05-16 22:23:16 +04:00
skb = batadv_tt_response_fill_table ( tt_len , ttvn ,
2012-07-16 00:26:51 +04:00
bat_priv - > tt . local_hash ,
2012-05-16 22:23:16 +04:00
primary_if ,
batadv_tt_local_valid_entry ,
NULL ) ;
2011-04-27 16:27:44 +04:00
if ( ! skb )
goto out ;
2012-06-06 00:31:30 +04:00
tt_response = ( struct batadv_tt_query_packet * ) skb - > data ;
2011-04-27 16:27:44 +04:00
}
2012-06-04 00:19:21 +04:00
tt_response - > header . packet_type = BATADV_TT_QUERY ;
2012-06-04 00:19:13 +04:00
tt_response - > header . version = BATADV_COMPAT_VERSION ;
2012-06-04 00:19:17 +04:00
tt_response - > header . ttl = BATADV_TTL ;
2011-04-27 16:27:44 +04:00
memcpy ( tt_response - > src , primary_if - > net_dev - > dev_addr , ETH_ALEN ) ;
memcpy ( tt_response - > dst , tt_request - > src , ETH_ALEN ) ;
2012-06-04 00:19:21 +04:00
tt_response - > flags = BATADV_TT_RESPONSE ;
2011-04-27 16:27:44 +04:00
if ( full_table )
2012-06-04 00:19:21 +04:00
tt_response - > flags | = BATADV_TT_FULL_TABLE ;
2011-04-27 16:27:44 +04:00
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_TT , bat_priv ,
2012-05-12 15:48:58 +04:00
" Sending TT_RESPONSE to %pM via %pM [%c] \n " ,
orig_node - > orig , neigh_node - > addr ,
2012-06-04 00:19:21 +04:00
( tt_response - > flags & BATADV_TT_FULL_TABLE ? ' F ' : ' . ' ) ) ;
2011-04-27 16:27:44 +04:00
2012-06-04 00:19:20 +04:00
batadv_inc_counter ( bat_priv , BATADV_CNT_TT_RESPONSE_TX ) ;
2012-04-20 19:02:45 +04:00
2012-05-12 04:09:37 +04:00
batadv_send_skb_packet ( skb , neigh_node - > if_incoming , neigh_node - > addr ) ;
2011-04-27 16:27:44 +04:00
ret = true ;
goto out ;
unlock :
2012-07-16 00:26:51 +04:00
spin_unlock_bh ( & bat_priv - > tt . last_changeset_lock ) ;
2011-04-27 16:27:44 +04:00
out :
if ( orig_node )
2012-05-12 04:09:34 +04:00
batadv_orig_node_free_ref ( orig_node ) ;
2011-04-27 16:27:44 +04:00
if ( neigh_node )
2012-05-12 04:09:34 +04:00
batadv_neigh_node_free_ref ( neigh_node ) ;
2011-04-27 16:27:44 +04:00
if ( primary_if )
2012-05-12 15:48:54 +04:00
batadv_hardif_free_ref ( primary_if ) ;
2011-04-27 16:27:44 +04:00
if ( ! ret )
kfree_skb ( skb ) ;
/* This packet was for me, so it doesn't need to be re-routed */
return true ;
}
2012-06-06 00:31:31 +04:00
bool batadv_send_tt_response ( struct batadv_priv * bat_priv ,
2012-06-06 00:31:30 +04:00
struct batadv_tt_query_packet * tt_request )
2011-04-27 16:27:44 +04:00
{
2012-05-12 04:09:42 +04:00
if ( batadv_is_my_mac ( tt_request - > dst ) ) {
2012-01-22 23:00:23 +04:00
/* don't answer backbone gws! */
2012-05-12 15:38:47 +04:00
if ( batadv_bla_is_backbone_gw_orig ( bat_priv , tt_request - > src ) )
2012-01-22 23:00:23 +04:00
return true ;
2012-05-16 22:23:16 +04:00
return batadv_send_my_tt_response ( bat_priv , tt_request ) ;
2012-01-22 23:00:23 +04:00
} else {
2012-05-16 22:23:16 +04:00
return batadv_send_other_tt_response ( bat_priv , tt_request ) ;
2012-01-22 23:00:23 +04:00
}
2011-04-27 16:27:44 +04:00
}
2012-06-06 00:31:31 +04:00
static void _batadv_tt_update_changes ( struct batadv_priv * bat_priv ,
struct batadv_orig_node * orig_node ,
2012-06-06 00:31:30 +04:00
struct batadv_tt_change * tt_change ,
2012-05-16 22:23:16 +04:00
uint16_t tt_num_changes , uint8_t ttvn )
2011-04-27 16:27:44 +04:00
{
int i ;
2012-05-16 22:23:16 +04:00
int roams ;
2011-04-27 16:27:44 +04:00
for ( i = 0 ; i < tt_num_changes ; i + + ) {
2012-06-04 00:19:21 +04:00
if ( ( tt_change + i ) - > flags & BATADV_TT_CLIENT_DEL ) {
roams = ( tt_change + i ) - > flags & BATADV_TT_CLIENT_ROAM ;
2012-05-16 22:23:16 +04:00
batadv_tt_global_del ( bat_priv , orig_node ,
( tt_change + i ) - > addr ,
2012-05-25 02:00:54 +04:00
" tt removed by changes " ,
roams ) ;
2012-05-12 04:09:39 +04:00
} else {
if ( ! batadv_tt_global_add ( bat_priv , orig_node ,
2012-05-25 02:00:54 +04:00
( tt_change + i ) - > addr ,
( tt_change + i ) - > flags , ttvn ) )
2011-04-27 16:27:44 +04:00
/* In case of a problem while storing a
 * global_entry, we stop the updating
 * procedure without committing the
 * ttvn change. This avoids sending
 * corrupted data in reply to a later tt_request
 */
return ;
2012-05-12 04:09:39 +04:00
}
2011-04-27 16:27:44 +04:00
}
2011-11-07 19:36:40 +04:00
orig_node - > tt_initialised = true ;
2011-04-27 16:27:44 +04:00
}
2012-06-06 00:31:31 +04:00
static void batadv_tt_fill_gtable ( struct batadv_priv * bat_priv ,
2012-06-06 00:31:30 +04:00
struct batadv_tt_query_packet * tt_response )
2011-04-27 16:27:44 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_orig_node * orig_node = NULL ;
2011-04-27 16:27:44 +04:00
2012-05-12 15:48:56 +04:00
orig_node = batadv_orig_hash_find ( bat_priv , tt_response - > src ) ;
2011-04-27 16:27:44 +04:00
if ( ! orig_node )
goto out ;
/* Purge the old table first.. */
2012-05-12 04:09:39 +04:00
batadv_tt_global_del_orig ( bat_priv , orig_node , " Received full table " ) ;
2011-04-27 16:27:44 +04:00
2012-05-16 22:23:16 +04:00
_batadv_tt_update_changes ( bat_priv , orig_node ,
2012-06-06 00:31:30 +04:00
( struct batadv_tt_change * ) ( tt_response + 1 ) ,
2012-05-16 22:23:16 +04:00
ntohs ( tt_response - > tt_data ) ,
tt_response - > ttvn ) ;
2011-04-27 16:27:44 +04:00
spin_lock_bh ( & orig_node - > tt_buff_lock ) ;
kfree ( orig_node - > tt_buff ) ;
orig_node - > tt_buff_len = 0 ;
orig_node - > tt_buff = NULL ;
spin_unlock_bh ( & orig_node - > tt_buff_lock ) ;
atomic_set ( & orig_node - > last_ttvn , tt_response - > ttvn ) ;
out :
if ( orig_node )
2012-05-12 04:09:34 +04:00
batadv_orig_node_free_ref ( orig_node ) ;
2011-04-27 16:27:44 +04:00
}
2012-06-06 00:31:31 +04:00
static void batadv_tt_update_changes ( struct batadv_priv * bat_priv ,
struct batadv_orig_node * orig_node ,
2012-05-16 22:23:16 +04:00
uint16_t tt_num_changes , uint8_t ttvn ,
2012-06-06 00:31:30 +04:00
struct batadv_tt_change * tt_change )
2011-04-27 16:27:44 +04:00
{
2012-05-16 22:23:16 +04:00
_batadv_tt_update_changes ( bat_priv , orig_node , tt_change ,
tt_num_changes , ttvn ) ;
2011-04-27 16:27:44 +04:00
2012-05-16 22:23:16 +04:00
batadv_tt_save_orig_buffer ( bat_priv , orig_node ,
( unsigned char * ) tt_change , tt_num_changes ) ;
2011-04-27 16:27:44 +04:00
atomic_set ( & orig_node - > last_ttvn , ttvn ) ;
}
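/* Besides applying the changes, the raw change buffer is stored via
 * batadv_tt_save_orig_buffer() so that this node can later act as a proxy
 * and answer TT_REQUESTs for this originator with the same diff, without
 * having to keep a full per-ttvn history.
 */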
2012-06-06 00:31:31 +04:00
bool batadv_is_my_client ( struct batadv_priv * bat_priv , const uint8_t * addr )
2011-04-27 16:27:44 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_tt_local_entry * tt_local_entry = NULL ;
2011-04-27 16:28:07 +04:00
bool ret = false ;
2011-04-27 16:27:44 +04:00
2012-05-16 22:23:16 +04:00
tt_local_entry = batadv_tt_local_hash_find ( bat_priv , addr ) ;
2011-04-27 16:28:07 +04:00
if ( ! tt_local_entry )
goto out ;
2011-07-07 03:40:58 +04:00
/* Check if the client has been logically deleted (but is kept for
 * consistency purposes)
 */
2012-06-04 00:19:21 +04:00
if ( tt_local_entry - > common . flags & BATADV_TT_CLIENT_PENDING )
2011-07-07 03:40:58 +04:00
goto out ;
2011-04-27 16:28:07 +04:00
ret = true ;
out :
2011-04-27 16:27:44 +04:00
if ( tt_local_entry )
2012-05-16 22:23:16 +04:00
batadv_tt_local_entry_free_ref ( tt_local_entry ) ;
2011-04-27 16:28:07 +04:00
return ret ;
2011-04-27 16:27:44 +04:00
}
2012-06-06 00:31:31 +04:00
void batadv_handle_tt_response ( struct batadv_priv * bat_priv ,
2012-06-06 00:31:30 +04:00
struct batadv_tt_query_packet * tt_response )
2011-04-27 16:27:44 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_tt_req_node * node , * safe ;
struct batadv_orig_node * orig_node = NULL ;
2012-06-06 00:31:30 +04:00
struct batadv_tt_change * tt_change ;
2011-04-27 16:27:44 +04:00
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_TT , bat_priv ,
2012-05-12 15:48:58 +04:00
" Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c] \n " ,
tt_response - > src , tt_response - > ttvn ,
ntohs ( tt_response - > tt_data ) ,
2012-06-04 00:19:21 +04:00
( tt_response - > flags & BATADV_TT_FULL_TABLE ? ' F ' : ' . ' ) ) ;
2011-04-27 16:27:44 +04:00
2012-01-22 23:00:23 +04:00
/* we should never have asked a backbone gw */
2012-05-12 15:38:47 +04:00
if ( batadv_bla_is_backbone_gw_orig ( bat_priv , tt_response - > src ) )
2012-01-22 23:00:23 +04:00
goto out ;
2012-05-12 15:48:56 +04:00
orig_node = batadv_orig_hash_find ( bat_priv , tt_response - > src ) ;
2011-04-27 16:27:44 +04:00
if ( ! orig_node )
goto out ;
2012-06-06 00:31:30 +04:00
if ( tt_response - > flags & BATADV_TT_FULL_TABLE ) {
2012-05-16 22:23:16 +04:00
batadv_tt_fill_gtable ( bat_priv , tt_response ) ;
2012-06-06 00:31:30 +04:00
} else {
tt_change = ( struct batadv_tt_change * ) ( tt_response + 1 ) ;
2012-05-16 22:23:16 +04:00
batadv_tt_update_changes ( bat_priv , orig_node ,
ntohs ( tt_response - > tt_data ) ,
2012-06-06 00:31:30 +04:00
tt_response - > ttvn , tt_change ) ;
}
2011-04-27 16:27:44 +04:00
/* Delete the tt_req_node from pending tt_requests list */
2012-07-16 00:26:51 +04:00
spin_lock_bh ( & bat_priv - > tt . req_list_lock ) ;
list_for_each_entry_safe ( node , safe , & bat_priv - > tt . req_list , list ) {
2012-05-12 15:48:58 +04:00
if ( ! batadv_compare_eth ( node - > addr , tt_response - > src ) )
2011-04-27 16:27:44 +04:00
continue ;
list_del ( & node - > list ) ;
kfree ( node ) ;
}
2012-07-16 00:26:51 +04:00
spin_unlock_bh ( & bat_priv - > tt . req_list_lock ) ;
2011-04-27 16:27:44 +04:00
/* Recalculate the CRC for this orig_node and store it */
2012-05-16 22:23:16 +04:00
orig_node - > tt_crc = batadv_tt_global_crc ( bat_priv , orig_node ) ;
2011-04-27 16:27:57 +04:00
/* Roaming phase is over: tables are in sync again. I can
 * unset the flag
 */
2011-04-27 16:27:57 +04:00
orig_node - > tt_poss_change = false ;
2011-04-27 16:27:44 +04:00
out :
if ( orig_node )
2012-05-12 04:09:34 +04:00
batadv_orig_node_free_ref ( orig_node ) ;
2011-04-27 16:27:44 +04:00
}
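/* Once the response has been applied, the pending tt_req_node for this
 * originator is dropped so that a new TT_REQUEST may be issued later if
 * needed, and the CRC is recomputed locally instead of trusting the value
 * received over the air; a mismatch detected on the next OGM would simply
 * trigger another request.
 */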
2012-06-06 00:31:31 +04:00
int batadv_tt_init ( struct batadv_priv * bat_priv )
2011-04-27 16:27:44 +04:00
{
2012-05-05 15:27:28 +04:00
int ret ;
2011-04-27 16:27:44 +04:00
2012-05-16 22:23:16 +04:00
ret = batadv_tt_local_init ( bat_priv ) ;
2012-05-05 15:27:28 +04:00
if ( ret < 0 )
return ret ;
2012-05-16 22:23:16 +04:00
ret = batadv_tt_global_init ( bat_priv ) ;
2012-05-05 15:27:28 +04:00
if ( ret < 0 )
return ret ;
2011-04-27 16:27:44 +04:00
2012-05-16 22:23:16 +04:00
batadv_tt_start_timer ( bat_priv ) ;
2011-04-27 16:27:44 +04:00
return 1 ;
}
2012-06-06 00:31:31 +04:00
static void batadv_tt_roam_list_free ( struct batadv_priv * bat_priv )
2011-04-27 16:27:44 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_tt_roam_node * node , * safe ;
2011-04-27 16:27:44 +04:00
2012-07-16 00:26:51 +04:00
spin_lock_bh ( & bat_priv - > tt . roam_list_lock ) ;
2011-04-27 16:27:44 +04:00
2012-07-16 00:26:51 +04:00
list_for_each_entry_safe ( node , safe , & bat_priv - > tt . roam_list , list ) {
2011-04-27 16:27:57 +04:00
list_del ( & node - > list ) ;
kfree ( node ) ;
}
2012-07-16 00:26:51 +04:00
spin_unlock_bh ( & bat_priv - > tt . roam_list_lock ) ;
2011-04-27 16:27:57 +04:00
}
2012-06-06 00:31:31 +04:00
static void batadv_tt_roam_purge ( struct batadv_priv * bat_priv )
2011-04-27 16:27:57 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_tt_roam_node * node , * safe ;
2011-04-27 16:27:57 +04:00
2012-07-16 00:26:51 +04:00
spin_lock_bh ( & bat_priv - > tt . roam_list_lock ) ;
list_for_each_entry_safe ( node , safe , & bat_priv - > tt . roam_list , list ) {
2012-06-04 00:19:17 +04:00
if ( ! batadv_has_timed_out ( node - > first_time ,
BATADV_ROAMING_MAX_TIME ) )
2011-04-27 16:27:57 +04:00
continue ;
list_del ( & node - > list ) ;
kfree ( node ) ;
}
2012-07-16 00:26:51 +04:00
spin_unlock_bh ( & bat_priv - > tt . roam_list_lock ) ;
2011-04-27 16:27:57 +04:00
}
/* This function checks whether the client already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise
 */
2012-06-06 00:31:31 +04:00
static bool batadv_tt_check_roam_count ( struct batadv_priv * bat_priv ,
2012-05-16 22:23:16 +04:00
uint8_t * client )
2011-04-27 16:27:57 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_tt_roam_node * tt_roam_node ;
2011-04-27 16:27:57 +04:00
bool ret = false ;
2012-07-16 00:26:51 +04:00
spin_lock_bh ( & bat_priv - > tt . roam_list_lock ) ;
2011-04-27 16:27:57 +04:00
/* Re-use the roaming counter of this client if an entry for it already
 * exists and has not expired yet; otherwise a fresh entry is created
 * below
 */
2012-07-16 00:26:51 +04:00
list_for_each_entry ( tt_roam_node , & bat_priv - > tt . roam_list , list ) {
2012-05-12 15:48:58 +04:00
if ( ! batadv_compare_eth ( tt_roam_node - > addr , client ) )
2011-04-27 16:27:57 +04:00
continue ;
2012-05-12 15:48:58 +04:00
if ( batadv_has_timed_out ( tt_roam_node - > first_time ,
2012-06-04 00:19:17 +04:00
BATADV_ROAMING_MAX_TIME ) )
2011-04-27 16:27:57 +04:00
continue ;
2012-05-16 22:23:22 +04:00
if ( ! batadv_atomic_dec_not_zero ( & tt_roam_node - > counter ) )
2011-04-27 16:27:57 +04:00
/* Sorry, you roamed too many times! */
goto unlock ;
ret = true ;
break ;
}
if ( ! ret ) {
tt_roam_node = kmalloc ( sizeof ( * tt_roam_node ) , GFP_ATOMIC ) ;
if ( ! tt_roam_node )
goto unlock ;
tt_roam_node - > first_time = jiffies ;
2012-06-04 00:19:17 +04:00
atomic_set ( & tt_roam_node - > counter ,
BATADV_ROAMING_MAX_COUNT - 1 ) ;
2011-04-27 16:27:57 +04:00
memcpy ( tt_roam_node - > addr , client , ETH_ALEN ) ;
2012-07-16 00:26:51 +04:00
list_add ( & tt_roam_node - > list , & bat_priv - > tt . roam_list ) ;
2011-04-27 16:27:57 +04:00
ret = true ;
}
unlock :
2012-07-16 00:26:51 +04:00
spin_unlock_bh ( & bat_priv - > tt . roam_list_lock ) ;
2011-04-27 16:27:57 +04:00
return ret ;
}
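/* Rough sketch of the rate limiting above: the first roaming event for a
 * client creates its tt_roam_node with counter = BATADV_ROAMING_MAX_COUNT - 1
 * and is allowed; each following event within BATADV_ROAMING_MAX_TIME is
 * allowed only if the counter can still be decremented (i.e. it has not
 * reached zero yet). Once the counter is exhausted, further ROAMING_ADVs for
 * that client are suppressed until the entry times out and a fresh one is
 * created.
 */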
2012-06-06 00:31:31 +04:00
static void batadv_send_roam_adv ( struct batadv_priv * bat_priv , uint8_t * client ,
struct batadv_orig_node * orig_node )
2011-04-27 16:27:57 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_neigh_node * neigh_node = NULL ;
2011-04-27 16:27:57 +04:00
struct sk_buff * skb = NULL ;
2012-06-06 00:31:30 +04:00
struct batadv_roam_adv_packet * roam_adv_packet ;
2011-04-27 16:27:57 +04:00
int ret = 1 ;
2012-06-06 00:31:31 +04:00
struct batadv_hard_iface * primary_if ;
2012-06-06 00:31:30 +04:00
size_t len = sizeof ( * roam_adv_packet ) ;
2011-04-27 16:27:57 +04:00
/* before going on we have to check whether the client has
 * already roamed to us too many times
 */
2012-05-16 22:23:16 +04:00
if ( ! batadv_tt_check_roam_count ( bat_priv , client ) )
2011-04-27 16:27:57 +04:00
goto out ;
2012-06-06 00:31:30 +04:00
skb = dev_alloc_skb ( sizeof ( * roam_adv_packet ) + ETH_HLEN ) ;
2011-04-27 16:27:57 +04:00
if ( ! skb )
goto out ;
skb_reserve ( skb , ETH_HLEN ) ;
2012-06-06 00:31:30 +04:00
roam_adv_packet = ( struct batadv_roam_adv_packet * ) skb_put ( skb , len ) ;
2011-04-27 16:27:57 +04:00
2012-06-04 00:19:21 +04:00
roam_adv_packet - > header . packet_type = BATADV_ROAM_ADV ;
2012-06-04 00:19:13 +04:00
roam_adv_packet - > header . version = BATADV_COMPAT_VERSION ;
2012-06-04 00:19:17 +04:00
roam_adv_packet - > header . ttl = BATADV_TTL ;
2012-06-28 13:56:52 +04:00
roam_adv_packet - > reserved = 0 ;
2012-05-12 15:48:54 +04:00
primary_if = batadv_primary_if_get_selected ( bat_priv ) ;
2011-04-27 16:27:57 +04:00
if ( ! primary_if )
goto out ;
memcpy ( roam_adv_packet - > src , primary_if - > net_dev - > dev_addr , ETH_ALEN ) ;
2012-05-12 15:48:54 +04:00
batadv_hardif_free_ref ( primary_if ) ;
2011-04-27 16:27:57 +04:00
memcpy ( roam_adv_packet - > dst , orig_node - > orig , ETH_ALEN ) ;
memcpy ( roam_adv_packet - > client , client , ETH_ALEN ) ;
2012-05-12 04:09:34 +04:00
neigh_node = batadv_orig_node_get_router ( orig_node ) ;
2011-04-27 16:27:57 +04:00
if ( ! neigh_node )
goto out ;
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_TT , bat_priv ,
2012-05-12 15:48:58 +04:00
" Sending ROAMING_ADV to %pM (client %pM) via %pM \n " ,
orig_node - > orig , client , neigh_node - > addr ) ;
2011-04-27 16:27:57 +04:00
2012-06-04 00:19:20 +04:00
batadv_inc_counter ( bat_priv , BATADV_CNT_TT_ROAM_ADV_TX ) ;
2012-04-20 19:02:45 +04:00
2012-05-12 04:09:37 +04:00
batadv_send_skb_packet ( skb , neigh_node - > if_incoming , neigh_node - > addr ) ;
2011-04-27 16:27:57 +04:00
ret = 0 ;
out :
if ( neigh_node )
2012-05-12 04:09:34 +04:00
batadv_neigh_node_free_ref ( neigh_node ) ;
2011-04-27 16:27:57 +04:00
if ( ret )
kfree_skb ( skb ) ;
return ;
2011-04-27 16:27:44 +04:00
}
2012-05-16 22:23:16 +04:00
static void batadv_tt_purge ( struct work_struct * work )
2011-04-27 16:27:44 +04:00
{
2012-06-06 00:31:31 +04:00
struct delayed_work * delayed_work ;
2012-07-16 00:26:51 +04:00
struct batadv_priv_tt * priv_tt ;
2012-06-06 00:31:31 +04:00
struct batadv_priv * bat_priv ;
delayed_work = container_of ( work , struct delayed_work , work ) ;
2012-07-16 00:26:51 +04:00
priv_tt = container_of ( delayed_work , struct batadv_priv_tt , work ) ;
bat_priv = container_of ( priv_tt , struct batadv_priv , tt ) ;
2011-04-27 16:27:44 +04:00
2012-05-16 22:23:16 +04:00
batadv_tt_local_purge ( bat_priv ) ;
2012-07-06 01:38:29 +04:00
batadv_tt_global_purge ( bat_priv ) ;
2012-05-16 22:23:16 +04:00
batadv_tt_req_purge ( bat_priv ) ;
batadv_tt_roam_purge ( bat_priv ) ;
2011-04-27 16:27:44 +04:00
2012-05-16 22:23:16 +04:00
batadv_tt_start_timer ( bat_priv ) ;
2011-04-27 16:27:44 +04:00
}
2011-04-27 16:27:57 +04:00
2012-06-06 00:31:31 +04:00
void batadv_tt_free ( struct batadv_priv * bat_priv )
2011-04-27 16:27:57 +04:00
{
2012-07-16 00:26:51 +04:00
cancel_delayed_work_sync ( & bat_priv - > tt . work ) ;
2011-04-27 16:27:57 +04:00
2012-05-16 22:23:16 +04:00
batadv_tt_local_table_free ( bat_priv ) ;
batadv_tt_global_table_free ( bat_priv ) ;
batadv_tt_req_list_free ( bat_priv ) ;
batadv_tt_changes_list_free ( bat_priv ) ;
batadv_tt_roam_list_free ( bat_priv ) ;
2011-04-27 16:27:57 +04:00
2012-07-16 00:26:51 +04:00
kfree ( bat_priv - > tt . last_changeset ) ;
2011-04-27 16:27:57 +04:00
}
2011-07-07 03:40:58 +04:00
2011-11-07 19:47:01 +04:00
/* This function enables or disables the specified flags for all the entries
 * in the given hash table and returns the number of modified entries
 */
2012-06-06 00:31:28 +04:00
static uint16_t batadv_tt_set_flags ( struct batadv_hashtable * hash ,
uint16_t flags , bool enable )
2011-07-07 03:40:58 +04:00
{
2011-10-05 19:05:25 +04:00
uint32_t i ;
2011-11-07 19:47:01 +04:00
uint16_t changed_num = 0 ;
2011-07-07 03:40:58 +04:00
struct hlist_head * head ;
struct hlist_node * node ;
2012-06-06 00:31:31 +04:00
struct batadv_tt_common_entry * tt_common_entry ;
2011-07-07 03:40:58 +04:00
if ( ! hash )
2011-11-07 19:47:01 +04:00
goto out ;
2011-07-07 03:40:58 +04:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
rcu_read_lock ( ) ;
2011-10-30 15:17:33 +04:00
hlist_for_each_entry_rcu ( tt_common_entry , node ,
2011-07-07 03:40:58 +04:00
head , hash_entry ) {
2011-11-07 19:47:01 +04:00
if ( enable ) {
if ( ( tt_common_entry - > flags & flags ) = = flags )
continue ;
tt_common_entry - > flags | = flags ;
} else {
if ( ! ( tt_common_entry - > flags & flags ) )
continue ;
tt_common_entry - > flags & = ~ flags ;
}
changed_num + + ;
2011-07-07 03:40:58 +04:00
}
rcu_read_unlock ( ) ;
}
2011-11-07 19:47:01 +04:00
out :
return changed_num ;
2011-07-07 03:40:58 +04:00
}
2012-06-04 00:19:21 +04:00
/* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
2012-06-06 00:31:31 +04:00
static void batadv_tt_local_purge_pending_clients ( struct batadv_priv * bat_priv )
2011-07-07 03:40:58 +04:00
{
2012-07-16 00:26:51 +04:00
struct batadv_hashtable * hash = bat_priv - > tt . local_hash ;
2012-06-06 00:31:31 +04:00
struct batadv_tt_common_entry * tt_common ;
struct batadv_tt_local_entry * tt_local ;
2011-07-07 03:40:58 +04:00
struct hlist_node * node , * node_tmp ;
struct hlist_head * head ;
spinlock_t * list_lock ; /* protects write access to the hash lists */
2011-10-05 19:05:25 +04:00
uint32_t i ;
2011-07-07 03:40:58 +04:00
if ( ! hash )
return ;
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
list_lock = & hash - > list_locks [ i ] ;
spin_lock_bh ( list_lock ) ;
2012-06-04 00:19:21 +04:00
hlist_for_each_entry_safe ( tt_common , node , node_tmp , head ,
hash_entry ) {
if ( ! ( tt_common - > flags & BATADV_TT_CLIENT_PENDING ) )
2011-07-07 03:40:58 +04:00
continue ;
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_TT , bat_priv ,
2012-05-12 15:48:58 +04:00
" Deleting local tt entry (%pM): pending \n " ,
2012-06-04 00:19:21 +04:00
tt_common - > addr ) ;
2011-07-07 03:40:58 +04:00
2012-07-16 00:26:51 +04:00
atomic_dec ( & bat_priv - > tt . local_entry_num ) ;
2011-07-07 03:40:58 +04:00
hlist_del_rcu ( node ) ;
2012-06-06 00:31:31 +04:00
tt_local = container_of ( tt_common ,
struct batadv_tt_local_entry ,
common ) ;
batadv_tt_local_entry_free_ref ( tt_local ) ;
2011-07-07 03:40:58 +04:00
}
spin_unlock_bh ( list_lock ) ;
}
}
2012-06-06 00:31:31 +04:00
static int batadv_tt_commit_changes ( struct batadv_priv * bat_priv ,
2012-05-16 22:23:16 +04:00
unsigned char * * packet_buff ,
int * packet_buff_len , int packet_min_len )
2011-07-07 03:40:58 +04:00
{
2012-05-07 00:22:05 +04:00
uint16_t changed_num = 0 ;
2012-07-16 00:26:51 +04:00
if ( atomic_read ( & bat_priv - > tt . local_changes ) < 1 )
2012-05-07 00:22:05 +04:00
return - ENOENT ;
2012-07-16 00:26:51 +04:00
changed_num = batadv_tt_set_flags ( bat_priv - > tt . local_hash ,
2012-06-04 00:19:21 +04:00
BATADV_TT_CLIENT_NEW , false ) ;
2012-05-07 00:22:05 +04:00
/* all reset entries have to be counted as local entries */
2012-07-16 00:26:51 +04:00
atomic_add ( changed_num , & bat_priv - > tt . local_entry_num ) ;
2012-05-16 22:23:16 +04:00
batadv_tt_local_purge_pending_clients ( bat_priv ) ;
2012-07-16 00:26:51 +04:00
bat_priv - > tt . local_crc = batadv_tt_local_crc ( bat_priv ) ;
2011-07-07 03:40:58 +04:00
/* Increment the TTVN only once per OGM interval */
2012-07-16 00:26:51 +04:00
atomic_inc ( & bat_priv - > tt . vn ) ;
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_TT , bat_priv ,
2012-05-12 15:48:58 +04:00
" Local changes committed, updating to ttvn %u \n " ,
2012-07-16 00:26:51 +04:00
( uint8_t ) atomic_read ( & bat_priv - > tt . vn ) ) ;
bat_priv - > tt . poss_change = false ;
2012-05-07 00:22:05 +04:00
/* reset the sending counter */
2012-07-16 00:26:51 +04:00
atomic_set ( & bat_priv - > tt . ogm_append_cnt , BATADV_TT_OGM_APPEND_MAX ) ;
2012-05-07 00:22:05 +04:00
2012-05-16 22:23:16 +04:00
return batadv_tt_changes_fill_buff ( bat_priv , packet_buff ,
packet_buff_len , packet_min_len ) ;
2012-05-07 00:22:05 +04:00
}
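/* Rough order of operations performed by a commit:
 *	1) clear BATADV_TT_CLIENT_NEW on all local entries and account the
 *	   freshly announced clients in tt.local_entry_num
 *	2) really delete the entries that were only marked as
 *	   BATADV_TT_CLIENT_PENDING
 *	3) recompute the local CRC and increment the local ttvn
 *	4) re-arm the OGM append counter and serialize the pending changes
 *	   into packet_buff via batadv_tt_changes_fill_buff()
 */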
/* when calling this function, (hard_iface == primary_if) has to be true */
2012-06-06 00:31:31 +04:00
int batadv_tt_append_diff ( struct batadv_priv * bat_priv ,
2012-05-07 00:22:05 +04:00
unsigned char * * packet_buff , int * packet_buff_len ,
int packet_min_len )
{
int tt_num_changes ;
/* if at least one change happened */
2012-05-16 22:23:16 +04:00
tt_num_changes = batadv_tt_commit_changes ( bat_priv , packet_buff ,
packet_buff_len ,
packet_min_len ) ;
2012-05-07 00:22:05 +04:00
/* if the changes have been sent often enough */
if ( ( tt_num_changes < 0 ) & &
2012-07-16 00:26:51 +04:00
( ! batadv_atomic_dec_not_zero ( & bat_priv - > tt . ogm_append_cnt ) ) ) {
2012-05-16 22:23:16 +04:00
batadv_tt_realloc_packet_buff ( packet_buff , packet_buff_len ,
packet_min_len , packet_min_len ) ;
2012-05-07 00:22:05 +04:00
tt_num_changes = 0 ;
}
return tt_num_changes ;
2011-07-07 03:40:58 +04:00
}
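/* A negative return value from the commit simply means "no new local
 * changes". In that case the previously built diff may still ride along
 * with the next OGMs until tt.ogm_append_cnt (re-armed to
 * BATADV_TT_OGM_APPEND_MAX on every commit) is used up; only then is the
 * packet buffer shrunk back to the TT header alone and tt_num_changes
 * reported as zero.
 */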
2011-07-07 17:35:36 +04:00
2012-06-06 00:31:31 +04:00
bool batadv_is_ap_isolated ( struct batadv_priv * bat_priv , uint8_t * src ,
2012-05-12 04:09:39 +04:00
uint8_t * dst )
2011-07-07 17:35:36 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_tt_local_entry * tt_local_entry = NULL ;
struct batadv_tt_global_entry * tt_global_entry = NULL ;
2012-06-20 19:16:05 +04:00
bool ret = false ;
2011-07-07 17:35:36 +04:00
if ( ! atomic_read ( & bat_priv - > ap_isolation ) )
2012-06-20 19:16:05 +04:00
goto out ;
2011-07-07 17:35:36 +04:00
2012-05-16 22:23:16 +04:00
tt_local_entry = batadv_tt_local_hash_find ( bat_priv , dst ) ;
2011-07-07 17:35:36 +04:00
if ( ! tt_local_entry )
goto out ;
2012-05-16 22:23:16 +04:00
tt_global_entry = batadv_tt_global_hash_find ( bat_priv , src ) ;
2011-07-07 17:35:36 +04:00
if ( ! tt_global_entry )
goto out ;
2012-06-26 00:49:50 +04:00
if ( ! _batadv_is_ap_isolated ( tt_local_entry , tt_global_entry ) )
2011-07-07 17:35:36 +04:00
goto out ;
2012-06-20 19:16:05 +04:00
ret = true ;
2011-07-07 17:35:36 +04:00
out :
if ( tt_global_entry )
2012-05-16 22:23:16 +04:00
batadv_tt_global_entry_free_ref ( tt_global_entry ) ;
2011-07-07 17:35:36 +04:00
if ( tt_local_entry )
2012-05-16 22:23:16 +04:00
batadv_tt_local_entry_free_ref ( tt_local_entry ) ;
2011-07-07 17:35:36 +04:00
return ret ;
}
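/* AP isolation note: with ap_isolation enabled, unicast traffic between two
 * clients that are both marked as wifi clients is not forwarded through the
 * mesh (see _batadv_is_ap_isolated()). The check above requires both a
 * local entry for the destination and a global entry for the source; if
 * either is missing the pair is never considered isolated.
 */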
2011-07-30 15:10:18 +04:00
2012-06-06 00:31:31 +04:00
void batadv_tt_update_orig ( struct batadv_priv * bat_priv ,
struct batadv_orig_node * orig_node ,
2012-05-12 04:09:39 +04:00
const unsigned char * tt_buff , uint8_t tt_num_changes ,
uint8_t ttvn , uint16_t tt_crc )
2011-07-30 15:10:18 +04:00
{
uint8_t orig_ttvn = ( uint8_t ) atomic_read ( & orig_node - > last_ttvn ) ;
bool full_table = true ;
2012-06-06 00:31:30 +04:00
struct batadv_tt_change * tt_change ;
2011-07-30 15:10:18 +04:00
2012-01-22 23:00:23 +04:00
/* don't care about a backbone gateway's updates. */
2012-05-12 15:38:47 +04:00
if ( batadv_bla_is_backbone_gw_orig ( bat_priv , orig_node - > orig ) )
2012-01-22 23:00:23 +04:00
return ;
2011-11-07 19:36:40 +04:00
/* orig table not initialised AND first diff is in the OGM OR the ttvn
 * increased by one -> we can apply the attached changes
 */
2011-11-07 19:36:40 +04:00
if ( ( ! orig_node - > tt_initialised & & ttvn = = 1 ) | |
ttvn - orig_ttvn = = 1 ) {
2011-07-30 15:10:18 +04:00
/* the OGM could not contain the changes due to their size or
 * because they have already been sent BATADV_TT_OGM_APPEND_MAX
 * times.
 * In this case send a tt request
 */
2011-07-30 15:10:18 +04:00
if ( ! tt_num_changes ) {
full_table = false ;
goto request_table ;
}
2012-06-06 00:31:30 +04:00
tt_change = ( struct batadv_tt_change * ) tt_buff ;
2012-05-16 22:23:16 +04:00
batadv_tt_update_changes ( bat_priv , orig_node , tt_num_changes ,
2012-06-06 00:31:30 +04:00
ttvn , tt_change ) ;
2011-07-30 15:10:18 +04:00
/* Even if we received the precomputed crc with the OGM, we
 * prefer to recompute it to spot any possible inconsistency
 * in the global table
 */
2012-05-16 22:23:16 +04:00
orig_node - > tt_crc = batadv_tt_global_crc ( bat_priv , orig_node ) ;
2011-07-30 15:10:18 +04:00
/* The ttvn alone is not enough to guarantee consistency
 * because a single value could represent different states
 * (due to the wrap around). Thus a node has to check whether
 * the resulting table (after applying the changes) is still
 * consistent or not. E.g. a node could disconnect while its
 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
 * checking the CRC value is mandatory to detect the
 * inconsistency
 */
2011-07-30 15:10:18 +04:00
if ( orig_node - > tt_crc ! = tt_crc )
goto request_table ;
/* Roaming phase is over: tables are in sync again. I can
 * unset the flag
 */
2011-07-30 15:10:18 +04:00
orig_node - > tt_poss_change = false ;
} else {
/* if we missed more than one change or our tables are not
 * in sync anymore -> request fresh tt data
 */
2011-11-07 19:36:40 +04:00
if ( ! orig_node - > tt_initialised | | ttvn ! = orig_ttvn | |
orig_node - > tt_crc ! = tt_crc ) {
2011-07-30 15:10:18 +04:00
request_table :
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_TT , bat_priv ,
2012-05-12 15:48:58 +04:00
" TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u) \n " ,
orig_node - > orig , ttvn , orig_ttvn , tt_crc ,
orig_node - > tt_crc , tt_num_changes ) ;
2012-05-16 22:23:16 +04:00
batadv_send_tt_request ( bat_priv , orig_node , ttvn ,
tt_crc , full_table ) ;
2011-07-30 15:10:18 +04:00
return ;
}
}
}
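/* Summary of the consistency check above: a received diff is applied only
 * when it is the direct successor of the local state (ttvn advanced by
 * exactly one, or first diff for an uninitialised table with ttvn == 1); in
 * every other case, or whenever the recomputed CRC disagrees with the
 * advertised one, a TT_REQUEST is sent. full_table stays set unless the OGM
 * merely omitted the changes it should have carried.
 */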
2012-03-16 21:03:28 +04:00
/* returns true if we know that the client has moved from its old
 * originator to another one. The entry is still kept for consistency
 * purposes
 */
2012-06-06 00:31:31 +04:00
bool batadv_tt_global_client_is_roaming ( struct batadv_priv * bat_priv ,
2012-05-12 04:09:39 +04:00
uint8_t * addr )
2012-03-16 21:03:28 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_tt_global_entry * tt_global_entry ;
2012-03-16 21:03:28 +04:00
bool ret = false ;
2012-05-16 22:23:16 +04:00
tt_global_entry = batadv_tt_global_hash_find ( bat_priv , addr ) ;
2012-03-16 21:03:28 +04:00
if ( ! tt_global_entry )
goto out ;
2012-06-04 00:19:21 +04:00
ret = tt_global_entry - > common . flags & BATADV_TT_CLIENT_ROAM ;
2012-05-16 22:23:16 +04:00
batadv_tt_global_entry_free_ref ( tt_global_entry ) ;
2012-03-16 21:03:28 +04:00
out :
return ret ;
}
2012-07-06 01:38:29 +04:00
bool batadv_tt_add_temporary_global_entry ( struct batadv_priv * bat_priv ,
struct batadv_orig_node * orig_node ,
const unsigned char * addr )
{
bool ret = false ;
if ( ! batadv_tt_global_add ( bat_priv , orig_node , addr ,
BATADV_TT_CLIENT_TEMP ,
atomic_read ( & orig_node - > last_ttvn ) ) )
goto out ;
batadv_dbg ( BATADV_DBG_TT , bat_priv ,
" Added temporary global client (addr: %pM orig: %pM) \n " ,
addr , orig_node - > orig ) ;
ret = true ;
out :
return ret ;
}
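/* Temporary global entries (BATADV_TT_CLIENT_TEMP) allow traffic to be
 * routed towards a client that was detected from incoming traffic before
 * its originator formally announced it. They are excluded from the global
 * CRC (see batadv_tt_global_crc()) and cleaned up by the regular
 * batadv_tt_global_purge() cycle until a proper announcement replaces them.
 */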