/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "send.h"
#include "hash.h"
#include "originator.h"
#include "routing.h"

#include <linux/crc16.h>
/* forward declarations for helpers defined later in this file */
static void _tt_global_del(struct bat_priv *bat_priv,
			   struct tt_global_entry *tt_global_entry,
			   const char *message);
static void tt_purge(struct work_struct *work);
2011-02-18 15:28:09 +03:00
/* returns 1 if they are the same mac addr */
2011-10-30 15:17:33 +04:00
static int compare_tt ( const struct hlist_node * node , const void * data2 )
2011-02-18 15:28:09 +03:00
{
2011-10-30 15:17:33 +04:00
const void * data1 = container_of ( node , struct tt_common_entry ,
2011-05-15 01:14:50 +04:00
hash_entry ) ;
2011-02-18 15:28:09 +03:00
return ( memcmp ( data1 , data2 , ETH_ALEN ) = = 0 ? 1 : 0 ) ;
}
2011-04-27 16:27:44 +04:00
static void tt_start_timer ( struct bat_priv * bat_priv )
2010-12-13 14:19:28 +03:00
{
2011-04-27 16:27:44 +04:00
INIT_DELAYED_WORK ( & bat_priv - > tt_work , tt_purge ) ;
queue_delayed_work ( bat_event_workqueue , & bat_priv - > tt_work ,
msecs_to_jiffies ( 5000 ) ) ;
2010-12-13 14:19:28 +03:00
}
2011-10-30 15:17:33 +04:00
static struct tt_common_entry * tt_hash_find ( struct hashtable_t * hash ,
const void * data )
2011-02-18 15:28:09 +03:00
{
struct hlist_head * head ;
struct hlist_node * node ;
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry , * tt_common_entry_tmp = NULL ;
2011-10-05 19:05:25 +04:00
uint32_t index ;
2011-02-18 15:28:09 +03:00
if ( ! hash )
return NULL ;
index = choose_orig ( data , hash - > size ) ;
head = & hash - > table [ index ] ;
rcu_read_lock ( ) ;
2011-10-30 15:17:33 +04:00
hlist_for_each_entry_rcu ( tt_common_entry , node , head , hash_entry ) {
if ( ! compare_eth ( tt_common_entry , data ) )
2011-02-18 15:28:09 +03:00
continue ;
2011-10-30 15:17:33 +04:00
if ( ! atomic_inc_not_zero ( & tt_common_entry - > refcount ) )
2011-04-27 16:28:07 +04:00
continue ;
2011-10-30 15:17:33 +04:00
tt_common_entry_tmp = tt_common_entry ;
2011-02-18 15:28:09 +03:00
break ;
}
rcu_read_unlock ( ) ;
2011-10-30 15:17:33 +04:00
return tt_common_entry_tmp ;
2011-02-18 15:28:09 +03:00
}
2011-10-30 15:17:33 +04:00
static struct tt_local_entry * tt_local_hash_find ( struct bat_priv * bat_priv ,
const void * data )
2011-02-18 15:28:09 +03:00
{
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry ;
struct tt_local_entry * tt_local_entry = NULL ;
2011-02-18 15:28:09 +03:00
2011-10-30 15:17:33 +04:00
tt_common_entry = tt_hash_find ( bat_priv - > tt_local_hash , data ) ;
if ( tt_common_entry )
tt_local_entry = container_of ( tt_common_entry ,
struct tt_local_entry , common ) ;
return tt_local_entry ;
}
2011-02-18 15:28:09 +03:00
2011-10-30 15:17:33 +04:00
static struct tt_global_entry * tt_global_hash_find ( struct bat_priv * bat_priv ,
const void * data )
{
struct tt_common_entry * tt_common_entry ;
struct tt_global_entry * tt_global_entry = NULL ;
2011-04-27 16:28:07 +04:00
2011-10-30 15:17:33 +04:00
tt_common_entry = tt_hash_find ( bat_priv - > tt_global_hash , data ) ;
if ( tt_common_entry )
tt_global_entry = container_of ( tt_common_entry ,
struct tt_global_entry , common ) ;
return tt_global_entry ;
2011-02-18 15:28:09 +03:00
}
2011-04-27 16:28:07 +04:00
static void tt_local_entry_free_ref ( struct tt_local_entry * tt_local_entry )
{
2011-10-30 15:17:33 +04:00
if ( atomic_dec_and_test ( & tt_local_entry - > common . refcount ) )
kfree_rcu ( tt_local_entry , common . rcu ) ;
2011-04-27 16:28:07 +04:00
}
2011-10-19 13:02:25 +04:00
static void tt_global_entry_free_rcu ( struct rcu_head * rcu )
{
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry ;
2011-10-19 13:02:25 +04:00
struct tt_global_entry * tt_global_entry ;
2011-10-30 15:17:33 +04:00
tt_common_entry = container_of ( rcu , struct tt_common_entry , rcu ) ;
tt_global_entry = container_of ( tt_common_entry , struct tt_global_entry ,
common ) ;
2011-10-19 13:02:25 +04:00
if ( tt_global_entry - > orig_node )
orig_node_free_ref ( tt_global_entry - > orig_node ) ;
kfree ( tt_global_entry ) ;
}
2011-04-27 16:28:07 +04:00
static void tt_global_entry_free_ref ( struct tt_global_entry * tt_global_entry )
{
2011-10-30 15:17:33 +04:00
if ( atomic_dec_and_test ( & tt_global_entry - > common . refcount ) )
call_rcu ( & tt_global_entry - > common . rcu ,
tt_global_entry_free_rcu ) ;
2011-04-27 16:28:07 +04:00
}
2011-06-30 03:14:00 +04:00
/* record a local translation-table change (@addr with @flags) so it can
 * be advertised with the next OGMs. Allocation failure silently drops
 * the event (best effort). */
static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
			   uint8_t flags)
{
	struct tt_change_node *tt_change_node;

	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
	if (!tt_change_node)
		return;

	tt_change_node->change.flags = flags;
	memcpy(tt_change_node->change.addr, addr, ETH_ALEN);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	/* track the change in the OGMinterval list */
	list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
	atomic_inc(&bat_priv->tt_local_changes);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* a new change resets the OGM append counter */
	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
}
/* number of bytes needed to carry @changes_num tt_change records */
int tt_len(int changes_num)
{
	return changes_num * sizeof(struct tt_change);
}
static int tt_local_init ( struct bat_priv * bat_priv )
2010-12-13 14:19:28 +03:00
{
2011-05-05 10:42:45 +04:00
if ( bat_priv - > tt_local_hash )
2010-12-13 14:19:28 +03:00
return 1 ;
2011-05-05 10:42:45 +04:00
bat_priv - > tt_local_hash = hash_new ( 1024 ) ;
2010-12-13 14:19:28 +03:00
2011-05-05 10:42:45 +04:00
if ( ! bat_priv - > tt_local_hash )
2010-12-13 14:19:28 +03:00
return 0 ;
return 1 ;
}
2011-07-07 17:35:35 +04:00
void tt_local_add ( struct net_device * soft_iface , const uint8_t * addr ,
int ifindex )
2010-12-13 14:19:28 +03:00
{
struct bat_priv * bat_priv = netdev_priv ( soft_iface ) ;
2011-04-27 16:28:07 +04:00
struct tt_local_entry * tt_local_entry = NULL ;
struct tt_global_entry * tt_global_entry = NULL ;
2011-11-02 23:26:45 +04:00
int hash_added ;
2010-12-13 14:19:28 +03:00
2011-05-05 10:42:45 +04:00
tt_local_entry = tt_local_hash_find ( bat_priv , addr ) ;
2010-12-13 14:19:28 +03:00
2011-05-05 10:42:45 +04:00
if ( tt_local_entry ) {
tt_local_entry - > last_seen = jiffies ;
2011-04-27 16:28:07 +04:00
goto out ;
2010-12-13 14:19:28 +03:00
}
2011-05-15 01:14:54 +04:00
tt_local_entry = kmalloc ( sizeof ( * tt_local_entry ) , GFP_ATOMIC ) ;
2011-05-05 10:42:45 +04:00
if ( ! tt_local_entry )
2011-04-27 16:28:07 +04:00
goto out ;
2011-04-27 16:27:44 +04:00
bat_dbg ( DBG_TT , bat_priv ,
" Creating new local tt entry: %pM (ttvn: %d) \n " , addr ,
( uint8_t ) atomic_read ( & bat_priv - > ttvn ) ) ;
2010-12-13 14:19:28 +03:00
2011-10-30 15:17:33 +04:00
memcpy ( tt_local_entry - > common . addr , addr , ETH_ALEN ) ;
tt_local_entry - > common . flags = NO_FLAGS ;
2011-07-07 17:35:35 +04:00
if ( is_wifi_iface ( ifindex ) )
2011-10-30 15:17:33 +04:00
tt_local_entry - > common . flags | = TT_CLIENT_WIFI ;
atomic_set ( & tt_local_entry - > common . refcount , 2 ) ;
tt_local_entry - > last_seen = jiffies ;
2010-12-13 14:19:28 +03:00
/* the batman interface mac address should never be purged */
2011-02-18 15:28:08 +03:00
if ( compare_eth ( addr , soft_iface - > dev_addr ) )
2011-10-30 15:17:33 +04:00
tt_local_entry - > common . flags | = TT_CLIENT_NOPURGE ;
2010-12-13 14:19:28 +03:00
2011-11-02 23:26:45 +04:00
hash_added = hash_add ( bat_priv - > tt_local_hash , compare_tt , choose_orig ,
& tt_local_entry - > common ,
& tt_local_entry - > common . hash_entry ) ;
if ( unlikely ( hash_added ! = 0 ) ) {
/* remove the reference for the hash */
tt_local_entry_free_ref ( tt_local_entry ) ;
goto out ;
}
2011-10-30 15:17:33 +04:00
tt_local_event ( bat_priv , addr , tt_local_entry - > common . flags ) ;
2011-06-30 03:14:00 +04:00
2011-07-07 03:40:58 +04:00
/* The local entry has to be marked as NEW to avoid to send it in
* a full table response going out before the next ttvn increment
* ( consistency check ) */
2011-10-30 15:17:33 +04:00
tt_local_entry - > common . flags | = TT_CLIENT_NEW ;
2011-07-07 03:40:58 +04:00
2010-12-13 14:19:28 +03:00
/* remove address from global hash if present */
2011-05-05 10:42:45 +04:00
tt_global_entry = tt_global_hash_find ( bat_priv , addr ) ;
2010-12-13 14:19:28 +03:00
2011-04-27 16:27:57 +04:00
/* Check whether it is a roaming! */
if ( tt_global_entry ) {
/* This node is probably going to update its tt table */
tt_global_entry - > orig_node - > tt_poss_change = true ;
2011-12-04 15:26:50 +04:00
/* The global entry has to be marked as ROAMING and has to be
2011-07-07 03:40:59 +04:00
* kept for consistency purpose */
2011-12-17 00:07:28 +04:00
tt_global_entry - > common . flags | = TT_CLIENT_ROAM ;
2011-12-04 15:26:50 +04:00
tt_global_entry - > roam_at = jiffies ;
2011-10-30 15:17:33 +04:00
send_roam_adv ( bat_priv , tt_global_entry - > common . addr ,
2011-04-27 16:28:07 +04:00
tt_global_entry - > orig_node ) ;
}
out :
if ( tt_local_entry )
tt_local_entry_free_ref ( tt_local_entry ) ;
if ( tt_global_entry )
tt_global_entry_free_ref ( tt_global_entry ) ;
2010-12-13 14:19:28 +03:00
}
2011-04-27 16:27:44 +04:00
int tt_changes_fill_buffer ( struct bat_priv * bat_priv ,
unsigned char * buff , int buff_len )
2010-12-13 14:19:28 +03:00
{
2011-04-27 16:27:44 +04:00
int count = 0 , tot_changes = 0 ;
struct tt_change_node * entry , * safe ;
2010-12-13 14:19:28 +03:00
2011-04-27 16:27:44 +04:00
if ( buff_len > 0 )
tot_changes = buff_len / tt_len ( 1 ) ;
2010-12-13 14:19:28 +03:00
2011-04-27 16:27:44 +04:00
spin_lock_bh ( & bat_priv - > tt_changes_list_lock ) ;
atomic_set ( & bat_priv - > tt_local_changes , 0 ) ;
2010-12-13 14:19:28 +03:00
2011-04-27 16:27:44 +04:00
list_for_each_entry_safe ( entry , safe , & bat_priv - > tt_changes_list ,
list ) {
if ( count < tot_changes ) {
memcpy ( buff + tt_len ( count ) ,
& entry - > change , sizeof ( struct tt_change ) ) ;
2010-12-13 14:19:28 +03:00
count + + ;
}
2011-04-27 16:27:44 +04:00
list_del ( & entry - > list ) ;
kfree ( entry ) ;
2010-12-13 14:19:28 +03:00
}
2011-04-27 16:27:44 +04:00
spin_unlock_bh ( & bat_priv - > tt_changes_list_lock ) ;
/* Keep the buffer for possible tt_request */
spin_lock_bh ( & bat_priv - > tt_buff_lock ) ;
kfree ( bat_priv - > tt_buff ) ;
bat_priv - > tt_buff_len = 0 ;
bat_priv - > tt_buff = NULL ;
/* We check whether this new OGM has no changes due to size
* problems */
if ( buff_len > 0 ) {
/**
* if kmalloc ( ) fails we will reply with the full table
* instead of providing the diff
*/
bat_priv - > tt_buff = kmalloc ( buff_len , GFP_ATOMIC ) ;
if ( bat_priv - > tt_buff ) {
memcpy ( bat_priv - > tt_buff , buff , buff_len ) ;
bat_priv - > tt_buff_len = buff_len ;
}
}
spin_unlock_bh ( & bat_priv - > tt_buff_lock ) ;
2010-12-13 14:19:28 +03:00
2011-04-27 16:27:44 +04:00
return tot_changes ;
2010-12-13 14:19:28 +03:00
}
2011-05-05 10:42:45 +04:00
int tt_local_seq_print_text ( struct seq_file * seq , void * offset )
2010-12-13 14:19:28 +03:00
{
struct net_device * net_dev = ( struct net_device * ) seq - > private ;
struct bat_priv * bat_priv = netdev_priv ( net_dev ) ;
2011-05-05 10:42:45 +04:00
struct hashtable_t * hash = bat_priv - > tt_local_hash ;
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry ;
2011-04-20 17:40:58 +04:00
struct hard_iface * primary_if ;
2011-02-18 15:28:09 +03:00
struct hlist_node * node ;
2010-12-13 14:19:28 +03:00
struct hlist_head * head ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
int ret = 0 ;
2011-04-20 17:40:58 +04:00
primary_if = primary_if_get_selected ( bat_priv ) ;
if ( ! primary_if ) {
ret = seq_printf ( seq , " BATMAN mesh %s disabled - "
" please specify interfaces to enable it \n " ,
net_dev - > name ) ;
goto out ;
}
2010-12-13 14:19:28 +03:00
2011-04-20 17:40:58 +04:00
if ( primary_if - > if_status ! = IF_ACTIVE ) {
ret = seq_printf ( seq , " BATMAN mesh %s disabled - "
" primary interface not active \n " ,
net_dev - > name ) ;
goto out ;
2010-12-13 14:19:28 +03:00
}
seq_printf ( seq , " Locally retrieved addresses (from %s) "
2011-04-27 16:27:44 +04:00
" announced via TT (TTVN: %u): \n " ,
net_dev - > name , ( uint8_t ) atomic_read ( & bat_priv - > ttvn ) ) ;
2010-12-13 14:19:28 +03:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-02-18 15:28:09 +03:00
rcu_read_lock ( ) ;
2011-10-30 15:17:33 +04:00
hlist_for_each_entry_rcu ( tt_common_entry , node ,
2011-02-18 15:28:09 +03:00
head , hash_entry ) {
2011-10-22 20:15:26 +04:00
seq_printf ( seq , " * %pM [%c%c%c%c%c] \n " ,
2011-10-30 15:17:33 +04:00
tt_common_entry - > addr ,
( tt_common_entry - > flags &
2011-07-07 17:35:38 +04:00
TT_CLIENT_ROAM ? ' R ' : ' . ' ) ,
2011-10-30 15:17:33 +04:00
( tt_common_entry - > flags &
2011-07-07 17:35:38 +04:00
TT_CLIENT_NOPURGE ? ' P ' : ' . ' ) ,
2011-10-30 15:17:33 +04:00
( tt_common_entry - > flags &
2011-07-07 17:35:38 +04:00
TT_CLIENT_NEW ? ' N ' : ' . ' ) ,
2011-10-30 15:17:33 +04:00
( tt_common_entry - > flags &
2011-07-07 17:35:38 +04:00
TT_CLIENT_PENDING ? ' X ' : ' . ' ) ,
2011-10-30 15:17:33 +04:00
( tt_common_entry - > flags &
2011-07-07 17:35:38 +04:00
TT_CLIENT_WIFI ? ' W ' : ' . ' ) ) ;
2010-12-13 14:19:28 +03:00
}
2011-02-18 15:28:09 +03:00
rcu_read_unlock ( ) ;
2010-12-13 14:19:28 +03:00
}
2011-04-20 17:40:58 +04:00
out :
if ( primary_if )
hardif_free_ref ( primary_if ) ;
return ret ;
2010-12-13 14:19:28 +03:00
}
2011-07-07 03:40:58 +04:00
/* announce the deletion of a local client (with @flags) and mark the
 * entry TT_CLIENT_PENDING instead of removing it immediately. */
static void tt_local_set_pending(struct bat_priv *bat_priv,
				 struct tt_local_entry *tt_local_entry,
				 uint16_t flags)
{
	tt_local_event(bat_priv, tt_local_entry->common.addr,
		       tt_local_entry->common.flags | flags);

	/* The local client has to be marked as "pending to be removed" but
	 * has to be kept in the table in order to send it in a full table
	 * response issued before the next ttvn increment (consistency
	 * check) */
	tt_local_entry->common.flags |= TT_CLIENT_PENDING;
}
/* schedule removal of local client @addr (reason logged via @message);
 * @roaming additionally tags the deletion with TT_CLIENT_ROAM. A miss
 * is silently ignored. */
void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
		     const char *message, bool roaming)
{
	struct tt_local_entry *tt_local_entry;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		return;

	tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
			     (roaming ? TT_CLIENT_ROAM : NO_FLAGS));

	bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
		"%s\n", tt_local_entry->common.addr, message);

	tt_local_entry_free_ref(tt_local_entry);
}
static void tt_local_purge ( struct bat_priv * bat_priv )
2010-12-13 14:19:28 +03:00
{
2011-05-05 10:42:45 +04:00
struct hashtable_t * hash = bat_priv - > tt_local_hash ;
struct tt_local_entry * tt_local_entry ;
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry ;
2011-02-18 15:28:09 +03:00
struct hlist_node * node , * node_tmp ;
2010-12-13 14:19:28 +03:00
struct hlist_head * head ;
2011-04-27 16:28:07 +04:00
spinlock_t * list_lock ; /* protects write access to the hash lists */
2011-10-05 19:05:25 +04:00
uint32_t i ;
2010-12-13 14:19:28 +03:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-04-27 16:28:07 +04:00
list_lock = & hash - > list_locks [ i ] ;
2010-12-13 14:19:28 +03:00
2011-04-27 16:28:07 +04:00
spin_lock_bh ( list_lock ) ;
2011-10-30 15:17:33 +04:00
hlist_for_each_entry_safe ( tt_common_entry , node , node_tmp ,
2011-02-18 15:28:09 +03:00
head , hash_entry ) {
2011-10-30 15:17:33 +04:00
tt_local_entry = container_of ( tt_common_entry ,
struct tt_local_entry ,
common ) ;
if ( tt_local_entry - > common . flags & TT_CLIENT_NOPURGE )
2011-02-18 15:28:09 +03:00
continue ;
2010-12-13 14:19:28 +03:00
2011-07-07 03:40:58 +04:00
/* entry already marked for deletion */
2011-10-30 15:17:33 +04:00
if ( tt_local_entry - > common . flags & TT_CLIENT_PENDING )
2011-07-07 03:40:58 +04:00
continue ;
2011-12-08 16:32:41 +04:00
if ( ! has_timed_out ( tt_local_entry - > last_seen ,
2011-12-20 15:30:40 +04:00
TT_LOCAL_TIMEOUT ) )
2011-02-18 15:28:09 +03:00
continue ;
2011-07-07 03:40:58 +04:00
tt_local_set_pending ( bat_priv , tt_local_entry ,
TT_CLIENT_DEL ) ;
bat_dbg ( DBG_TT , bat_priv , " Local tt entry (%pM) "
" pending to be removed: timed out \n " ,
2011-10-30 15:17:33 +04:00
tt_local_entry - > common . addr ) ;
2010-12-13 14:19:28 +03:00
}
2011-04-27 16:28:07 +04:00
spin_unlock_bh ( list_lock ) ;
2010-12-13 14:19:28 +03:00
}
}
2011-04-27 16:27:44 +04:00
static void tt_local_table_free ( struct bat_priv * bat_priv )
2010-12-13 14:19:28 +03:00
{
2011-04-27 16:27:44 +04:00
struct hashtable_t * hash ;
spinlock_t * list_lock ; /* protects write access to the hash lists */
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry ;
2011-04-27 16:27:44 +04:00
struct tt_local_entry * tt_local_entry ;
2011-04-27 16:28:07 +04:00
struct hlist_node * node , * node_tmp ;
struct hlist_head * head ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
2011-04-27 16:27:44 +04:00
2011-05-05 10:42:45 +04:00
if ( ! bat_priv - > tt_local_hash )
2010-12-13 14:19:28 +03:00
return ;
2011-04-27 16:27:44 +04:00
hash = bat_priv - > tt_local_hash ;
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
list_lock = & hash - > list_locks [ i ] ;
spin_lock_bh ( list_lock ) ;
2011-10-30 15:17:33 +04:00
hlist_for_each_entry_safe ( tt_common_entry , node , node_tmp ,
2011-04-27 16:27:44 +04:00
head , hash_entry ) {
hlist_del_rcu ( node ) ;
2011-10-30 15:17:33 +04:00
tt_local_entry = container_of ( tt_common_entry ,
struct tt_local_entry ,
common ) ;
2011-04-27 16:28:07 +04:00
tt_local_entry_free_ref ( tt_local_entry ) ;
2011-04-27 16:27:44 +04:00
}
spin_unlock_bh ( list_lock ) ;
}
hash_destroy ( hash ) ;
2011-05-05 10:42:45 +04:00
bat_priv - > tt_local_hash = NULL ;
2010-12-13 14:19:28 +03:00
}
2011-04-27 16:27:44 +04:00
static int tt_global_init ( struct bat_priv * bat_priv )
2010-12-13 14:19:28 +03:00
{
2011-05-05 10:42:45 +04:00
if ( bat_priv - > tt_global_hash )
2010-12-13 14:19:28 +03:00
return 1 ;
2011-05-05 10:42:45 +04:00
bat_priv - > tt_global_hash = hash_new ( 1024 ) ;
2010-12-13 14:19:28 +03:00
2011-05-05 10:42:45 +04:00
if ( ! bat_priv - > tt_global_hash )
2010-12-13 14:19:28 +03:00
return 0 ;
return 1 ;
}
2011-04-27 16:27:44 +04:00
static void tt_changes_list_free ( struct bat_priv * bat_priv )
2010-12-13 14:19:28 +03:00
{
2011-04-27 16:27:44 +04:00
struct tt_change_node * entry , * safe ;
2010-12-13 14:19:28 +03:00
2011-04-27 16:27:44 +04:00
spin_lock_bh ( & bat_priv - > tt_changes_list_lock ) ;
2010-12-13 14:19:28 +03:00
2011-04-27 16:27:44 +04:00
list_for_each_entry_safe ( entry , safe , & bat_priv - > tt_changes_list ,
list ) {
list_del ( & entry - > list ) ;
kfree ( entry ) ;
}
2010-12-13 14:19:28 +03:00
2011-04-27 16:27:44 +04:00
atomic_set ( & bat_priv - > tt_local_changes , 0 ) ;
spin_unlock_bh ( & bat_priv - > tt_changes_list_lock ) ;
}
2010-12-13 14:19:28 +03:00
2011-04-27 16:27:44 +04:00
/* add or refresh a global translation-table entry for @tt_addr served
 * by @orig_node (table version @ttvn). An existing entry is rehomed to
 * @orig_node if needed and its flags/roam state reset. The address is
 * always removed from the local table afterwards.
 *
 * caller must hold orig_node refcount.
 * Returns 1 on success, 0 if the entry could not be allocated. */
int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
		  const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
		  bool wifi)
{
	struct tt_global_entry *tt_global_entry;
	struct orig_node *orig_node_tmp;
	int ret = 0;
	int hash_added;

	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);

	if (!tt_global_entry) {
		tt_global_entry = kmalloc(sizeof(*tt_global_entry),
					  GFP_ATOMIC);
		if (!tt_global_entry)
			goto out;

		memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
		tt_global_entry->common.flags = NO_FLAGS;
		/* one reference for the hash, one for this function */
		atomic_set(&tt_global_entry->common.refcount, 2);
		/* Assign the new orig_node */
		atomic_inc(&orig_node->refcount);
		tt_global_entry->orig_node = orig_node;
		tt_global_entry->ttvn = ttvn;
		tt_global_entry->roam_at = 0;

		hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
				      choose_orig, &tt_global_entry->common,
				      &tt_global_entry->common.hash_entry);
		if (unlikely(hash_added != 0)) {
			/* remove the reference for the hash */
			tt_global_entry_free_ref(tt_global_entry);
			goto out_remove;
		}
		atomic_inc(&orig_node->tt_size);
	} else {
		if (tt_global_entry->orig_node != orig_node) {
			/* client moved: transfer it to the new originator */
			atomic_dec(&tt_global_entry->orig_node->tt_size);
			orig_node_tmp = tt_global_entry->orig_node;
			atomic_inc(&orig_node->refcount);
			tt_global_entry->orig_node = orig_node;
			orig_node_free_ref(orig_node_tmp);
			atomic_inc(&orig_node->tt_size);
		}
		tt_global_entry->common.flags = NO_FLAGS;
		tt_global_entry->ttvn = ttvn;
		tt_global_entry->roam_at = 0;
	}

	if (wifi)
		tt_global_entry->common.flags |= TT_CLIENT_WIFI;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new global tt entry: %pM (via %pM)\n",
		tt_global_entry->common.addr, orig_node->orig);

out_remove:
	/* remove address from local hash if present */
	tt_local_remove(bat_priv, tt_global_entry->common.addr,
			"global tt received", roaming);
	ret = 1;
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	return ret;
}
int tt_global_seq_print_text ( struct seq_file * seq , void * offset )
2010-12-13 14:19:28 +03:00
{
struct net_device * net_dev = ( struct net_device * ) seq - > private ;
struct bat_priv * bat_priv = netdev_priv ( net_dev ) ;
2011-05-05 10:42:45 +04:00
struct hashtable_t * hash = bat_priv - > tt_global_hash ;
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry ;
2011-05-05 10:42:45 +04:00
struct tt_global_entry * tt_global_entry ;
2011-04-20 17:40:58 +04:00
struct hard_iface * primary_if ;
2011-02-18 15:28:09 +03:00
struct hlist_node * node ;
2010-12-13 14:19:28 +03:00
struct hlist_head * head ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
int ret = 0 ;
2011-04-20 17:40:58 +04:00
primary_if = primary_if_get_selected ( bat_priv ) ;
if ( ! primary_if ) {
ret = seq_printf ( seq , " BATMAN mesh %s disabled - please "
" specify interfaces to enable it \n " ,
net_dev - > name ) ;
goto out ;
}
2010-12-13 14:19:28 +03:00
2011-04-20 17:40:58 +04:00
if ( primary_if - > if_status ! = IF_ACTIVE ) {
ret = seq_printf ( seq , " BATMAN mesh %s disabled - "
" primary interface not active \n " ,
net_dev - > name ) ;
goto out ;
2010-12-13 14:19:28 +03:00
}
2011-05-05 10:42:45 +04:00
seq_printf ( seq ,
" Globally announced TT entries received via the mesh %s \n " ,
2010-12-13 14:19:28 +03:00
net_dev - > name ) ;
2011-07-07 17:35:38 +04:00
seq_printf ( seq , " %-13s %s %-15s %s %s \n " ,
" Client " , " (TTVN) " , " Originator " , " (Curr TTVN) " , " Flags " ) ;
2010-12-13 14:19:28 +03:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-02-18 15:28:09 +03:00
rcu_read_lock ( ) ;
2011-10-30 15:17:33 +04:00
hlist_for_each_entry_rcu ( tt_common_entry , node ,
2011-02-18 15:28:09 +03:00
head , hash_entry ) {
2011-10-30 15:17:33 +04:00
tt_global_entry = container_of ( tt_common_entry ,
struct tt_global_entry ,
common ) ;
2011-10-22 20:15:26 +04:00
seq_printf ( seq , " * %pM (%3u) via %pM (%3u) "
2011-10-30 15:17:33 +04:00
" [%c%c%c] \n " ,
tt_global_entry - > common . addr ,
2011-04-27 16:27:44 +04:00
tt_global_entry - > ttvn ,
tt_global_entry - > orig_node - > orig ,
( uint8_t ) atomic_read (
& tt_global_entry - > orig_node - >
2011-07-07 17:35:38 +04:00
last_ttvn ) ,
2011-10-30 15:17:33 +04:00
( tt_global_entry - > common . flags &
2011-07-07 17:35:38 +04:00
TT_CLIENT_ROAM ? ' R ' : ' . ' ) ,
2011-10-30 15:17:33 +04:00
( tt_global_entry - > common . flags &
2011-07-07 17:35:38 +04:00
TT_CLIENT_PENDING ? ' X ' : ' . ' ) ,
2011-10-30 15:17:33 +04:00
( tt_global_entry - > common . flags &
2011-07-07 17:35:38 +04:00
TT_CLIENT_WIFI ? ' W ' : ' . ' ) ) ;
2010-12-13 14:19:28 +03:00
}
2011-02-18 15:28:09 +03:00
rcu_read_unlock ( ) ;
2010-12-13 14:19:28 +03:00
}
2011-04-20 17:40:58 +04:00
out :
if ( primary_if )
hardif_free_ref ( primary_if ) ;
return ret ;
2010-12-13 14:19:28 +03:00
}
2011-04-27 16:27:44 +04:00
static void _tt_global_del ( struct bat_priv * bat_priv ,
struct tt_global_entry * tt_global_entry ,
const char * message )
2010-12-13 14:19:28 +03:00
{
2011-04-27 16:27:44 +04:00
if ( ! tt_global_entry )
2011-04-27 16:28:07 +04:00
goto out ;
2011-04-27 16:27:44 +04:00
bat_dbg ( DBG_TT , bat_priv ,
2011-05-05 10:42:45 +04:00
" Deleting global tt entry %pM (via %pM): %s \n " ,
2011-10-30 15:17:33 +04:00
tt_global_entry - > common . addr , tt_global_entry - > orig_node - > orig ,
2010-12-13 14:19:28 +03:00
message ) ;
2011-04-27 16:27:44 +04:00
atomic_dec ( & tt_global_entry - > orig_node - > tt_size ) ;
2011-04-27 16:28:07 +04:00
2011-10-30 15:17:33 +04:00
hash_remove ( bat_priv - > tt_global_hash , compare_tt , choose_orig ,
tt_global_entry - > common . addr ) ;
2011-04-27 16:28:07 +04:00
out :
if ( tt_global_entry )
tt_global_entry_free_ref ( tt_global_entry ) ;
2010-12-13 14:19:28 +03:00
}
2011-04-27 16:27:44 +04:00
/* delete the global entry for @addr, but only if it is currently
 * served by @orig_node. On a roaming event the entry may instead be
 * kept and flagged TT_CLIENT_ROAM (see inline comment). */
void tt_global_del(struct bat_priv *bat_priv,
		   struct orig_node *orig_node, const unsigned char *addr,
		   const char *message, bool roaming)
{
	struct tt_global_entry *tt_global_entry = NULL;
	struct tt_local_entry *tt_local_entry = NULL;

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	if (tt_global_entry->orig_node == orig_node) {
		if (roaming) {
			/* if we are deleting a global entry due to a roam
			 * event, there are two possibilities:
			 * 1) the client roamed from node A to node B => we mark
			 *    it with TT_CLIENT_ROAM, we start a timer and we
			 *    wait for node B to claim it. In case of timeout
			 *    the entry is purged.
			 * 2) the client roamed to us => we can directly delete
			 *    the global entry, since it is useless now. */
			tt_local_entry = tt_local_hash_find(bat_priv,
						tt_global_entry->common.addr);
			if (!tt_local_entry) {
				tt_global_entry->common.flags |=
							TT_CLIENT_ROAM;
				tt_global_entry->roam_at = jiffies;
				goto out;
			}
		}
		_tt_global_del(bat_priv, tt_global_entry, message);
	}
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
}
void tt_global_del_orig ( struct bat_priv * bat_priv ,
2011-04-27 16:27:44 +04:00
struct orig_node * orig_node , const char * message )
2010-12-13 14:19:28 +03:00
{
2011-05-05 10:42:45 +04:00
struct tt_global_entry * tt_global_entry ;
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
2011-04-27 16:27:44 +04:00
struct hashtable_t * hash = bat_priv - > tt_global_hash ;
struct hlist_node * node , * safe ;
struct hlist_head * head ;
2011-04-27 16:28:07 +04:00
spinlock_t * list_lock ; /* protects write access to the hash lists */
2010-12-13 14:19:28 +03:00
2011-10-19 12:28:26 +04:00
if ( ! hash )
return ;
2011-04-27 16:27:44 +04:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-04-27 16:28:07 +04:00
list_lock = & hash - > list_locks [ i ] ;
2010-12-13 14:19:28 +03:00
2011-04-27 16:28:07 +04:00
spin_lock_bh ( list_lock ) ;
2011-10-30 15:17:33 +04:00
hlist_for_each_entry_safe ( tt_common_entry , node , safe ,
2011-04-27 16:27:44 +04:00
head , hash_entry ) {
2011-10-30 15:17:33 +04:00
tt_global_entry = container_of ( tt_common_entry ,
struct tt_global_entry ,
common ) ;
2011-04-27 16:28:07 +04:00
if ( tt_global_entry - > orig_node = = orig_node ) {
bat_dbg ( DBG_TT , bat_priv ,
" Deleting global tt entry %pM "
2011-09-19 14:29:19 +04:00
" (via %pM): %s \n " ,
2011-10-30 15:17:33 +04:00
tt_global_entry - > common . addr ,
2011-09-19 14:29:19 +04:00
tt_global_entry - > orig_node - > orig ,
message ) ;
2011-04-27 16:28:07 +04:00
hlist_del_rcu ( node ) ;
tt_global_entry_free_ref ( tt_global_entry ) ;
}
2011-04-27 16:27:44 +04:00
}
2011-04-27 16:28:07 +04:00
spin_unlock_bh ( list_lock ) ;
2010-12-13 14:19:28 +03:00
}
2011-04-27 16:27:44 +04:00
atomic_set ( & orig_node - > tt_size , 0 ) ;
2011-11-07 19:36:40 +04:00
orig_node - > tt_initialised = false ;
2010-12-13 14:19:28 +03:00
}
2011-04-27 16:27:57 +04:00
/* Purge global translation-table entries that were only kept because of a
 * client roaming event: once their grace period (TT_CLIENT_ROAM_TIMEOUT)
 * has expired they are removed and the announcing originator's entry
 * count is decremented.  Entries without TT_CLIENT_ROAM are untouched. */
static void tt_global_roam_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		/* write access: take the per-bucket lock, not just RCU */
		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
				continue;
			if (!has_timed_out(tt_global_entry->roam_at,
					   TT_CLIENT_ROAM_TIMEOUT))
				continue;

			bat_dbg(DBG_TT, bat_priv, "Deleting global "
				"tt entry (%pM): Roaming timeout\n",
				tt_global_entry->common.addr);
			/* the originator no longer announces this client */
			atomic_dec(&tt_global_entry->orig_node->tt_size);
			hlist_del_rcu(node);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}
}
2011-04-27 16:27:44 +04:00
/* Release every entry of the global translation table and destroy its
 * hash.  Called from tt_free() on shutdown; a no-op when the hash was
 * never allocated. */
static void tt_global_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt_global_hash)
		return;

	hash = bat_priv->tt_global_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			/* drop the hash's reference; the entry is freed
			 * once the last reference is gone */
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);

	bat_priv->tt_global_hash = NULL;
}
2011-07-07 17:35:36 +04:00
static bool _is_ap_isolated ( struct tt_local_entry * tt_local_entry ,
struct tt_global_entry * tt_global_entry )
{
bool ret = false ;
2011-10-30 15:17:33 +04:00
if ( tt_local_entry - > common . flags & TT_CLIENT_WIFI & &
tt_global_entry - > common . flags & TT_CLIENT_WIFI )
2011-07-07 17:35:36 +04:00
ret = true ;
return ret ;
}
2011-05-15 01:14:50 +04:00
struct orig_node * transtable_search ( struct bat_priv * bat_priv ,
2011-07-07 17:35:37 +04:00
const uint8_t * src , const uint8_t * addr )
2010-12-13 14:19:28 +03:00
{
2011-07-07 17:35:37 +04:00
struct tt_local_entry * tt_local_entry = NULL ;
struct tt_global_entry * tt_global_entry = NULL ;
2011-02-18 15:28:10 +03:00
struct orig_node * orig_node = NULL ;
2010-12-13 14:19:28 +03:00
2011-07-07 17:35:37 +04:00
if ( src & & atomic_read ( & bat_priv - > ap_isolation ) ) {
tt_local_entry = tt_local_hash_find ( bat_priv , src ) ;
if ( ! tt_local_entry )
goto out ;
}
2011-02-18 15:28:09 +03:00
2011-07-07 17:35:37 +04:00
tt_global_entry = tt_global_hash_find ( bat_priv , addr ) ;
2011-05-05 10:42:45 +04:00
if ( ! tt_global_entry )
2011-02-18 15:28:10 +03:00
goto out ;
2011-02-18 15:28:09 +03:00
2011-07-07 17:35:37 +04:00
/* check whether the clients should not communicate due to AP
* isolation */
if ( tt_local_entry & & _is_ap_isolated ( tt_local_entry , tt_global_entry ) )
goto out ;
2011-05-05 10:42:45 +04:00
if ( ! atomic_inc_not_zero ( & tt_global_entry - > orig_node - > refcount ) )
2011-07-07 17:35:37 +04:00
goto out ;
2010-12-13 14:19:28 +03:00
2011-07-07 03:40:59 +04:00
/* A global client marked as PENDING has already moved from that
* originator */
2011-10-30 15:17:33 +04:00
if ( tt_global_entry - > common . flags & TT_CLIENT_PENDING )
2011-07-07 17:35:37 +04:00
goto out ;
2011-07-07 03:40:59 +04:00
2011-05-05 10:42:45 +04:00
orig_node = tt_global_entry - > orig_node ;
2010-12-13 14:19:28 +03:00
2011-02-18 15:28:10 +03:00
out :
2011-07-07 17:35:37 +04:00
if ( tt_global_entry )
tt_global_entry_free_ref ( tt_global_entry ) ;
if ( tt_local_entry )
tt_local_entry_free_ref ( tt_local_entry ) ;
2011-02-18 15:28:10 +03:00
return orig_node ;
2010-12-13 14:19:28 +03:00
}
2011-04-27 16:27:44 +04:00
/* Calculates the checksum of the global table entries announced by a given
 * orig_node: the XOR of the CRC16 of each announced client MAC address.
 * (The original comment said "local table" - this is the global table.) */
uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			/* NOTE(review): compare_eth() on the two struct
			 * pointers matches the originators' MAC addresses,
			 * presumably relying on 'orig' being the first
			 * member of struct orig_node - confirm */
			if (compare_eth(tt_global_entry->orig_node,
					orig_node)) {
				/* Roaming clients are in the global table for
				 * consistency only. They don't have to be
				 * taken into account while computing the
				 * global crc */
				if (tt_common_entry->flags & TT_CLIENT_ROAM)
					continue;
				total_one = 0;
				for (j = 0; j < ETH_ALEN; j++)
					total_one = crc16_byte(total_one,
						tt_common_entry->addr[j]);
				total ^= total_one;
			}
		}
		rcu_read_unlock();
	}

	return total;
}
/* Calculates the checksum of the local table */
uint16_t tt_local_crc ( struct bat_priv * bat_priv )
{
uint16_t total = 0 , total_one ;
struct hashtable_t * hash = bat_priv - > tt_local_hash ;
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry ;
2011-04-27 16:27:44 +04:00
struct hlist_node * node ;
struct hlist_head * head ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
int j ;
2011-04-27 16:27:44 +04:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
rcu_read_lock ( ) ;
2011-10-30 15:17:33 +04:00
hlist_for_each_entry_rcu ( tt_common_entry , node ,
2011-04-27 16:27:44 +04:00
head , hash_entry ) {
2011-07-07 03:40:58 +04:00
/* not yet committed clients have not to be taken into
* account while computing the CRC */
2011-10-30 15:17:33 +04:00
if ( tt_common_entry - > flags & TT_CLIENT_NEW )
2011-07-07 03:40:58 +04:00
continue ;
2011-04-27 16:27:44 +04:00
total_one = 0 ;
for ( j = 0 ; j < ETH_ALEN ; j + + )
total_one = crc16_byte ( total_one ,
2011-10-30 15:17:33 +04:00
tt_common_entry - > addr [ j ] ) ;
2011-04-27 16:27:44 +04:00
total ^ = total_one ;
}
rcu_read_unlock ( ) ;
}
return total ;
}
static void tt_req_list_free ( struct bat_priv * bat_priv )
{
struct tt_req_node * node , * safe ;
spin_lock_bh ( & bat_priv - > tt_req_list_lock ) ;
list_for_each_entry_safe ( node , safe , & bat_priv - > tt_req_list , list ) {
list_del ( & node - > list ) ;
kfree ( node ) ;
}
spin_unlock_bh ( & bat_priv - > tt_req_list_lock ) ;
}
/* Cache the raw TT change buffer received with the last OGM of
 * 'orig_node' so partial tt_requests can be answered on its behalf.
 * An empty buffer (tt_num_changes == 0) leaves the old cache in place. */
void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
			 const unsigned char *tt_buff, uint8_t tt_num_changes)
{
	uint16_t tt_buff_len = tt_len(tt_num_changes);

	/* Replace the old buffer only if I received something in the
	 * last OGM (the OGM could carry no changes) */
	spin_lock_bh(&orig_node->tt_buff_lock);
	if (tt_buff_len > 0) {
		kfree(orig_node->tt_buff);
		orig_node->tt_buff_len = 0;
		/* GFP_ATOMIC: we hold a spinlock here */
		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
		if (orig_node->tt_buff) {
			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
			orig_node->tt_buff_len = tt_buff_len;
		}
		/* on allocation failure the cache stays empty (NULL/0) */
	}
	spin_unlock_bh(&orig_node->tt_buff_lock);
}
static void tt_req_purge ( struct bat_priv * bat_priv )
{
struct tt_req_node * node , * safe ;
spin_lock_bh ( & bat_priv - > tt_req_list_lock ) ;
list_for_each_entry_safe ( node , safe , & bat_priv - > tt_req_list , list ) {
2011-12-20 15:30:40 +04:00
if ( has_timed_out ( node - > issued_at , TT_REQUEST_TIMEOUT ) ) {
2011-04-27 16:27:44 +04:00
list_del ( & node - > list ) ;
kfree ( node ) ;
}
}
spin_unlock_bh ( & bat_priv - > tt_req_list_lock ) ;
}
/* returns the pointer to the new tt_req_node struct if no request
* has already been issued for this orig_node , NULL otherwise */
static struct tt_req_node * new_tt_req_node ( struct bat_priv * bat_priv ,
struct orig_node * orig_node )
{
struct tt_req_node * tt_req_node_tmp , * tt_req_node = NULL ;
spin_lock_bh ( & bat_priv - > tt_req_list_lock ) ;
list_for_each_entry ( tt_req_node_tmp , & bat_priv - > tt_req_list , list ) {
if ( compare_eth ( tt_req_node_tmp , orig_node ) & &
2011-12-08 16:32:41 +04:00
! has_timed_out ( tt_req_node_tmp - > issued_at ,
2011-12-20 15:30:40 +04:00
TT_REQUEST_TIMEOUT ) )
2011-04-27 16:27:44 +04:00
goto unlock ;
}
tt_req_node = kmalloc ( sizeof ( * tt_req_node ) , GFP_ATOMIC ) ;
if ( ! tt_req_node )
goto unlock ;
memcpy ( tt_req_node - > addr , orig_node - > orig , ETH_ALEN ) ;
tt_req_node - > issued_at = jiffies ;
list_add ( & tt_req_node - > list , & bat_priv - > tt_req_list ) ;
unlock :
spin_unlock_bh ( & bat_priv - > tt_req_list_lock ) ;
return tt_req_node ;
}
2011-07-07 03:40:58 +04:00
/* data_ptr is useless here, but has to be kept to respect the prototype */
static int tt_local_valid_entry ( const void * entry_ptr , const void * data_ptr )
{
2011-10-30 15:17:33 +04:00
const struct tt_common_entry * tt_common_entry = entry_ptr ;
2011-07-07 03:40:58 +04:00
2011-10-30 15:17:33 +04:00
if ( tt_common_entry - > flags & TT_CLIENT_NEW )
2011-07-07 03:40:58 +04:00
return 0 ;
return 1 ;
}
2011-04-27 16:27:44 +04:00
static int tt_global_valid_entry ( const void * entry_ptr , const void * data_ptr )
{
2011-10-30 15:17:33 +04:00
const struct tt_common_entry * tt_common_entry = entry_ptr ;
const struct tt_global_entry * tt_global_entry ;
2011-04-27 16:27:44 +04:00
const struct orig_node * orig_node = data_ptr ;
2011-10-30 15:17:33 +04:00
if ( tt_common_entry - > flags & TT_CLIENT_ROAM )
2011-04-27 16:27:57 +04:00
return 0 ;
2011-10-30 15:17:33 +04:00
tt_global_entry = container_of ( tt_common_entry , struct tt_global_entry ,
common ) ;
2011-04-27 16:27:44 +04:00
return ( tt_global_entry - > orig_node = = orig_node ) ;
}
/* Build a TT_RESPONSE skb with at most 'tt_len' bytes worth of table
 * entries taken from 'hash'; entries are filtered through 'valid_cb'
 * (called with 'cb_data') when one is supplied.  The payload is capped
 * to the soft interface MTU, rounded down to a whole number of
 * tt_change records.  Returns the skb (ownership passes to the caller)
 * or NULL on allocation failure. */
static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
					      struct hashtable_t *hash,
					      struct hard_iface *primary_if,
					      int (*valid_cb)(const void *,
							      const void *),
					      void *cb_data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_query_packet *tt_response;
	struct tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct tt_query_packet);
	uint32_t i;

	/* fragmentation is not implemented: truncate to a single packet */
	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct tt_change);
	}
	tt_tot = tt_len / sizeof(struct tt_change);

	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct tt_query_packet *)skb_put(skb,
						tt_query_size + tt_len);
	tt_response->ttvn = ttvn;

	/* the change records start right after the query header */
	tt_change = (struct tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	rcu_read_lock();
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			if (tt_count == tt_tot)
				break;

			if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
				continue;

			memcpy(tt_change->addr, tt_common_entry->addr,
			       ETH_ALEN);
			tt_change->flags = NO_FLAGS;

			tt_count++;
			tt_change++;
		}
	}
	rcu_read_unlock();

	/* store in the message the number of entries we have successfully
	 * copied */
	tt_response->tt_data = htons(tt_count);

out:
	return skb;
}
2011-07-30 15:10:18 +04:00
/* Send a TT_REQUEST towards 'dst_orig_node' asking for its table at
 * version 'ttvn' (the whole table when 'full_table' is set).  A new
 * request is only issued if none is already pending for that
 * originator.  Returns 0 on success, 1 on failure. */
static int send_tt_request(struct bat_priv *bat_priv,
			   struct orig_node *dst_orig_node,
			   uint8_t ttvn, uint16_t tt_crc, bool full_table)
{
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_request;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if;
	struct tt_req_node *tt_req_node = NULL;
	int ret = 1;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_request = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet));

	tt_request->header.packet_type = BAT_TT_QUERY;
	tt_request->header.version = COMPAT_VERSION;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->header.ttl = TTL;
	tt_request->ttvn = ttvn;
	/* NOTE(review): the crc is written in host byte order and compared
	 * against a host-order value at the receiver - confirm this is
	 * consistent on mixed-endian meshes */
	tt_request->tt_data = tt_crc;
	tt_request->flags = TT_REQUEST;

	if (full_table)
		tt_request->flags |= TT_FULL_TABLE;

	neigh_node = orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM "
		"[%c]\n", dst_orig_node->orig, neigh_node->addr,
		(full_table ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (ret)
		kfree_skb(skb);
	/* a failed request must not linger in the pending list */
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt_req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt_req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}
/* Answer a TT_REQUEST on behalf of another node ('tt_request->dst'):
 * either replay that node's cached OGM change buffer (partial answer)
 * or rebuild its full table from the global hash.  The answer is only
 * sent when our view matches the requested ttvn and crc.  Returns true
 * if a response was sent, false otherwise. */
static bool send_other_tt_response(struct bat_priv *bat_priv,
				   struct tt_query_packet *tt_request)
{
	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t orig_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (%pM) [%c]\n", tt_request->src,
		tt_request->ttvn, tt_request->dst,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* Let's get the orig node of the REAL destination */
	req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
	if (!req_dst_orig_node)
		goto out;

	res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
	if (!res_dst_orig_node)
		goto out;

	neigh_node = orig_node_get_router(res_dst_orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
	req_ttvn = tt_request->ttvn;

	/* I don't have the requested data */
	if (orig_ttvn != req_ttvn ||
	    tt_request->tt_data != req_dst_orig_node->tt_crc)
		goto out;

	/* If the full table has been explicitly requested */
	if (tt_request->flags & TT_FULL_TABLE ||
	    !req_dst_orig_node->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can */
	if (!full_table) {
		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
		tt_len = req_dst_orig_node->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		/* Copy the last orig_node's OGM buffer */
		memcpy(tt_buff, req_dst_orig_node->tt_buff,
		       req_dst_orig_node->tt_buff_len);

		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
						sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_global_hash,
					     primary_if, tt_global_valid_entry,
					     req_dst_orig_node);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->header.packet_type = BAT_TT_QUERY;
	tt_response->header.version = COMPAT_VERSION;
	tt_response->header.ttl = TTL;
	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
		res_dst_orig_node->orig, neigh_node->addr,
		req_dst_orig_node->orig, req_ttvn);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);

out:
	if (res_dst_orig_node)
		orig_node_free_ref(res_dst_orig_node);
	if (req_dst_orig_node)
		orig_node_free_ref(req_dst_orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	return ret;
}
/* Answer a TT_REQUEST addressed to us: replay our cached change buffer
 * when the requester's ttvn matches, otherwise (or when a full table is
 * explicitly requested) send the whole local table.  Always returns
 * true: the request was for us, so it must not be re-routed. */
static bool send_my_tt_response(struct bat_priv *bat_priv,
				struct tt_query_packet *tt_request)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t my_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (me) [%c]\n", tt_request->src,
		tt_request->ttvn,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	req_ttvn = tt_request->ttvn;

	orig_node = orig_hash_find(bat_priv, tt_request->src);
	if (!orig_node)
		goto out;

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* If the full table has been explicitly requested or the gap
	 * is too big send the whole local translation table */
	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
	    !bat_priv->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can */
	if (!full_table) {
		spin_lock_bh(&bat_priv->tt_buff_lock);
		tt_len = bat_priv->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		memcpy(tt_buff, bat_priv->tt_buff,
		       bat_priv->tt_buff_len);
		spin_unlock_bh(&bat_priv->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
						sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_local_hash,
					     primary_if, tt_local_valid_entry,
					     NULL);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->header.packet_type = BAT_TT_QUERY;
	tt_response->header.version = COMPAT_VERSION;
	tt_response->header.ttl = TTL;
	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE to %pM via %pM [%c]\n",
		orig_node->orig, neigh_node->addr,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	/* This packet was for me, so it doesn't need to be re-routed */
	return true;
}
bool send_tt_response ( struct bat_priv * bat_priv ,
struct tt_query_packet * tt_request )
{
if ( is_my_mac ( tt_request - > dst ) )
return send_my_tt_response ( bat_priv , tt_request ) ;
else
return send_other_tt_response ( bat_priv , tt_request ) ;
}
/* Apply 'tt_num_changes' add/delete records received from 'orig_node'
 * to the global translation table.  Aborts (without marking the table
 * initialised) as soon as one addition cannot be stored, so that a
 * half-applied table is never advertised. */
static void _tt_update_changes(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct tt_change *tt_change,
			       uint16_t tt_num_changes, uint8_t ttvn)
{
	int i;

	for (i = 0; i < tt_num_changes; i++) {
		if ((tt_change + i)->flags & TT_CLIENT_DEL)
			tt_global_del(bat_priv, orig_node,
				      (tt_change + i)->addr,
				      "tt removed by changes",
				      (tt_change + i)->flags & TT_CLIENT_ROAM);
		else
			if (!tt_global_add(bat_priv, orig_node,
					   (tt_change + i)->addr, ttvn, false,
					   (tt_change + i)->flags &
							TT_CLIENT_WIFI))
				/* In case of problem while storing a
				 * global_entry, we stop the updating
				 * procedure without committing the
				 * ttvn change. This will avoid to send
				 * corrupted data on tt_request
				 */
				return;
	}
	orig_node->tt_initialised = true;
}
/* Replace the whole set of global entries announced by the sender of a
 * full-table TT_RESPONSE, drop the now-superseded cached OGM change
 * buffer and sync the last seen ttvn. */
static void tt_fill_gtable(struct bat_priv *bat_priv,
			   struct tt_query_packet *tt_response)
{
	struct orig_node *orig_node = NULL;

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	/* Purge the old table first.. */
	tt_global_del_orig(bat_priv, orig_node, "Received full table");

	_tt_update_changes(bat_priv, orig_node,
			   (struct tt_change *)(tt_response + 1),
			   tt_response->tt_data, tt_response->ttvn);

	spin_lock_bh(&orig_node->tt_buff_lock);
	kfree(orig_node->tt_buff);
	orig_node->tt_buff_len = 0;
	orig_node->tt_buff = NULL;
	spin_unlock_bh(&orig_node->tt_buff_lock);

	atomic_set(&orig_node->last_ttvn, tt_response->ttvn);

out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}
2011-07-30 15:10:18 +04:00
/* Apply an incremental change set received from 'orig_node', cache the
 * raw buffer for answering future partial tt_requests on its behalf,
 * then store the new ttvn. */
static void tt_update_changes(struct bat_priv *bat_priv,
			      struct orig_node *orig_node,
			      uint16_t tt_num_changes, uint8_t ttvn,
			      struct tt_change *tt_change)
{
	_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
			   ttvn);

	tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
			    tt_num_changes);
	atomic_set(&orig_node->last_ttvn, ttvn);
}
bool is_my_client ( struct bat_priv * bat_priv , const uint8_t * addr )
{
2011-04-27 16:28:07 +04:00
struct tt_local_entry * tt_local_entry = NULL ;
bool ret = false ;
2011-04-27 16:27:44 +04:00
tt_local_entry = tt_local_hash_find ( bat_priv , addr ) ;
2011-04-27 16:28:07 +04:00
if ( ! tt_local_entry )
goto out ;
2011-07-07 03:40:58 +04:00
/* Check if the client has been logically deleted (but is kept for
* consistency purpose ) */
2011-10-30 15:17:33 +04:00
if ( tt_local_entry - > common . flags & TT_CLIENT_PENDING )
2011-07-07 03:40:58 +04:00
goto out ;
2011-04-27 16:28:07 +04:00
ret = true ;
out :
2011-04-27 16:27:44 +04:00
if ( tt_local_entry )
2011-04-27 16:28:07 +04:00
tt_local_entry_free_ref ( tt_local_entry ) ;
return ret ;
2011-04-27 16:27:44 +04:00
}
/* Process an incoming TT_RESPONSE: apply the carried table (full or
 * incremental) for the sending originator, remove the matching pending
 * request and recompute that originator's global CRC.
 * NOTE(review): tt_data is used here as a host-order count - presumably
 * converted by the caller after reception; confirm in the routing code. */
void handle_tt_response(struct bat_priv *bat_priv,
			struct tt_query_packet *tt_response)
{
	struct tt_req_node *node, *safe;
	struct orig_node *orig_node = NULL;

	bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for "
		"ttvn %d t_size: %d [%c]\n",
		tt_response->src, tt_response->ttvn,
		tt_response->tt_data,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	if (tt_response->flags & TT_FULL_TABLE)
		tt_fill_gtable(bat_priv, tt_response);
	else
		tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
				  tt_response->ttvn,
				  (struct tt_change *)(tt_response + 1));

	/* Delete the tt_req_node from pending tt_requests list */
	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (!compare_eth(node->addr, tt_response->src))
			continue;
		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);

	/* Recalculate the CRC for this orig_node and store it */
	orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
	/* Roaming phase is over: tables are in sync again. I can
	 * unset the flag */
	orig_node->tt_poss_change = false;
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}
/* Initialise the local and global translation tables and arm the
 * periodic purge worker.  Returns 1 on success, 0 on failure. */
int tt_init(struct bat_priv *bat_priv)
{
	if (!tt_local_init(bat_priv) || !tt_global_init(bat_priv))
		return 0;

	tt_start_timer(bat_priv);
	return 1;
}
2011-04-27 16:27:57 +04:00
static void tt_roam_list_free ( struct bat_priv * bat_priv )
2011-04-27 16:27:44 +04:00
{
2011-04-27 16:27:57 +04:00
struct tt_roam_node * node , * safe ;
2011-04-27 16:27:44 +04:00
2011-04-27 16:27:57 +04:00
spin_lock_bh ( & bat_priv - > tt_roam_list_lock ) ;
2011-04-27 16:27:44 +04:00
2011-04-27 16:27:57 +04:00
list_for_each_entry_safe ( node , safe , & bat_priv - > tt_roam_list , list ) {
list_del ( & node - > list ) ;
kfree ( node ) ;
}
spin_unlock_bh ( & bat_priv - > tt_roam_list_lock ) ;
}
static void tt_roam_purge ( struct bat_priv * bat_priv )
{
struct tt_roam_node * node , * safe ;
spin_lock_bh ( & bat_priv - > tt_roam_list_lock ) ;
list_for_each_entry_safe ( node , safe , & bat_priv - > tt_roam_list , list ) {
2011-12-20 15:30:40 +04:00
if ( ! has_timed_out ( node - > first_time , ROAMING_MAX_TIME ) )
2011-04-27 16:27:57 +04:00
continue ;
list_del ( & node - > list ) ;
kfree ( node ) ;
}
spin_unlock_bh ( & bat_priv - > tt_roam_list_lock ) ;
}
/* This function checks whether the client already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise */
static bool tt_check_roam_count(struct bat_priv *bat_priv,
				uint8_t *client)
{
	struct tt_roam_node *tt_roam_node;
	bool ret = false;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
		if (!compare_eth(tt_roam_node->addr, client))
			continue;

		/* an expired entry is skipped: a fresh node (with a full
		 * counter) is added below; tt_roam_purge() reaps the
		 * stale one later */
		if (has_timed_out(tt_roam_node->first_time, ROAMING_MAX_TIME))
			continue;

		if (!atomic_dec_not_zero(&tt_roam_node->counter))
			/* Sorry, you roamed too many times! */
			goto unlock;
		ret = true;
		break;
	}

	if (!ret) {
		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
		if (!tt_roam_node)
			goto unlock;

		tt_roam_node->first_time = jiffies;
		/* one roaming event is consumed by this very call */
		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
		memcpy(tt_roam_node->addr, client, ETH_ALEN);

		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
		ret = true;
	}

unlock:
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
	return ret;
}
/* Send a ROAMING_ADV to 'orig_node' announcing that 'client' has roamed
 * to us, unless the client already exceeded the allowed number of
 * roaming events (rate limited by tt_check_roam_count()). */
void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
		   struct orig_node *orig_node)
{
	struct neigh_node *neigh_node = NULL;
	struct sk_buff *skb = NULL;
	struct roam_adv_packet *roam_adv_packet;
	int ret = 1;
	struct hard_iface *primary_if;

	/* before going on we have to check whether the client has
	 * already roamed to us too many times */
	if (!tt_check_roam_count(bat_priv, client))
		goto out;

	skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
					sizeof(struct roam_adv_packet));

	roam_adv_packet->header.packet_type = BAT_ROAM_ADV;
	roam_adv_packet->header.version = COMPAT_VERSION;
	roam_adv_packet->header.ttl = TTL;
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;
	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	hardif_free_ref(primary_if);
	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
	memcpy(roam_adv_packet->client, client, ETH_ALEN);

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
		orig_node->orig, client, neigh_node->addr);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	/* on any failure path the allocated skb (if any) is released */
	if (ret)
		kfree_skb(skb);
	return;
}
/* Periodic worker: purge stale local entries, timed-out roaming global
 * entries, expired pending requests and roaming rate-limit nodes, then
 * re-arm itself via tt_start_timer(). */
static void tt_purge(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct bat_priv *bat_priv;

	dwork = container_of(work, struct delayed_work, work);
	bat_priv = container_of(dwork, struct bat_priv, tt_work);

	tt_local_purge(bat_priv);
	tt_global_roam_purge(bat_priv);
	tt_req_purge(bat_priv);
	tt_roam_purge(bat_priv);

	tt_start_timer(bat_priv);
}
2011-04-27 16:27:57 +04:00
void tt_free ( struct bat_priv * bat_priv )
{
cancel_delayed_work_sync ( & bat_priv - > tt_work ) ;
tt_local_table_free ( bat_priv ) ;
tt_global_table_free ( bat_priv ) ;
tt_req_list_free ( bat_priv ) ;
tt_changes_list_free ( bat_priv ) ;
tt_roam_list_free ( bat_priv ) ;
kfree ( bat_priv - > tt_buff ) ;
}
2011-07-07 03:40:58 +04:00
2011-11-07 19:47:01 +04:00
/* This function will enable or disable the specified flags for all the entries
 * in the given hash table and returns the number of modified entries */
static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
			     bool enable)
{
	uint16_t changed_num = 0;
	struct tt_common_entry *entry;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t bucket;

	if (!hash)
		return 0;

	for (bucket = 0; bucket < hash->size; bucket++) {
		head = &hash->table[bucket];

		rcu_read_lock();
		hlist_for_each_entry_rcu(entry, node, head, hash_entry) {
			if (enable) {
				/* already carries every requested bit */
				if ((entry->flags & flags) == flags)
					continue;
				entry->flags |= flags;
			} else {
				/* carries none of the requested bits */
				if (!(entry->flags & flags))
					continue;
				entry->flags &= ~flags;
			}
			changed_num++;
		}
		rcu_read_unlock();
	}

	return changed_num;
}
/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
static void tt_local_purge_pending_clients ( struct bat_priv * bat_priv )
{
struct hashtable_t * hash = bat_priv - > tt_local_hash ;
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry ;
2011-07-07 03:40:58 +04:00
struct tt_local_entry * tt_local_entry ;
struct hlist_node * node , * node_tmp ;
struct hlist_head * head ;
spinlock_t * list_lock ; /* protects write access to the hash lists */
2011-10-05 19:05:25 +04:00
uint32_t i ;
2011-07-07 03:40:58 +04:00
if ( ! hash )
return ;
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
list_lock = & hash - > list_locks [ i ] ;
spin_lock_bh ( list_lock ) ;
2011-10-30 15:17:33 +04:00
hlist_for_each_entry_safe ( tt_common_entry , node , node_tmp ,
2011-07-07 03:40:58 +04:00
head , hash_entry ) {
2011-10-30 15:17:33 +04:00
if ( ! ( tt_common_entry - > flags & TT_CLIENT_PENDING ) )
2011-07-07 03:40:58 +04:00
continue ;
bat_dbg ( DBG_TT , bat_priv , " Deleting local tt entry "
2011-10-30 15:17:33 +04:00
" (%pM): pending \n " , tt_common_entry - > addr ) ;
2011-07-07 03:40:58 +04:00
atomic_dec ( & bat_priv - > num_local_tt ) ;
hlist_del_rcu ( node ) ;
2011-10-30 15:17:33 +04:00
tt_local_entry = container_of ( tt_common_entry ,
struct tt_local_entry ,
common ) ;
2011-07-07 03:40:58 +04:00
tt_local_entry_free_ref ( tt_local_entry ) ;
}
spin_unlock_bh ( list_lock ) ;
}
}
void tt_commit_changes ( struct bat_priv * bat_priv )
{
2011-11-07 19:47:01 +04:00
uint16_t changed_num = tt_set_flags ( bat_priv - > tt_local_hash ,
TT_CLIENT_NEW , false ) ;
/* all the reset entries have now to be effectively counted as local
* entries */
atomic_add ( changed_num , & bat_priv - > num_local_tt ) ;
2011-07-07 03:40:58 +04:00
tt_local_purge_pending_clients ( bat_priv ) ;
/* Increment the TTVN only once per OGM interval */
atomic_inc ( & bat_priv - > ttvn ) ;
bat_priv - > tt_poss_change = false ;
}
2011-07-07 17:35:36 +04:00
/* Decide whether AP isolation forbids forwarding a frame from @src to the
 * local client @dst. Returns true when the frame has to be dropped; returns
 * false when AP isolation is disabled or either endpoint is unknown...
 * wait, unknown endpoints keep the default. See below. */
bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	bool ret = true;

	if (!atomic_read(&bat_priv->ap_isolation))
		return false;

	/* both lookups must succeed before the isolation check applies */
	tt_local_entry = tt_local_hash_find(bat_priv, dst);
	if (!tt_local_entry)
		goto out;

	tt_global_entry = tt_global_hash_find(bat_priv, src);
	if (!tt_global_entry)
		goto out;

	if (!_is_ap_isolated(tt_local_entry, tt_global_entry))
		ret = false;

out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	return ret;
}
/* Process the translation-table metadata announced by an OGM from
 * @orig_node: apply the attached diff when the ttvn advanced by exactly one,
 * otherwise fall back to requesting a (full or partial) table from the
 * originator via send_tt_request().
 *
 * NOTE(review): assumes @tt_buff points to @tt_num_changes packed
 * struct tt_change records — confirm against the OGM parsing code. */
void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
		    const unsigned char *tt_buff, uint8_t tt_num_changes,
		    uint8_t ttvn, uint16_t tt_crc)
{
	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
	bool full_table = true;

	/* orig table not initialised AND first diff is in the OGM OR the ttvn
	 * increased by one -> we can apply the attached changes */
	if ((!orig_node->tt_initialised && ttvn == 1) ||
	    ttvn - orig_ttvn == 1) {
		/* the OGM could not contain the changes due to their size or
		 * because they have already been sent TT_OGM_APPEND_MAX times.
		 * In this case send a tt request */
		if (!tt_num_changes) {
			full_table = false;
			goto request_table;
		}

		tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
				  (struct tt_change *)tt_buff);

		/* Even if we received the precomputed crc with the OGM, we
		 * prefer to recompute it to spot any possible inconsistency
		 * in the global table */
		orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);

		/* The ttvn alone is not enough to guarantee consistency
		 * because a single value could represent different states
		 * (due to the wrap around). Thus a node has to check whether
		 * the resulting table (after applying the changes) is still
		 * consistent or not. E.g. a node could disconnect while its
		 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
		 * checking the CRC value is mandatory to detect the
		 * inconsistency */
		if (orig_node->tt_crc != tt_crc)
			goto request_table;

		/* Roaming phase is over: tables are in sync again. I can
		 * unset the flag */
		orig_node->tt_poss_change = false;
	} else {
		/* if we missed more than one change or our tables are not
		 * in sync anymore -> request fresh tt data */
		if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
		    orig_node->tt_crc != tt_crc) {
			/* request_table is also the target of the gotos
			 * above: full_table tells the receiver whether a
			 * full dump or only a diff is needed */
request_table:
			bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
				"Need to retrieve the correct information "
				"(ttvn: %u last_ttvn: %u crc: %u last_crc: "
				"%u num_changes: %u)\n", orig_node->orig, ttvn,
				orig_ttvn, tt_crc, orig_node->tt_crc,
				tt_num_changes);
			send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
					full_table);
			return;
		}
	}
}