/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
# include "main.h"
# include "translation-table.h"
# include "soft-interface.h"
2011-04-20 17:40:58 +04:00
# include "hard-interface.h"
2011-04-27 16:27:44 +04:00
# include "send.h"
2010-12-13 14:19:28 +03:00
# include "hash.h"
# include "originator.h"
2011-04-27 16:27:44 +04:00
# include "routing.h"
2012-01-22 23:00:23 +04:00
# include "bridge_loop_avoidance.h"
2010-12-13 14:19:28 +03:00
2011-04-27 16:27:44 +04:00
# include <linux/crc16.h>
2012-02-05 21:55:22 +04:00
/* forward declarations: these helpers are used before they are defined
 * further down in this file
 */
static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
			  struct orig_node *orig_node);

static void tt_purge(struct work_struct *work);
static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
2010-12-13 14:19:28 +03:00
2011-02-18 15:28:09 +03:00
/* hash-table compare callback: returns 1 if the tt_common_entry embedding
 * 'node' has the same mac address as 'data2', 0 otherwise
 */
static int compare_tt(const struct hlist_node *node, const void *data2)
{
	/* the mac address is the first member of tt_common_entry, so the
	 * entry pointer can be compared directly against the raw address
	 */
	const void *data1 = container_of(node, struct tt_common_entry,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}
2011-04-27 16:27:44 +04:00
/* (re)arm the periodic translation table purge worker; tt_purge() will
 * run on the batman event workqueue in 5 seconds
 */
static void tt_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
	queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work,
			   msecs_to_jiffies(5000));
}
2011-10-30 15:17:33 +04:00
/* look up the tt entry with mac address 'data' in 'hash'.
 *
 * Returns the matching tt_common_entry with its refcount incremented, or
 * NULL if no (live) entry was found. The caller must drop the reference
 * with the appropriate *_free_ref() function.
 */
static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
					    const void *data)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
	uint32_t index;

	if (!hash)
		return NULL;

	index = batadv_choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
		if (!batadv_compare_eth(tt_common_entry, data))
			continue;

		/* skip entries that are already being torn down: only take
		 * a reference if the refcount has not yet dropped to zero
		 */
		if (!atomic_inc_not_zero(&tt_common_entry->refcount))
			continue;

		tt_common_entry_tmp = tt_common_entry;
		break;
	}
	rcu_read_unlock();

	return tt_common_entry_tmp;
}
2011-10-30 15:17:33 +04:00
static struct tt_local_entry * tt_local_hash_find ( struct bat_priv * bat_priv ,
const void * data )
2011-02-18 15:28:09 +03:00
{
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry ;
struct tt_local_entry * tt_local_entry = NULL ;
2011-02-18 15:28:09 +03:00
2011-10-30 15:17:33 +04:00
tt_common_entry = tt_hash_find ( bat_priv - > tt_local_hash , data ) ;
if ( tt_common_entry )
tt_local_entry = container_of ( tt_common_entry ,
struct tt_local_entry , common ) ;
return tt_local_entry ;
}
2011-02-18 15:28:09 +03:00
2011-10-30 15:17:33 +04:00
static struct tt_global_entry * tt_global_hash_find ( struct bat_priv * bat_priv ,
const void * data )
{
struct tt_common_entry * tt_common_entry ;
struct tt_global_entry * tt_global_entry = NULL ;
2011-04-27 16:28:07 +04:00
2011-10-30 15:17:33 +04:00
tt_common_entry = tt_hash_find ( bat_priv - > tt_global_hash , data ) ;
if ( tt_common_entry )
tt_global_entry = container_of ( tt_common_entry ,
struct tt_global_entry , common ) ;
return tt_global_entry ;
2011-02-18 15:28:09 +03:00
}
2011-04-27 16:28:07 +04:00
/* drop one reference to a local tt entry; the entry is freed via RCU
 * once the last reference is gone
 */
static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
{
	if (atomic_dec_and_test(&tt_local_entry->common.refcount))
		kfree_rcu(tt_local_entry, common.rcu);
}
2011-10-19 13:02:25 +04:00
/* RCU callback: free a global tt entry after the grace period has
 * elapsed (scheduled by tt_global_entry_free_ref())
 */
static void tt_global_entry_free_rcu(struct rcu_head *rcu)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;

	/* the rcu_head lives in the embedded common part */
	tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
				       common);

	kfree(tt_global_entry);
}
2011-04-27 16:28:07 +04:00
/* drop one reference to a global tt entry; on the last reference the
 * originator list is released and the entry is freed via RCU
 */
static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
	if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
		tt_global_del_orig_list(tt_global_entry);
		call_rcu(&tt_global_entry->common.rcu,
			 tt_global_entry_free_rcu);
	}
}
/* RCU callback: release the orig_node reference held by an originator
 * list entry and free the entry itself
 */
static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
{
	struct tt_orig_list_entry *orig_entry;

	orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);

	/* this originator announces one client less now */
	atomic_dec(&orig_entry->orig_node->tt_size);
	batadv_orig_node_free_ref(orig_entry->orig_node);
	kfree(orig_entry);
}
/* schedule an originator list entry for RCU-deferred destruction; the
 * caller must already have unlinked it from the list
 */
static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
{
	call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
}
2011-06-30 03:14:00 +04:00
/* record a local translation table change (client added/deleted) so that
 * it can be announced as a diff within the next OGMs
 */
static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
			   uint8_t flags)
{
	struct tt_change_node *tt_change_node;

	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
	/* best effort: if the allocation fails the change is silently
	 * dropped (a full table will resynchronise peers later)
	 */
	if (!tt_change_node)
		return;

	tt_change_node->change.flags = flags;
	memcpy(tt_change_node->change.addr, addr, ETH_ALEN);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	/* track the change in the OGMinterval list */
	list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
	atomic_inc(&bat_priv->tt_local_changes);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* the changes list was modified: restart counting how many OGMs
	 * the current diff has been appended to
	 */
	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
}
2012-05-12 04:09:39 +04:00
int batadv_tt_len ( int changes_num )
2011-04-27 16:27:44 +04:00
{
return changes_num * sizeof ( struct tt_change ) ;
}
static int tt_local_init ( struct bat_priv * bat_priv )
2010-12-13 14:19:28 +03:00
{
2011-05-05 10:42:45 +04:00
if ( bat_priv - > tt_local_hash )
2012-05-05 15:27:28 +04:00
return 0 ;
2010-12-13 14:19:28 +03:00
2012-05-12 04:09:32 +04:00
bat_priv - > tt_local_hash = batadv_hash_new ( 1024 ) ;
2010-12-13 14:19:28 +03:00
2011-05-05 10:42:45 +04:00
if ( ! bat_priv - > tt_local_hash )
2012-05-05 15:27:28 +04:00
return - ENOMEM ;
2010-12-13 14:19:28 +03:00
2012-05-05 15:27:28 +04:00
return 0 ;
2010-12-13 14:19:28 +03:00
}
2012-05-12 04:09:39 +04:00
/* announce a new local client 'addr' seen on the interface with index
 * 'ifindex': refresh an existing entry, or create a new one, add it to
 * the local hash and queue a tt change event. If the address was known
 * as a global client, the previous owners are informed via roaming
 * advertisements.
 */
void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
			 int ifindex)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	int hash_added;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);

	if (tt_local_entry) {
		/* already known: just refresh the timeout */
		tt_local_entry->last_seen = jiffies;
		/* possibly unset the TT_CLIENT_PENDING flag */
		tt_local_entry->common.flags &= ~TT_CLIENT_PENDING;
		goto out;
	}

	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		goto out;

	batadv_dbg(DBG_TT, bat_priv,
		   "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
		   (uint8_t)atomic_read(&bat_priv->ttvn));

	memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
	tt_local_entry->common.flags = NO_FLAGS;
	if (batadv_is_wifi_iface(ifindex))
		tt_local_entry->common.flags |= TT_CLIENT_WIFI;
	/* refcount 2: one for the hash table, one for this function's
	 * local pointer (released at 'out')
	 */
	atomic_set(&tt_local_entry->common.refcount, 2);
	tt_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (batadv_compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;

	/* The local entry has to be marked as NEW to avoid to send it in
	 * a full table response going out before the next ttvn increment
	 * (consistency check)
	 */
	tt_local_entry->common.flags |= TT_CLIENT_NEW;

	hash_added = batadv_hash_add(bat_priv->tt_local_hash, compare_tt,
				     batadv_choose_orig,
				     &tt_local_entry->common,
				     &tt_local_entry->common.hash_entry);

	if (unlikely(hash_added != 0)) {
		/* remove the reference for the hash */
		tt_local_entry_free_ref(tt_local_entry);
		goto out;
	}

	tt_local_event(bat_priv, addr, tt_local_entry->common.flags);

	/* remove address from global hash if present */
	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	/* Check whether it is a roaming! */
	if (tt_global_entry) {
		/* These node are probably going to update their tt table */
		head = &tt_global_entry->orig_list;
		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_entry, node, head, list) {
			orig_entry->orig_node->tt_poss_change = true;

			send_roam_adv(bat_priv, tt_global_entry->common.addr,
				      orig_entry->orig_node);
		}
		rcu_read_unlock();
		/* The global entry has to be marked as ROAMING and
		 * has to be kept for consistency purpose
		 */
		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
		tt_global_entry->roam_at = jiffies;
	}
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}
2012-05-07 00:22:05 +04:00
/* resize *packet_buff to new_packet_len bytes, preserving its first
 * min_packet_len bytes. On allocation failure the old buffer and the
 * recorded length are left untouched.
 */
static void tt_realloc_packet_buff(unsigned char **packet_buff,
				   int *packet_buff_len, int min_packet_len,
				   int new_packet_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(new_packet_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, *packet_buff, min_packet_len);
		kfree(*packet_buff);
		*packet_buff = new_buff;
		*packet_buff_len = new_packet_len;
	}
}
/* grow the packet buffer so the pending tt changes fit behind the first
 * min_packet_len bytes; if they would exceed the primary interface MTU
 * (or there is no primary interface) the buffer is shrunk back to
 * min_packet_len and no diff will be appended
 */
static void tt_prepare_packet_buff(struct bat_priv *bat_priv,
				   unsigned char **packet_buff,
				   int *packet_buff_len, int min_packet_len)
{
	struct hard_iface *primary_if;
	int req_len;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	req_len = min_packet_len;
	req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));

	/* if we have too many changes for one packet don't send any
	 * and wait for the tt table request which will be fragmented
	 */
	if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
		req_len = min_packet_len;

	tt_realloc_packet_buff(packet_buff, packet_buff_len,
			       min_packet_len, req_len);

	if (primary_if)
		batadv_hardif_free_ref(primary_if);
}
/* copy the queued tt changes into *packet_buff (after the first
 * min_packet_len bytes) and drain the changes list.
 *
 * A copy of the serialized diff is kept in bat_priv->tt_buff so that it
 * can be handed out in reply to a later tt request.
 *
 * Returns the number of changes written into the buffer.
 */
static int tt_changes_fill_buff(struct bat_priv *bat_priv,
				unsigned char **packet_buff,
				int *packet_buff_len, int min_packet_len)
{
	struct tt_change_node *entry, *safe;
	int count = 0, tot_changes = 0, new_len;
	unsigned char *tt_buff;

	tt_prepare_packet_buff(bat_priv, packet_buff,
			       packet_buff_len, min_packet_len);

	new_len = *packet_buff_len - min_packet_len;
	tt_buff = *packet_buff + min_packet_len;

	/* new_len == 0 means the changes did not fit (see
	 * tt_prepare_packet_buff) and only the list is drained below
	 */
	if (new_len > 0)
		tot_changes = new_len / batadv_tt_len(1);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	atomic_set(&bat_priv->tt_local_changes, 0);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		if (count < tot_changes) {
			memcpy(tt_buff + batadv_tt_len(count),
			       &entry->change, sizeof(struct tt_change));
			count++;
		}
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* Keep the buffer for possible tt_request */
	spin_lock_bh(&bat_priv->tt_buff_lock);
	kfree(bat_priv->tt_buff);
	bat_priv->tt_buff_len = 0;
	bat_priv->tt_buff = NULL;
	/* check whether this new OGM has no changes due to size problems */
	if (new_len > 0) {
		/* if kmalloc() fails we will reply with the full table
		 * instead of providing the diff
		 */
		bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
		if (bat_priv->tt_buff) {
			memcpy(bat_priv->tt_buff, tt_buff, new_len);
			bat_priv->tt_buff_len = new_len;
		}
	}
	spin_unlock_bh(&bat_priv->tt_buff_lock);

	return count;
}
2012-05-12 04:09:39 +04:00
/* debugfs dump: print every locally announced client together with its
 * flag characters (R=roaming, P=no-purge, N=new, X=pending deletion,
 * W=wifi)
 */
int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
				   tt_common_entry->addr,
				   (tt_common_entry->flags &
				    TT_CLIENT_ROAM ? 'R' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_NOPURGE ? 'P' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_NEW ? 'N' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_PENDING ? 'X' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_WIFI ? 'W' : '.'));
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}
2011-07-07 03:40:58 +04:00
/* mark a local client as pending deletion and queue the matching tt
 * change event (with the extra 'flags', e.g. TT_CLIENT_DEL/ROAM)
 */
static void tt_local_set_pending(struct bat_priv *bat_priv,
				 struct tt_local_entry *tt_local_entry,
				 uint16_t flags, const char *message)
{
	tt_local_event(bat_priv, tt_local_entry->common.addr,
		       tt_local_entry->common.flags | flags);

	/* The local client has to be marked as "pending to be removed" but has
	 * to be kept in the table in order to send it in a full table
	 * response issued before the next ttvn increment (consistency check)
	 */
	tt_local_entry->common.flags |= TT_CLIENT_PENDING;

	batadv_dbg(DBG_TT, bat_priv,
		   "Local tt entry (%pM) pending to be removed: %s\n",
		   tt_local_entry->common.addr, message);
}
2012-05-12 04:09:39 +04:00
/* schedule the removal of local client 'addr' from the translation
 * table; 'roaming' tags the deletion as caused by a roaming event and
 * 'message' is logged with the pending deletion
 */
void batadv_tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
			    const char *message, bool roaming)
{
	struct tt_local_entry *tt_local_entry;
	uint16_t flags = TT_CLIENT_DEL;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		return;

	if (roaming)
		flags |= TT_CLIENT_ROAM;

	tt_local_set_pending(bat_priv, tt_local_entry, flags, message);
	tt_local_entry_free_ref(tt_local_entry);
}
2011-04-27 16:27:44 +04:00
/* walk the whole local hash and mark every client that has not been
 * seen within TT_LOCAL_TIMEOUT as pending deletion; NOPURGE entries and
 * entries already pending are skipped
 */
static void tt_local_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct tt_common_entry *tt_common_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
				continue;

			/* entry already marked for deletion */
			if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
				continue;

			if (!batadv_has_timed_out(tt_local_entry->last_seen,
						  TT_LOCAL_TIMEOUT))
				continue;

			tt_local_set_pending(bat_priv, tt_local_entry,
					     TT_CLIENT_DEL, "timed out");
		}
		spin_unlock_bh(list_lock);
	}
}
2011-04-27 16:27:44 +04:00
/* tear down the local translation table: unlink and release every entry
 * and destroy the hash itself (called on mesh shutdown)
 */
static void tt_local_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt_local_hash)
		return;

	hash = bat_priv->tt_local_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			/* drop the hash table's reference */
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);

	bat_priv->tt_local_hash = NULL;
}
2011-04-27 16:27:44 +04:00
static int tt_global_init ( struct bat_priv * bat_priv )
2010-12-13 14:19:28 +03:00
{
2011-05-05 10:42:45 +04:00
if ( bat_priv - > tt_global_hash )
2012-05-05 15:27:28 +04:00
return 0 ;
2010-12-13 14:19:28 +03:00
2012-05-12 04:09:32 +04:00
bat_priv - > tt_global_hash = batadv_hash_new ( 1024 ) ;
2010-12-13 14:19:28 +03:00
2011-05-05 10:42:45 +04:00
if ( ! bat_priv - > tt_global_hash )
2012-05-05 15:27:28 +04:00
return - ENOMEM ;
2010-12-13 14:19:28 +03:00
2012-05-05 15:27:28 +04:00
return 0 ;
2010-12-13 14:19:28 +03:00
}
2011-04-27 16:27:44 +04:00
/* discard all queued (not yet announced) tt changes and reset the
 * pending change counter
 */
static void tt_changes_list_free(struct bat_priv *bat_priv)
{
	struct tt_change_node *entry, *safe;

	spin_lock_bh(&bat_priv->tt_changes_list_lock);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		list_del(&entry->list);
		kfree(entry);
	}

	atomic_set(&bat_priv->tt_local_changes, 0);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
}
2010-12-13 14:19:28 +03:00
2011-10-22 22:12:51 +04:00
/* find out if an orig_node is already in the list of a tt_global_entry.
 * returns true if found, false otherwise
 */
static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
				     const struct orig_node *orig_node)
{
	struct tt_orig_list_entry *tmp_orig_entry;
	const struct hlist_head *head;
	struct hlist_node *node;
	bool found = false;

	rcu_read_lock();
	head = &entry->orig_list;
	hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
		/* pointer comparison is enough: orig_nodes are unique */
		if (tmp_orig_entry->orig_node == orig_node) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
/* attach 'orig_node' (announcing the client at version 'ttvn') to the
 * originator list of a global tt entry; takes a reference on the
 * orig_node. Allocation failure is silently ignored (best effort).
 */
static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
				     struct orig_node *orig_node,
				     int ttvn)
{
	struct tt_orig_list_entry *orig_entry;

	orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
	if (!orig_entry)
		return;

	INIT_HLIST_NODE(&orig_entry->list);
	/* reference dropped in tt_orig_list_entry_free_rcu() */
	atomic_inc(&orig_node->refcount);
	atomic_inc(&orig_node->tt_size);
	orig_entry->orig_node = orig_node;
	orig_entry->ttvn = ttvn;

	spin_lock_bh(&tt_global_entry->list_lock);
	hlist_add_head_rcu(&orig_entry->list,
			   &tt_global_entry->orig_list);
	spin_unlock_bh(&tt_global_entry->list_lock);
}
2011-04-27 16:27:44 +04:00
/* add a client 'tt_addr' announced by 'orig_node' to the global
 * translation table, creating the global entry if needed. Any matching
 * local entry is removed afterwards.
 *
 * caller must hold orig_node refcount
 *
 * Returns 1 on success, 0 on allocation failure.
 */
int batadv_tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
			 const unsigned char *tt_addr, uint8_t ttvn,
			 bool roaming, bool wifi)
{
	struct tt_global_entry *tt_global_entry = NULL;
	int ret = 0;
	int hash_added;
	struct tt_common_entry *common;

	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);

	if (!tt_global_entry) {
		tt_global_entry = kzalloc(sizeof(*tt_global_entry),
					  GFP_ATOMIC);
		if (!tt_global_entry)
			goto out;

		common = &tt_global_entry->common;
		memcpy(common->addr, tt_addr, ETH_ALEN);

		common->flags = NO_FLAGS;
		tt_global_entry->roam_at = 0;
		/* refcount 2: one for the hash, one for the local pointer
		 * (released at 'out')
		 */
		atomic_set(&common->refcount, 2);

		INIT_HLIST_HEAD(&tt_global_entry->orig_list);
		spin_lock_init(&tt_global_entry->list_lock);

		hash_added = batadv_hash_add(bat_priv->tt_global_hash,
					     compare_tt, batadv_choose_orig,
					     common, &common->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* remove the reference for the hash */
			tt_global_entry_free_ref(tt_global_entry);
			goto out_remove;
		}

		tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
	} else {
		/* there is already a global entry, use this one. */

		/* If there is the TT_CLIENT_ROAM flag set, there is only one
		 * originator left in the list and we previously received a
		 * delete + roaming change for this originator.
		 *
		 * We should first delete the old originator before adding the
		 * new one.
		 */
		if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
			tt_global_del_orig_list(tt_global_entry);
			tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
			tt_global_entry->roam_at = 0;
		}

		if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
			tt_global_add_orig_entry(tt_global_entry, orig_node,
						 ttvn);
	}

	if (wifi)
		tt_global_entry->common.flags |= TT_CLIENT_WIFI;

	batadv_dbg(DBG_TT, bat_priv,
		   "Creating new global tt entry: %pM (via %pM)\n",
		   tt_global_entry->common.addr, orig_node->orig);

out_remove:
	/* remove address from local hash if present */
	batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
			       "global tt received", roaming);
	ret = 1;
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	return ret;
}
2011-10-22 22:12:51 +04:00
/* print all orig nodes who announce the address for this global entry.
 * it is assumed that the caller holds rcu_read_lock();
 */
static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
				  struct seq_file *seq)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	struct tt_common_entry *tt_common_entry;
	uint16_t flags;
	uint8_t last_ttvn;

	tt_common_entry = &tt_global_entry->common;

	head = &tt_global_entry->orig_list;

	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		flags = tt_common_entry->flags;
		last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
		/* one line per announcing originator: the ttvn the entry
		 * was received with vs. the originator's current ttvn
		 */
		seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
			   tt_global_entry->common.addr, orig_entry->ttvn,
			   orig_entry->orig_node->orig, last_ttvn,
			   (flags & TT_CLIENT_ROAM ? 'R' : '.'),
			   (flags & TT_CLIENT_WIFI ? 'W' : '.'));
	}
}
2012-05-12 04:09:39 +04:00
/* debugfs dump: print the whole global translation table, one line per
 * (client, announcing originator) pair
 */
int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);
	seq_printf(seq, "       %-13s %s       %-15s %s %s\n",
		   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			tt_global_print_entry(tt_global_entry, seq);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}
2011-10-22 22:12:51 +04:00
/* deletes the orig list of a tt_global_entry */
static void tt_global_del_orig_list ( struct tt_global_entry * tt_global_entry )
2010-12-13 14:19:28 +03:00
{
2011-10-22 22:12:51 +04:00
struct hlist_head * head ;
struct hlist_node * node , * safe ;
struct tt_orig_list_entry * orig_entry ;
2011-04-27 16:27:44 +04:00
2011-10-22 22:12:51 +04:00
spin_lock_bh ( & tt_global_entry - > list_lock ) ;
head = & tt_global_entry - > orig_list ;
hlist_for_each_entry_safe ( orig_entry , node , safe , head , list ) {
hlist_del_rcu ( node ) ;
tt_orig_list_entry_free_ref ( orig_entry ) ;
}
spin_unlock_bh ( & tt_global_entry - > list_lock ) ;
2010-12-13 14:19:28 +03:00
2011-10-22 22:12:51 +04:00
}
static void tt_global_del_orig_entry ( struct bat_priv * bat_priv ,
struct tt_global_entry * tt_global_entry ,
struct orig_node * orig_node ,
const char * message )
{
struct hlist_head * head ;
struct hlist_node * node , * safe ;
struct tt_orig_list_entry * orig_entry ;
spin_lock_bh ( & tt_global_entry - > list_lock ) ;
head = & tt_global_entry - > orig_list ;
hlist_for_each_entry_safe ( orig_entry , node , safe , head , list ) {
if ( orig_entry - > orig_node = = orig_node ) {
2012-05-12 15:48:58 +04:00
batadv_dbg ( DBG_TT , bat_priv ,
" Deleting %pM from global tt entry %pM: %s \n " ,
orig_node - > orig ,
tt_global_entry - > common . addr , message ) ;
2011-10-22 22:12:51 +04:00
hlist_del_rcu ( node ) ;
tt_orig_list_entry_free_ref ( orig_entry ) ;
}
}
spin_unlock_bh ( & tt_global_entry - > list_lock ) ;
}
static void tt_global_del_struct ( struct bat_priv * bat_priv ,
struct tt_global_entry * tt_global_entry ,
const char * message )
{
2012-05-12 15:48:58 +04:00
batadv_dbg ( DBG_TT , bat_priv , " Deleting global tt entry %pM: %s \n " ,
tt_global_entry - > common . addr , message ) ;
2011-04-27 16:28:07 +04:00
2012-05-12 15:48:56 +04:00
batadv_hash_remove ( bat_priv - > tt_global_hash , compare_tt ,
batadv_choose_orig , tt_global_entry - > common . addr ) ;
2011-10-22 22:12:51 +04:00
tt_global_entry_free_ref ( tt_global_entry ) ;
2010-12-13 14:19:28 +03:00
}
2011-10-22 22:12:51 +04:00
/* If the client is to be deleted, we check if it is the last origantor entry
* within tt_global entry . If yes , we set the TT_CLIENT_ROAM flag and the timer ,
* otherwise we simply remove the originator scheduled for deletion .
*/
static void tt_global_del_roaming ( struct bat_priv * bat_priv ,
struct tt_global_entry * tt_global_entry ,
struct orig_node * orig_node ,
const char * message )
{
bool last_entry = true ;
struct hlist_head * head ;
struct hlist_node * node ;
struct tt_orig_list_entry * orig_entry ;
/* no local entry exists, case 1:
* Check if this is the last one or if other entries exist .
*/
rcu_read_lock ( ) ;
head = & tt_global_entry - > orig_list ;
hlist_for_each_entry_rcu ( orig_entry , node , head , list ) {
if ( orig_entry - > orig_node ! = orig_node ) {
last_entry = false ;
break ;
}
}
rcu_read_unlock ( ) ;
if ( last_entry ) {
/* its the last one, mark for roaming. */
tt_global_entry - > common . flags | = TT_CLIENT_ROAM ;
tt_global_entry - > roam_at = jiffies ;
} else
/* there is another entry, we can simply delete this
* one and can still use the other one .
*/
tt_global_del_orig_entry ( bat_priv , tt_global_entry ,
orig_node , message ) ;
}
2012-02-05 21:55:22 +04:00
/* Delete (or schedule deletion of) the global TT entry for @addr.
 *
 * When @roaming is false the originator is removed outright and the whole
 * entry is destroyed once its originator list becomes empty.
 *
 * When the deletion is caused by a roaming event there are two cases:
 * 1) the client roamed from node A to node B => if @orig_node is the only
 *    originator left the entry is flagged TT_CLIENT_ROAM and a timer is
 *    started, waiting for node B to claim it (purged on timeout); with
 *    other originators present the originator is deleted directly.
 * 2) the client roamed to us => the global entry is useless now and is
 *    deleted immediately.
 */
static void tt_global_del(struct bat_priv *bat_priv,
			  struct orig_node *orig_node,
			  const unsigned char *addr,
			  const char *message, bool roaming)
{
	struct tt_global_entry *tt_global_entry = NULL;
	struct tt_local_entry *tt_local_entry = NULL;

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	if (!roaming) {
		tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
					 message);

		if (hlist_empty(&tt_global_entry->orig_list))
			tt_global_del_struct(bat_priv, tt_global_entry,
					     message);
		goto out;
	}

	/* a local entry means the client roamed to us (case 2) */
	tt_local_entry = tt_local_hash_find(bat_priv,
					    tt_global_entry->common.addr);
	if (tt_local_entry) {
		tt_global_del_orig_list(tt_global_entry);
		tt_global_del_struct(bat_priv, tt_global_entry, message);
	} else
		/* no local entry exists, case 1: check for roaming */
		tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
				      message);

out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
}
2012-05-12 04:09:39 +04:00
void batadv_tt_global_del_orig ( struct bat_priv * bat_priv ,
struct orig_node * orig_node , const char * message )
2010-12-13 14:19:28 +03:00
{
2011-05-05 10:42:45 +04:00
struct tt_global_entry * tt_global_entry ;
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
2011-04-27 16:27:44 +04:00
struct hashtable_t * hash = bat_priv - > tt_global_hash ;
struct hlist_node * node , * safe ;
struct hlist_head * head ;
2011-04-27 16:28:07 +04:00
spinlock_t * list_lock ; /* protects write access to the hash lists */
2010-12-13 14:19:28 +03:00
2011-10-19 12:28:26 +04:00
if ( ! hash )
return ;
2011-04-27 16:27:44 +04:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-04-27 16:28:07 +04:00
list_lock = & hash - > list_locks [ i ] ;
2010-12-13 14:19:28 +03:00
2011-04-27 16:28:07 +04:00
spin_lock_bh ( list_lock ) ;
2011-10-30 15:17:33 +04:00
hlist_for_each_entry_safe ( tt_common_entry , node , safe ,
2012-02-28 13:55:36 +04:00
head , hash_entry ) {
2011-10-30 15:17:33 +04:00
tt_global_entry = container_of ( tt_common_entry ,
struct tt_global_entry ,
common ) ;
2011-10-22 22:12:51 +04:00
tt_global_del_orig_entry ( bat_priv , tt_global_entry ,
orig_node , message ) ;
if ( hlist_empty ( & tt_global_entry - > orig_list ) ) {
2012-05-12 15:48:58 +04:00
batadv_dbg ( DBG_TT , bat_priv ,
" Deleting global tt entry %pM: %s \n " ,
tt_global_entry - > common . addr ,
message ) ;
2011-04-27 16:28:07 +04:00
hlist_del_rcu ( node ) ;
tt_global_entry_free_ref ( tt_global_entry ) ;
}
2011-04-27 16:27:44 +04:00
}
2011-04-27 16:28:07 +04:00
spin_unlock_bh ( list_lock ) ;
2010-12-13 14:19:28 +03:00
}
2011-04-27 16:27:44 +04:00
atomic_set ( & orig_node - > tt_size , 0 ) ;
2011-11-07 19:36:40 +04:00
orig_node - > tt_initialised = false ;
2010-12-13 14:19:28 +03:00
}
2011-04-27 16:27:57 +04:00
static void tt_global_roam_purge ( struct bat_priv * bat_priv )
{
struct hashtable_t * hash = bat_priv - > tt_global_hash ;
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry ;
2011-04-27 16:27:57 +04:00
struct tt_global_entry * tt_global_entry ;
struct hlist_node * node , * node_tmp ;
struct hlist_head * head ;
2011-04-27 16:28:07 +04:00
spinlock_t * list_lock ; /* protects write access to the hash lists */
2011-10-05 19:05:25 +04:00
uint32_t i ;
2011-04-27 16:27:57 +04:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-04-27 16:28:07 +04:00
list_lock = & hash - > list_locks [ i ] ;
2011-04-27 16:27:57 +04:00
2011-04-27 16:28:07 +04:00
spin_lock_bh ( list_lock ) ;
2011-10-30 15:17:33 +04:00
hlist_for_each_entry_safe ( tt_common_entry , node , node_tmp ,
2011-04-27 16:27:57 +04:00
head , hash_entry ) {
2011-10-30 15:17:33 +04:00
tt_global_entry = container_of ( tt_common_entry ,
struct tt_global_entry ,
common ) ;
if ( ! ( tt_global_entry - > common . flags & TT_CLIENT_ROAM ) )
2011-04-27 16:27:57 +04:00
continue ;
2012-05-12 15:48:58 +04:00
if ( ! batadv_has_timed_out ( tt_global_entry - > roam_at ,
TT_CLIENT_ROAM_TIMEOUT ) )
2011-04-27 16:27:57 +04:00
continue ;
2012-05-12 15:48:58 +04:00
batadv_dbg ( DBG_TT , bat_priv ,
" Deleting global tt entry (%pM): Roaming timeout \n " ,
tt_global_entry - > common . addr ) ;
2011-10-22 22:12:51 +04:00
2011-04-27 16:28:07 +04:00
hlist_del_rcu ( node ) ;
tt_global_entry_free_ref ( tt_global_entry ) ;
2011-04-27 16:27:57 +04:00
}
2011-04-27 16:28:07 +04:00
spin_unlock_bh ( list_lock ) ;
2011-04-27 16:27:57 +04:00
}
}
2011-04-27 16:27:44 +04:00
static void tt_global_table_free ( struct bat_priv * bat_priv )
2010-12-13 14:19:28 +03:00
{
2011-04-27 16:28:07 +04:00
struct hashtable_t * hash ;
spinlock_t * list_lock ; /* protects write access to the hash lists */
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry ;
2011-04-27 16:28:07 +04:00
struct tt_global_entry * tt_global_entry ;
struct hlist_node * node , * node_tmp ;
struct hlist_head * head ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
2011-04-27 16:28:07 +04:00
2011-05-05 10:42:45 +04:00
if ( ! bat_priv - > tt_global_hash )
2010-12-13 14:19:28 +03:00
return ;
2011-04-27 16:28:07 +04:00
hash = bat_priv - > tt_global_hash ;
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
list_lock = & hash - > list_locks [ i ] ;
spin_lock_bh ( list_lock ) ;
2011-10-30 15:17:33 +04:00
hlist_for_each_entry_safe ( tt_common_entry , node , node_tmp ,
2011-04-27 16:28:07 +04:00
head , hash_entry ) {
hlist_del_rcu ( node ) ;
2011-10-30 15:17:33 +04:00
tt_global_entry = container_of ( tt_common_entry ,
struct tt_global_entry ,
common ) ;
2011-04-27 16:28:07 +04:00
tt_global_entry_free_ref ( tt_global_entry ) ;
}
spin_unlock_bh ( list_lock ) ;
}
2012-05-12 04:09:32 +04:00
batadv_hash_destroy ( hash ) ;
2011-04-27 16:28:07 +04:00
2011-05-05 10:42:45 +04:00
bat_priv - > tt_global_hash = NULL ;
2010-12-13 14:19:28 +03:00
}
2011-07-07 17:35:36 +04:00
static bool _is_ap_isolated ( struct tt_local_entry * tt_local_entry ,
struct tt_global_entry * tt_global_entry )
{
bool ret = false ;
2011-10-30 15:17:33 +04:00
if ( tt_local_entry - > common . flags & TT_CLIENT_WIFI & &
tt_global_entry - > common . flags & TT_CLIENT_WIFI )
2011-07-07 17:35:36 +04:00
ret = true ;
return ret ;
}
2012-05-12 04:09:39 +04:00
struct orig_node * batadv_transtable_search ( struct bat_priv * bat_priv ,
const uint8_t * src ,
const uint8_t * addr )
2010-12-13 14:19:28 +03:00
{
2011-07-07 17:35:37 +04:00
struct tt_local_entry * tt_local_entry = NULL ;
struct tt_global_entry * tt_global_entry = NULL ;
2011-02-18 15:28:10 +03:00
struct orig_node * orig_node = NULL ;
2011-10-22 22:12:51 +04:00
struct neigh_node * router = NULL ;
struct hlist_head * head ;
struct hlist_node * node ;
struct tt_orig_list_entry * orig_entry ;
int best_tq ;
2010-12-13 14:19:28 +03:00
2011-07-07 17:35:37 +04:00
if ( src & & atomic_read ( & bat_priv - > ap_isolation ) ) {
tt_local_entry = tt_local_hash_find ( bat_priv , src ) ;
if ( ! tt_local_entry )
goto out ;
}
2011-02-18 15:28:09 +03:00
2011-07-07 17:35:37 +04:00
tt_global_entry = tt_global_hash_find ( bat_priv , addr ) ;
2011-05-05 10:42:45 +04:00
if ( ! tt_global_entry )
2011-02-18 15:28:10 +03:00
goto out ;
2011-02-18 15:28:09 +03:00
2011-07-07 17:35:37 +04:00
/* check whether the clients should not communicate due to AP
2012-05-12 04:09:43 +04:00
* isolation
*/
2011-07-07 17:35:37 +04:00
if ( tt_local_entry & & _is_ap_isolated ( tt_local_entry , tt_global_entry ) )
goto out ;
2011-10-22 22:12:51 +04:00
best_tq = 0 ;
2010-12-13 14:19:28 +03:00
2011-10-22 22:12:51 +04:00
rcu_read_lock ( ) ;
head = & tt_global_entry - > orig_list ;
hlist_for_each_entry_rcu ( orig_entry , node , head , list ) {
2012-05-12 04:09:34 +04:00
router = batadv_orig_node_get_router ( orig_entry - > orig_node ) ;
2011-10-22 22:12:51 +04:00
if ( ! router )
continue ;
2010-12-13 14:19:28 +03:00
2011-10-22 22:12:51 +04:00
if ( router - > tq_avg > best_tq ) {
orig_node = orig_entry - > orig_node ;
best_tq = router - > tq_avg ;
}
2012-05-12 04:09:34 +04:00
batadv_neigh_node_free_ref ( router ) ;
2011-10-22 22:12:51 +04:00
}
/* found anything? */
if ( orig_node & & ! atomic_inc_not_zero ( & orig_node - > refcount ) )
orig_node = NULL ;
rcu_read_unlock ( ) ;
2011-02-18 15:28:10 +03:00
out :
2011-07-07 17:35:37 +04:00
if ( tt_global_entry )
tt_global_entry_free_ref ( tt_global_entry ) ;
if ( tt_local_entry )
tt_local_entry_free_ref ( tt_local_entry ) ;
2011-02-18 15:28:10 +03:00
return orig_node ;
2010-12-13 14:19:28 +03:00
}
2011-04-27 16:27:44 +04:00
/* Calculates the checksum of the local table of a given orig_node */
2012-02-05 21:55:22 +04:00
static uint16_t tt_global_crc ( struct bat_priv * bat_priv ,
struct orig_node * orig_node )
2011-04-27 16:27:44 +04:00
{
uint16_t total = 0 , total_one ;
struct hashtable_t * hash = bat_priv - > tt_global_hash ;
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry ;
2011-04-27 16:27:44 +04:00
struct tt_global_entry * tt_global_entry ;
struct hlist_node * node ;
struct hlist_head * head ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
int j ;
2011-04-27 16:27:44 +04:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
rcu_read_lock ( ) ;
2011-10-30 15:17:33 +04:00
hlist_for_each_entry_rcu ( tt_common_entry , node ,
2011-04-27 16:27:44 +04:00
head , hash_entry ) {
2011-10-30 15:17:33 +04:00
tt_global_entry = container_of ( tt_common_entry ,
struct tt_global_entry ,
common ) ;
2011-10-22 22:12:51 +04:00
/* Roaming clients are in the global table for
* consistency only . They don ' t have to be
* taken into account while computing the
* global crc
*/
if ( tt_global_entry - > common . flags & TT_CLIENT_ROAM )
continue ;
/* find out if this global entry is announced by this
* originator
*/
if ( ! tt_global_entry_has_orig ( tt_global_entry ,
orig_node ) )
continue ;
total_one = 0 ;
for ( j = 0 ; j < ETH_ALEN ; j + + )
total_one = crc16_byte ( total_one ,
tt_global_entry - > common . addr [ j ] ) ;
total ^ = total_one ;
2011-04-27 16:27:44 +04:00
}
rcu_read_unlock ( ) ;
}
return total ;
}
/* Calculates the checksum of the local table */
2012-05-07 00:22:05 +04:00
static uint16_t batadv_tt_local_crc ( struct bat_priv * bat_priv )
2011-04-27 16:27:44 +04:00
{
uint16_t total = 0 , total_one ;
struct hashtable_t * hash = bat_priv - > tt_local_hash ;
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry ;
2011-04-27 16:27:44 +04:00
struct hlist_node * node ;
struct hlist_head * head ;
2011-10-05 19:05:25 +04:00
uint32_t i ;
int j ;
2011-04-27 16:27:44 +04:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
rcu_read_lock ( ) ;
2011-10-30 15:17:33 +04:00
hlist_for_each_entry_rcu ( tt_common_entry , node ,
2011-04-27 16:27:44 +04:00
head , hash_entry ) {
2011-07-07 03:40:58 +04:00
/* not yet committed clients have not to be taken into
2012-05-12 04:09:43 +04:00
* account while computing the CRC
*/
2011-10-30 15:17:33 +04:00
if ( tt_common_entry - > flags & TT_CLIENT_NEW )
2011-07-07 03:40:58 +04:00
continue ;
2011-04-27 16:27:44 +04:00
total_one = 0 ;
for ( j = 0 ; j < ETH_ALEN ; j + + )
total_one = crc16_byte ( total_one ,
2011-10-30 15:17:33 +04:00
tt_common_entry - > addr [ j ] ) ;
2011-04-27 16:27:44 +04:00
total ^ = total_one ;
}
rcu_read_unlock ( ) ;
}
return total ;
}
static void tt_req_list_free ( struct bat_priv * bat_priv )
{
struct tt_req_node * node , * safe ;
spin_lock_bh ( & bat_priv - > tt_req_list_lock ) ;
list_for_each_entry_safe ( node , safe , & bat_priv - > tt_req_list , list ) {
list_del ( & node - > list ) ;
kfree ( node ) ;
}
spin_unlock_bh ( & bat_priv - > tt_req_list_lock ) ;
}
2012-02-05 21:55:22 +04:00
/* Cache the TT change buffer received from @orig_node's last OGM.
 *
 * The previous buffer is replaced only when the new OGM actually carried
 * changes (tt_num_changes > 0). On allocation failure the stored length
 * stays zero so no stale data is exposed.
 */
static void tt_save_orig_buffer(struct bat_priv *bat_priv,
				struct orig_node *orig_node,
				const unsigned char *tt_buff,
				uint8_t tt_num_changes)
{
	uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);

	/* Replace the old buffer only if I received something in the
	 * last OGM (the OGM could carry no changes)
	 */
	spin_lock_bh(&orig_node->tt_buff_lock);
	if (tt_buff_len > 0) {
		kfree(orig_node->tt_buff);
		orig_node->tt_buff_len = 0;
		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
		if (orig_node->tt_buff) {
			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
			orig_node->tt_buff_len = tt_buff_len;
		}
	}
	spin_unlock_bh(&orig_node->tt_buff_lock);
}
static void tt_req_purge ( struct bat_priv * bat_priv )
{
struct tt_req_node * node , * safe ;
spin_lock_bh ( & bat_priv - > tt_req_list_lock ) ;
list_for_each_entry_safe ( node , safe , & bat_priv - > tt_req_list , list ) {
2012-05-12 15:48:58 +04:00
if ( batadv_has_timed_out ( node - > issued_at , TT_REQUEST_TIMEOUT ) ) {
2011-04-27 16:27:44 +04:00
list_del ( & node - > list ) ;
kfree ( node ) ;
}
}
spin_unlock_bh ( & bat_priv - > tt_req_list_lock ) ;
}
/* returns the pointer to the new tt_req_node struct if no request
2012-05-12 04:09:43 +04:00
* has already been issued for this orig_node , NULL otherwise
*/
2011-04-27 16:27:44 +04:00
static struct tt_req_node * new_tt_req_node ( struct bat_priv * bat_priv ,
struct orig_node * orig_node )
{
struct tt_req_node * tt_req_node_tmp , * tt_req_node = NULL ;
spin_lock_bh ( & bat_priv - > tt_req_list_lock ) ;
list_for_each_entry ( tt_req_node_tmp , & bat_priv - > tt_req_list , list ) {
2012-05-12 15:48:58 +04:00
if ( batadv_compare_eth ( tt_req_node_tmp , orig_node ) & &
! batadv_has_timed_out ( tt_req_node_tmp - > issued_at ,
TT_REQUEST_TIMEOUT ) )
2011-04-27 16:27:44 +04:00
goto unlock ;
}
tt_req_node = kmalloc ( sizeof ( * tt_req_node ) , GFP_ATOMIC ) ;
if ( ! tt_req_node )
goto unlock ;
memcpy ( tt_req_node - > addr , orig_node - > orig , ETH_ALEN ) ;
tt_req_node - > issued_at = jiffies ;
list_add ( & tt_req_node - > list , & bat_priv - > tt_req_list ) ;
unlock :
spin_unlock_bh ( & bat_priv - > tt_req_list_lock ) ;
return tt_req_node ;
}
2011-07-07 03:40:58 +04:00
/* data_ptr is useless here, but has to be kept to respect the prototype */
static int tt_local_valid_entry ( const void * entry_ptr , const void * data_ptr )
{
2011-10-30 15:17:33 +04:00
const struct tt_common_entry * tt_common_entry = entry_ptr ;
2011-07-07 03:40:58 +04:00
2011-10-30 15:17:33 +04:00
if ( tt_common_entry - > flags & TT_CLIENT_NEW )
2011-07-07 03:40:58 +04:00
return 0 ;
return 1 ;
}
2011-04-27 16:27:44 +04:00
static int tt_global_valid_entry ( const void * entry_ptr , const void * data_ptr )
{
2011-10-30 15:17:33 +04:00
const struct tt_common_entry * tt_common_entry = entry_ptr ;
const struct tt_global_entry * tt_global_entry ;
2011-04-27 16:27:44 +04:00
const struct orig_node * orig_node = data_ptr ;
2011-10-30 15:17:33 +04:00
if ( tt_common_entry - > flags & TT_CLIENT_ROAM )
2011-04-27 16:27:57 +04:00
return 0 ;
2011-10-30 15:17:33 +04:00
tt_global_entry = container_of ( tt_common_entry , struct tt_global_entry ,
common ) ;
2011-10-22 22:12:51 +04:00
return tt_global_entry_has_orig ( tt_global_entry , orig_node ) ;
2011-04-27 16:27:44 +04:00
}
/* Build a TT_RESPONSE skb containing up to tt_len bytes of table entries.
 *
 * Entries are taken from @hash; @valid_cb (with @cb_data) filters which
 * ones are included. The payload is clamped to the soft interface MTU.
 * Returns the allocated skb, or NULL on allocation failure; the caller
 * owns the skb.
 */
static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
					      struct hashtable_t *hash,
					      struct hard_iface *primary_if,
					      int (*valid_cb)(const void *,
							      const void *),
					      void *cb_data)
{
	struct tt_common_entry *tt_common;
	struct tt_query_packet *tt_response;
	struct tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct tt_query_packet);
	uint32_t i;

	/* clamp the payload so packet header + entries fit into one frame */
	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct tt_change);
	}
	tt_tot = tt_len / sizeof(struct tt_change);

	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct tt_query_packet *)skb_put(skb,
							tt_query_size + tt_len);
	tt_response->ttvn = ttvn;

	tt_change = (struct tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	rcu_read_lock();
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
			if (tt_count == tt_tot)
				break;

			if (valid_cb && !valid_cb(tt_common, cb_data))
				continue;

			memcpy(tt_change->addr, tt_common->addr, ETH_ALEN);
			tt_change->flags = NO_FLAGS;

			tt_count++;
			tt_change++;
		}
	}
	rcu_read_unlock();

	/* store in the message the number of entries we have successfully
	 * copied
	 */
	tt_response->tt_data = htons(tt_count);

out:
	return skb;
}
2011-07-30 15:10:18 +04:00
/* Issue a TT_REQUEST towards @dst_orig_node for table version @ttvn.
 *
 * A request is only sent if none is already pending for this originator.
 * Returns 0 on success, 1 on failure (skb and tt_req_node are cleaned up
 * on the error path).
 */
static int send_tt_request(struct bat_priv *bat_priv,
			   struct orig_node *dst_orig_node,
			   uint8_t ttvn, uint16_t tt_crc, bool full_table)
{
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_request;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if;
	struct tt_req_node *tt_req_node = NULL;
	int ret = 1;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet
	 */
	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_request = (struct tt_query_packet *)skb_put(skb,
						sizeof(struct tt_query_packet));

	tt_request->header.packet_type = BAT_TT_QUERY;
	tt_request->header.version = COMPAT_VERSION;
	tt_request->header.ttl = TTL;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->ttvn = ttvn;
	tt_request->tt_data = htons(tt_crc);
	tt_request->flags = TT_REQUEST;

	if (full_table)
		tt_request->flags |= TT_FULL_TABLE;

	neigh_node = batadv_orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	batadv_dbg(DBG_TT, bat_priv,
		   "Sending TT_REQUEST to %pM via %pM [%c]\n",
		   dst_orig_node->orig, neigh_node->addr,
		   (full_table ? 'F' : '.'));

	batadv_inc_counter(bat_priv, BAT_CNT_TT_REQUEST_TX);

	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (ret)
		kfree_skb(skb);
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt_req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt_req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}
static bool send_other_tt_response ( struct bat_priv * bat_priv ,
struct tt_query_packet * tt_request )
{
struct orig_node * req_dst_orig_node = NULL , * res_dst_orig_node = NULL ;
struct neigh_node * neigh_node = NULL ;
struct hard_iface * primary_if = NULL ;
uint8_t orig_ttvn , req_ttvn , ttvn ;
int ret = false ;
unsigned char * tt_buff ;
bool full_table ;
uint16_t tt_len , tt_tot ;
struct sk_buff * skb = NULL ;
struct tt_query_packet * tt_response ;
2012-05-12 15:48:58 +04:00
batadv_dbg ( DBG_TT , bat_priv ,
" Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c] \n " ,
tt_request - > src , tt_request - > ttvn , tt_request - > dst ,
( tt_request - > flags & TT_FULL_TABLE ? ' F ' : ' . ' ) ) ;
2011-04-27 16:27:44 +04:00
/* Let's get the orig node of the REAL destination */
2012-05-12 15:48:56 +04:00
req_dst_orig_node = batadv_orig_hash_find ( bat_priv , tt_request - > dst ) ;
2011-04-27 16:27:44 +04:00
if ( ! req_dst_orig_node )
goto out ;
2012-05-12 15:48:56 +04:00
res_dst_orig_node = batadv_orig_hash_find ( bat_priv , tt_request - > src ) ;
2011-04-27 16:27:44 +04:00
if ( ! res_dst_orig_node )
goto out ;
2012-05-12 04:09:34 +04:00
neigh_node = batadv_orig_node_get_router ( res_dst_orig_node ) ;
2011-04-27 16:27:44 +04:00
if ( ! neigh_node )
goto out ;
2012-05-12 15:48:54 +04:00
primary_if = batadv_primary_if_get_selected ( bat_priv ) ;
2011-04-27 16:27:44 +04:00
if ( ! primary_if )
goto out ;
orig_ttvn = ( uint8_t ) atomic_read ( & req_dst_orig_node - > last_ttvn ) ;
req_ttvn = tt_request - > ttvn ;
2011-07-09 19:52:13 +04:00
/* I don't have the requested data */
2011-04-27 16:27:44 +04:00
if ( orig_ttvn ! = req_ttvn | |
2012-04-22 10:44:27 +04:00
tt_request - > tt_data ! = htons ( req_dst_orig_node - > tt_crc ) )
2011-04-27 16:27:44 +04:00
goto out ;
2011-07-09 19:52:13 +04:00
/* If the full table has been explicitly requested */
2011-04-27 16:27:44 +04:00
if ( tt_request - > flags & TT_FULL_TABLE | |
! req_dst_orig_node - > tt_buff )
full_table = true ;
else
full_table = false ;
/* In this version, fragmentation is not implemented, then
2012-05-12 04:09:43 +04:00
* I ' ll send only one packet with as much TT entries as I can
*/
2011-04-27 16:27:44 +04:00
if ( ! full_table ) {
spin_lock_bh ( & req_dst_orig_node - > tt_buff_lock ) ;
tt_len = req_dst_orig_node - > tt_buff_len ;
tt_tot = tt_len / sizeof ( struct tt_change ) ;
skb = dev_alloc_skb ( sizeof ( struct tt_query_packet ) +
tt_len + ETH_HLEN ) ;
if ( ! skb )
goto unlock ;
skb_reserve ( skb , ETH_HLEN ) ;
tt_response = ( struct tt_query_packet * ) skb_put ( skb ,
sizeof ( struct tt_query_packet ) + tt_len ) ;
tt_response - > ttvn = req_ttvn ;
tt_response - > tt_data = htons ( tt_tot ) ;
tt_buff = skb - > data + sizeof ( struct tt_query_packet ) ;
/* Copy the last orig_node's OGM buffer */
memcpy ( tt_buff , req_dst_orig_node - > tt_buff ,
req_dst_orig_node - > tt_buff_len ) ;
spin_unlock_bh ( & req_dst_orig_node - > tt_buff_lock ) ;
} else {
tt_len = ( uint16_t ) atomic_read ( & req_dst_orig_node - > tt_size ) *
sizeof ( struct tt_change ) ;
ttvn = ( uint8_t ) atomic_read ( & req_dst_orig_node - > last_ttvn ) ;
skb = tt_response_fill_table ( tt_len , ttvn ,
bat_priv - > tt_global_hash ,
primary_if , tt_global_valid_entry ,
req_dst_orig_node ) ;
if ( ! skb )
goto out ;
tt_response = ( struct tt_query_packet * ) skb - > data ;
}
2011-11-20 18:47:38 +04:00
tt_response - > header . packet_type = BAT_TT_QUERY ;
tt_response - > header . version = COMPAT_VERSION ;
tt_response - > header . ttl = TTL ;
2011-04-27 16:27:44 +04:00
memcpy ( tt_response - > src , req_dst_orig_node - > orig , ETH_ALEN ) ;
memcpy ( tt_response - > dst , tt_request - > src , ETH_ALEN ) ;
tt_response - > flags = TT_RESPONSE ;
if ( full_table )
tt_response - > flags | = TT_FULL_TABLE ;
2012-05-12 15:48:58 +04:00
batadv_dbg ( DBG_TT , bat_priv ,
" Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u) \n " ,
res_dst_orig_node - > orig , neigh_node - > addr ,
req_dst_orig_node - > orig , req_ttvn ) ;
2011-04-27 16:27:44 +04:00
2012-04-20 19:02:45 +04:00
batadv_inc_counter ( bat_priv , BAT_CNT_TT_RESPONSE_TX ) ;
2012-05-12 04:09:37 +04:00
batadv_send_skb_packet ( skb , neigh_node - > if_incoming , neigh_node - > addr ) ;
2011-04-27 16:27:44 +04:00
ret = true ;
goto out ;
unlock :
spin_unlock_bh ( & req_dst_orig_node - > tt_buff_lock ) ;
out :
if ( res_dst_orig_node )
2012-05-12 04:09:34 +04:00
batadv_orig_node_free_ref ( res_dst_orig_node ) ;
2011-04-27 16:27:44 +04:00
if ( req_dst_orig_node )
2012-05-12 04:09:34 +04:00
batadv_orig_node_free_ref ( req_dst_orig_node ) ;
2011-04-27 16:27:44 +04:00
if ( neigh_node )
2012-05-12 04:09:34 +04:00
batadv_neigh_node_free_ref ( neigh_node ) ;
2011-04-27 16:27:44 +04:00
if ( primary_if )
2012-05-12 15:48:54 +04:00
batadv_hardif_free_ref ( primary_if ) ;
2011-04-27 16:27:44 +04:00
if ( ! ret )
kfree_skb ( skb ) ;
return ret ;
}
/* Answer a TT_REQUEST addressed to this node by sending back either the
 * buffered set of local table changes or, when that is not possible, the
 * full local translation table.
 *
 * Returns true in all cases: the request was addressed to us, so the caller
 * must never re-route the packet (even if no response could be sent).
 */
static bool send_my_tt_response(struct bat_priv *bat_priv,
				struct tt_query_packet *tt_request)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t my_ttvn, req_ttvn, ttvn;
	int ret = false;	/* tracks whether the skb was handed off */
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	batadv_dbg(DBG_TT, bat_priv,
		   "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
		   tt_request->src, tt_request->ttvn,
		   (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	req_ttvn = tt_request->ttvn;

	orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
	if (!orig_node)
		goto out;

	neigh_node = batadv_orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* If the full table has been explicitly requested or the gap
	 * is too big send the whole local translation table
	 */
	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
	    !bat_priv->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can
	 */
	if (!full_table) {
		/* answer with the pre-serialised diff buffer; the lock keeps
		 * the buffer stable while it is copied into the skb
		 */
		spin_lock_bh(&bat_priv->tt_buff_lock);
		tt_len = bat_priv->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		memcpy(tt_buff, bat_priv->tt_buff,
		       bat_priv->tt_buff_len);
		spin_unlock_bh(&bat_priv->tt_buff_lock);
	} else {
		/* serialise the whole local table into a fresh skb */
		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
			 sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_local_hash,
					     primary_if, tt_local_valid_entry,
					     NULL);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->header.packet_type = BAT_TT_QUERY;
	tt_response->header.version = COMPAT_VERSION;
	tt_response->header.ttl = TTL;
	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	batadv_dbg(DBG_TT, bat_priv,
		   "Sending TT_RESPONSE to %pM via %pM [%c]\n",
		   orig_node->orig, neigh_node->addr,
		   (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);

	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);	/* kfree_skb(NULL) is a no-op */
	/* This packet was for me, so it doesn't need to be re-routed */
	return true;
}
2012-05-12 04:09:39 +04:00
bool batadv_send_tt_response ( struct bat_priv * bat_priv ,
struct tt_query_packet * tt_request )
2011-04-27 16:27:44 +04:00
{
2012-05-12 04:09:42 +04:00
if ( batadv_is_my_mac ( tt_request - > dst ) ) {
2012-01-22 23:00:23 +04:00
/* don't answer backbone gws! */
2012-05-12 15:38:47 +04:00
if ( batadv_bla_is_backbone_gw_orig ( bat_priv , tt_request - > src ) )
2012-01-22 23:00:23 +04:00
return true ;
2011-04-27 16:27:44 +04:00
return send_my_tt_response ( bat_priv , tt_request ) ;
2012-01-22 23:00:23 +04:00
} else {
2011-04-27 16:27:44 +04:00
return send_other_tt_response ( bat_priv , tt_request ) ;
2012-01-22 23:00:23 +04:00
}
2011-04-27 16:27:44 +04:00
}
/* Apply tt_num_changes table updates received from orig_node.
 *
 * Deletions are always applied; on the first failed addition the procedure
 * stops without marking the table initialised, so the uncommitted ttvn is
 * never advertised with corrupted data.
 */
static void _tt_update_changes(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct tt_change *tt_change,
			       uint16_t tt_num_changes, uint8_t ttvn)
{
	struct tt_change *entry;
	int is_wifi;
	int i;

	for (i = 0; i < tt_num_changes; i++) {
		entry = &tt_change[i];

		if (entry->flags & TT_CLIENT_DEL) {
			tt_global_del(bat_priv, orig_node, entry->addr,
				      "tt removed by changes",
				      entry->flags & TT_CLIENT_ROAM);
			continue;
		}

		is_wifi = entry->flags & TT_CLIENT_WIFI;
		if (!batadv_tt_global_add(bat_priv, orig_node, entry->addr,
					  ttvn, false, is_wifi))
			/* In case of problem while storing a
			 * global_entry, we stop the updating
			 * procedure without committing the
			 * ttvn change. This will avoid to send
			 * corrupted data on tt_request
			 */
			return;
	}

	orig_node->tt_initialised = true;
}
static void tt_fill_gtable ( struct bat_priv * bat_priv ,
struct tt_query_packet * tt_response )
{
struct orig_node * orig_node = NULL ;
2012-05-12 15:48:56 +04:00
orig_node = batadv_orig_hash_find ( bat_priv , tt_response - > src ) ;
2011-04-27 16:27:44 +04:00
if ( ! orig_node )
goto out ;
/* Purge the old table first.. */
2012-05-12 04:09:39 +04:00
batadv_tt_global_del_orig ( bat_priv , orig_node , " Received full table " ) ;
2011-04-27 16:27:44 +04:00
_tt_update_changes ( bat_priv , orig_node ,
( struct tt_change * ) ( tt_response + 1 ) ,
2012-04-22 10:44:27 +04:00
ntohs ( tt_response - > tt_data ) , tt_response - > ttvn ) ;
2011-04-27 16:27:44 +04:00
spin_lock_bh ( & orig_node - > tt_buff_lock ) ;
kfree ( orig_node - > tt_buff ) ;
orig_node - > tt_buff_len = 0 ;
orig_node - > tt_buff = NULL ;
spin_unlock_bh ( & orig_node - > tt_buff_lock ) ;
atomic_set ( & orig_node - > last_ttvn , tt_response - > ttvn ) ;
out :
if ( orig_node )
2012-05-12 04:09:34 +04:00
batadv_orig_node_free_ref ( orig_node ) ;
2011-04-27 16:27:44 +04:00
}
2011-07-30 15:10:18 +04:00
/* Apply a diff received from orig_node, cache the raw change buffer for
 * later retransmission to other requesters, and commit the new ttvn.
 */
static void tt_update_changes(struct bat_priv *bat_priv,
			      struct orig_node *orig_node,
			      uint16_t tt_num_changes, uint8_t ttvn,
			      struct tt_change *tt_change)
{
	_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
			   ttvn);

	tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
			    tt_num_changes);
	atomic_set(&orig_node->last_ttvn, ttvn);
}
2012-05-12 04:09:39 +04:00
bool batadv_is_my_client ( struct bat_priv * bat_priv , const uint8_t * addr )
2011-04-27 16:27:44 +04:00
{
2011-04-27 16:28:07 +04:00
struct tt_local_entry * tt_local_entry = NULL ;
bool ret = false ;
2011-04-27 16:27:44 +04:00
tt_local_entry = tt_local_hash_find ( bat_priv , addr ) ;
2011-04-27 16:28:07 +04:00
if ( ! tt_local_entry )
goto out ;
2011-07-07 03:40:58 +04:00
/* Check if the client has been logically deleted (but is kept for
2012-05-12 04:09:43 +04:00
* consistency purpose )
*/
2011-10-30 15:17:33 +04:00
if ( tt_local_entry - > common . flags & TT_CLIENT_PENDING )
2011-07-07 03:40:58 +04:00
goto out ;
2011-04-27 16:28:07 +04:00
ret = true ;
out :
2011-04-27 16:27:44 +04:00
if ( tt_local_entry )
2011-04-27 16:28:07 +04:00
tt_local_entry_free_ref ( tt_local_entry ) ;
return ret ;
2011-04-27 16:27:44 +04:00
}
2012-05-12 04:09:39 +04:00
/* Process an incoming TT_RESPONSE: apply the carried table (full replacement
 * or incremental diff), drop the matching entry from the pending-request
 * list and refresh the stored CRC for the sender.
 */
void batadv_handle_tt_response(struct bat_priv *bat_priv,
			       struct tt_query_packet *tt_response)
{
	struct tt_req_node *node, *safe;
	struct orig_node *orig_node = NULL;

	batadv_dbg(DBG_TT, bat_priv,
		   "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
		   tt_response->src, tt_response->ttvn,
		   ntohs(tt_response->tt_data),
		   (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* we should have never asked a backbone gw */
	if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
		goto out;

	orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	if (tt_response->flags & TT_FULL_TABLE)
		tt_fill_gtable(bat_priv, tt_response);
	else
		tt_update_changes(bat_priv, orig_node,
				  ntohs(tt_response->tt_data),
				  tt_response->ttvn,
				  (struct tt_change *)(tt_response + 1));

	/* Delete the tt_req_node from pending tt_requests list */
	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (!batadv_compare_eth(node->addr, tt_response->src))
			continue;
		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);

	/* Recalculate the CRC for this orig_node and store it */
	orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
	/* Roaming phase is over: tables are in sync again. I can
	 * unset the flag
	 */
	orig_node->tt_poss_change = false;
out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
}
2012-05-12 04:09:39 +04:00
/* Initialise the local and global translation tables and arm the periodic
 * purge worker.
 *
 * Returns 1 on success or the (negative) error code of the failing
 * table initialisation.
 */
int batadv_tt_init(struct bat_priv *bat_priv)
{
	int ret = tt_local_init(bat_priv);

	if (ret >= 0) {
		ret = tt_global_init(bat_priv);
		if (ret >= 0) {
			tt_start_timer(bat_priv);
			ret = 1;
		}
	}

	return ret;
}
2011-04-27 16:27:57 +04:00
static void tt_roam_list_free ( struct bat_priv * bat_priv )
2011-04-27 16:27:44 +04:00
{
2011-04-27 16:27:57 +04:00
struct tt_roam_node * node , * safe ;
2011-04-27 16:27:44 +04:00
2011-04-27 16:27:57 +04:00
spin_lock_bh ( & bat_priv - > tt_roam_list_lock ) ;
2011-04-27 16:27:44 +04:00
2011-04-27 16:27:57 +04:00
list_for_each_entry_safe ( node , safe , & bat_priv - > tt_roam_list , list ) {
list_del ( & node - > list ) ;
kfree ( node ) ;
}
spin_unlock_bh ( & bat_priv - > tt_roam_list_lock ) ;
}
static void tt_roam_purge ( struct bat_priv * bat_priv )
{
struct tt_roam_node * node , * safe ;
spin_lock_bh ( & bat_priv - > tt_roam_list_lock ) ;
list_for_each_entry_safe ( node , safe , & bat_priv - > tt_roam_list , list ) {
2012-05-12 15:48:58 +04:00
if ( ! batadv_has_timed_out ( node - > first_time , ROAMING_MAX_TIME ) )
2011-04-27 16:27:57 +04:00
continue ;
list_del ( & node - > list ) ;
kfree ( node ) ;
}
spin_unlock_bh ( & bat_priv - > tt_roam_list_lock ) ;
}
/* This function checks whether the client already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise
 */
static bool tt_check_roam_count(struct bat_priv *bat_priv,
				uint8_t *client)
{
	struct tt_roam_node *tt_roam_node;
	bool ret = false;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet
	 */
	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
		if (!batadv_compare_eth(tt_roam_node->addr, client))
			continue;

		/* an expired entry no longer limits the client; a fresh one
		 * is added below and the stale entry is later reclaimed by
		 * tt_roam_purge()
		 */
		if (batadv_has_timed_out(tt_roam_node->first_time,
					 ROAMING_MAX_TIME))
			continue;

		if (!atomic_dec_not_zero(&tt_roam_node->counter))
			/* Sorry, you roamed too many times! */
			goto unlock;
		ret = true;
		break;
	}

	if (!ret) {
		/* first roaming event seen for this client (or its window
		 * expired): open a new window of ROAMING_MAX_COUNT events
		 */
		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
		if (!tt_roam_node)
			goto unlock;

		tt_roam_node->first_time = jiffies;
		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
		memcpy(tt_roam_node->addr, client, ETH_ALEN);

		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
		ret = true;
	}
unlock:
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
	return ret;
}
2012-02-05 21:55:22 +04:00
/* Send a ROAMING_ADV packet to orig_node announcing that client has roamed
 * to this node, unless the client already exceeded its allowed number of
 * roaming phases (see tt_check_roam_count()).
 */
static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
			  struct orig_node *orig_node)
{
	struct neigh_node *neigh_node = NULL;
	struct sk_buff *skb = NULL;
	struct roam_adv_packet *roam_adv_packet;
	int ret = 1;	/* stays non-zero until the skb has been handed off */
	struct hard_iface *primary_if;

	/* before going on we have to check whether the client has
	 * already roamed to us too many times
	 */
	if (!tt_check_roam_count(bat_priv, client))
		goto out;

	skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
					sizeof(struct roam_adv_packet));

	roam_adv_packet->header.packet_type = BAT_ROAM_ADV;
	roam_adv_packet->header.version = COMPAT_VERSION;
	roam_adv_packet->header.ttl = TTL;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;
	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	/* the reference is only needed to read the interface address */
	batadv_hardif_free_ref(primary_if);
	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
	memcpy(roam_adv_packet->client, client, ETH_ALEN);

	neigh_node = batadv_orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	batadv_dbg(DBG_TT, bat_priv,
		   "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
		   orig_node->orig, client, neigh_node->addr);

	batadv_inc_counter(bat_priv, BAT_CNT_TT_ROAM_ADV_TX);

	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (ret)
		kfree_skb(skb);	/* kfree_skb(NULL) is a no-op */
	return;
}
/* Periodic worker: purge stale local clients, roamed-away global clients
 * and expired request/roaming bookkeeping, then re-arm the timer.
 */
static void tt_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, tt_work);

	tt_local_purge(bat_priv);
	tt_global_roam_purge(bat_priv);
	tt_req_purge(bat_priv);
	tt_roam_purge(bat_priv);

	tt_start_timer(bat_priv);
}
2011-04-27 16:27:57 +04:00
2012-05-12 04:09:39 +04:00
/* Tear down all translation-table state. The purge worker is cancelled
 * synchronously first so nothing touches the tables while they are freed.
 */
void batadv_tt_free(struct bat_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->tt_work);

	tt_local_table_free(bat_priv);
	tt_global_table_free(bat_priv);
	tt_req_list_free(bat_priv);
	tt_changes_list_free(bat_priv);
	tt_roam_list_free(bat_priv);

	kfree(bat_priv->tt_buff);
}
2011-07-07 03:40:58 +04:00
2011-11-07 19:47:01 +04:00
/* This function will enable or disable the specified flags for all the entries
2012-05-12 04:09:43 +04:00
* in the given hash table and returns the number of modified entries
*/
2011-11-07 19:47:01 +04:00
static uint16_t tt_set_flags ( struct hashtable_t * hash , uint16_t flags ,
bool enable )
2011-07-07 03:40:58 +04:00
{
2011-10-05 19:05:25 +04:00
uint32_t i ;
2011-11-07 19:47:01 +04:00
uint16_t changed_num = 0 ;
2011-07-07 03:40:58 +04:00
struct hlist_head * head ;
struct hlist_node * node ;
2011-10-30 15:17:33 +04:00
struct tt_common_entry * tt_common_entry ;
2011-07-07 03:40:58 +04:00
if ( ! hash )
2011-11-07 19:47:01 +04:00
goto out ;
2011-07-07 03:40:58 +04:00
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
rcu_read_lock ( ) ;
2011-10-30 15:17:33 +04:00
hlist_for_each_entry_rcu ( tt_common_entry , node ,
2011-07-07 03:40:58 +04:00
head , hash_entry ) {
2011-11-07 19:47:01 +04:00
if ( enable ) {
if ( ( tt_common_entry - > flags & flags ) = = flags )
continue ;
tt_common_entry - > flags | = flags ;
} else {
if ( ! ( tt_common_entry - > flags & flags ) )
continue ;
tt_common_entry - > flags & = ~ flags ;
}
changed_num + + ;
2011-07-07 03:40:58 +04:00
}
rcu_read_unlock ( ) ;
}
2011-11-07 19:47:01 +04:00
out :
return changed_num ;
2011-07-07 03:40:58 +04:00
}
/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
				continue;

			batadv_dbg(DBG_TT, bat_priv,
				   "Deleting local tt entry (%pM): pending\n",
				   tt_common_entry->addr);

			atomic_dec(&bat_priv->num_local_tt);
			/* RCU-safe unlink; readers may still hold a ref */
			hlist_del_rcu(node);
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}
}
2012-05-07 00:22:05 +04:00
/* Commit all pending local table changes for the next OGM: activate NEW
 * clients, purge PENDING ones, refresh the local CRC and bump the ttvn.
 *
 * Returns the result of tt_changes_fill_buff(), or -ENOENT when there was
 * nothing to commit.
 */
static int tt_commit_changes(struct bat_priv *bat_priv,
			     unsigned char **packet_buff, int *packet_buff_len,
			     int packet_min_len)
{
	uint16_t changed_num = 0;

	if (atomic_read(&bat_priv->tt_local_changes) < 1)
		return -ENOENT;

	/* clearing TT_CLIENT_NEW turns freshly-added clients into regular
	 * local entries
	 */
	changed_num = tt_set_flags(bat_priv->tt_local_hash,
				   TT_CLIENT_NEW, false);

	/* all reset entries have to be counted as local entries */
	atomic_add(changed_num, &bat_priv->num_local_tt);
	tt_local_purge_pending_clients(bat_priv);
	bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);

	/* Increment the TTVN only once per OGM interval */
	atomic_inc(&bat_priv->ttvn);
	batadv_dbg(DBG_TT, bat_priv,
		   "Local changes committed, updating to ttvn %u\n",
		   (uint8_t)atomic_read(&bat_priv->ttvn));
	bat_priv->tt_poss_change = false;

	/* reset the sending counter */
	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

	return tt_changes_fill_buff(bat_priv, packet_buff,
				    packet_buff_len, packet_min_len);
}
/* when calling this function (hard_iface == primary_if) has to be true */
int batadv_tt_append_diff ( struct bat_priv * bat_priv ,
unsigned char * * packet_buff , int * packet_buff_len ,
int packet_min_len )
{
int tt_num_changes ;
/* if at least one change happened */
tt_num_changes = tt_commit_changes ( bat_priv , packet_buff ,
packet_buff_len , packet_min_len ) ;
/* if the changes have been sent often enough */
if ( ( tt_num_changes < 0 ) & &
( ! atomic_dec_not_zero ( & bat_priv - > tt_ogm_append_cnt ) ) ) {
tt_realloc_packet_buff ( packet_buff , packet_buff_len ,
packet_min_len , packet_min_len ) ;
tt_num_changes = 0 ;
}
return tt_num_changes ;
2011-07-07 03:40:58 +04:00
}
2011-07-07 17:35:36 +04:00
2012-05-12 04:09:39 +04:00
/* Return true when AP isolation forbids forwarding traffic from the global
 * client src to the local client dst; always false when the ap_isolation
 * option is disabled.
 */
bool batadv_is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src,
			   uint8_t *dst)
{
	struct tt_global_entry *global = NULL;
	struct tt_local_entry *local;
	bool isolated = true;

	if (!atomic_read(&bat_priv->ap_isolation))
		return false;

	local = tt_local_hash_find(bat_priv, dst);
	if (local) {
		global = tt_global_hash_find(bat_priv, src);
		if (global && !_is_ap_isolated(local, global))
			isolated = false;
	}
	/* NOTE(review): when either lookup fails the pair is reported as
	 * isolated - confirm this is the intended policy for unknown clients
	 */

	if (global)
		tt_global_entry_free_ref(global);
	if (local)
		tt_local_entry_free_ref(local);

	return isolated;
}
2011-07-30 15:10:18 +04:00
2012-05-12 04:09:39 +04:00
/* Evaluate the TT metadata carried by an OGM from orig_node and either
 * apply the attached diff or issue a TT_REQUEST (diff or full table) when
 * the local view of the sender's table cannot be brought in sync.
 */
void batadv_tt_update_orig(struct bat_priv *bat_priv,
			   struct orig_node *orig_node,
			   const unsigned char *tt_buff, uint8_t tt_num_changes,
			   uint8_t ttvn, uint16_t tt_crc)
{
	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
	bool full_table = true;

	/* don't care about a backbone gateways updates. */
	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
		return;

	/* orig table not initialised AND first diff is in the OGM OR the ttvn
	 * increased by one -> we can apply the attached changes
	 */
	if ((!orig_node->tt_initialised && ttvn == 1) ||
	    ttvn - orig_ttvn == 1) {
		/* the OGM could not contain the changes due to their size or
		 * because they have already been sent TT_OGM_APPEND_MAX times.
		 * In this case send a tt request
		 */
		if (!tt_num_changes) {
			full_table = false;
			goto request_table;
		}

		tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
				  (struct tt_change *)tt_buff);

		/* Even if we received the precomputed crc with the OGM, we
		 * prefer to recompute it to spot any possible inconsistency
		 * in the global table
		 */
		orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);

		/* The ttvn alone is not enough to guarantee consistency
		 * because a single value could represent different states
		 * (due to the wrap around). Thus a node has to check whether
		 * the resulting table (after applying the changes) is still
		 * consistent or not. E.g. a node could disconnect while its
		 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
		 * checking the CRC value is mandatory to detect the
		 * inconsistency
		 */
		if (orig_node->tt_crc != tt_crc)
			goto request_table;

		/* Roaming phase is over: tables are in sync again. I can
		 * unset the flag
		 */
		orig_node->tt_poss_change = false;
	} else {
		/* if we missed more than one change or our tables are not
		 * in sync anymore -> request fresh tt data
		 */
		if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
		    orig_node->tt_crc != tt_crc) {
			/* request_table is also reached from the branch above
			 * (with full_table = false when only the diff is
			 * needed)
			 */
request_table:
			batadv_dbg(DBG_TT, bat_priv,
				   "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
				   orig_node->orig, ttvn, orig_ttvn, tt_crc,
				   orig_node->tt_crc, tt_num_changes);
			send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
					full_table);
			return;
		}
	}
}
2012-03-16 21:03:28 +04:00
/* returns true whether we know that the client has moved from its old
* originator to another one . This entry is kept is still kept for consistency
* purposes
*/
2012-05-12 04:09:39 +04:00
bool batadv_tt_global_client_is_roaming ( struct bat_priv * bat_priv ,
uint8_t * addr )
2012-03-16 21:03:28 +04:00
{
struct tt_global_entry * tt_global_entry ;
bool ret = false ;
tt_global_entry = tt_global_hash_find ( bat_priv , addr ) ;
if ( ! tt_global_entry )
goto out ;
ret = tt_global_entry - > common . flags & TT_CLIENT_ROAM ;
tt_global_entry_free_ref ( tt_global_entry ) ;
out :
return ret ;
}