/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "hash.h"
#include "originator.h"

/* forward declarations for the purge timer callback and the global
 * delete helper, both defined later in this file */
static void hna_local_purge(struct work_struct *work);
static void _hna_global_del_orig(struct bat_priv *bat_priv,
				 struct hna_global_entry *hna_global_entry,
				 char *message);
2011-02-18 15:28:09 +03:00
/* returns 1 if they are the same mac addr */
static int compare_lhna ( struct hlist_node * node , void * data2 )
{
void * data1 = container_of ( node , struct hna_local_entry , hash_entry ) ;
return ( memcmp ( data1 , data2 , ETH_ALEN ) = = 0 ? 1 : 0 ) ;
}
/* hash comparison callback for the global HNA table;
 * returns 1 if they are the same mac addr */
static int compare_ghna(struct hlist_node *node, void *data2)
{
	void *data1 = container_of(node, struct hna_global_entry, hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}
2010-12-13 14:19:28 +03:00
static void hna_local_start_timer ( struct bat_priv * bat_priv )
{
INIT_DELAYED_WORK ( & bat_priv - > hna_work , hna_local_purge ) ;
queue_delayed_work ( bat_event_workqueue , & bat_priv - > hna_work , 10 * HZ ) ;
}
2011-02-18 15:28:09 +03:00
static struct hna_local_entry * hna_local_hash_find ( struct bat_priv * bat_priv ,
void * data )
{
struct hashtable_t * hash = bat_priv - > hna_local_hash ;
struct hlist_head * head ;
struct hlist_node * node ;
struct hna_local_entry * hna_local_entry , * hna_local_entry_tmp = NULL ;
int index ;
if ( ! hash )
return NULL ;
index = choose_orig ( data , hash - > size ) ;
head = & hash - > table [ index ] ;
rcu_read_lock ( ) ;
hlist_for_each_entry_rcu ( hna_local_entry , node , head , hash_entry ) {
if ( ! compare_eth ( hna_local_entry , data ) )
continue ;
hna_local_entry_tmp = hna_local_entry ;
break ;
}
rcu_read_unlock ( ) ;
return hna_local_entry_tmp ;
}
static struct hna_global_entry * hna_global_hash_find ( struct bat_priv * bat_priv ,
void * data )
{
struct hashtable_t * hash = bat_priv - > hna_global_hash ;
struct hlist_head * head ;
struct hlist_node * node ;
struct hna_global_entry * hna_global_entry ;
struct hna_global_entry * hna_global_entry_tmp = NULL ;
int index ;
if ( ! hash )
return NULL ;
index = choose_orig ( data , hash - > size ) ;
head = & hash - > table [ index ] ;
rcu_read_lock ( ) ;
hlist_for_each_entry_rcu ( hna_global_entry , node , head , hash_entry ) {
if ( ! compare_eth ( hna_global_entry , data ) )
continue ;
hna_global_entry_tmp = hna_global_entry ;
break ;
}
rcu_read_unlock ( ) ;
return hna_global_entry_tmp ;
}
2010-12-13 14:19:28 +03:00
int hna_local_init ( struct bat_priv * bat_priv )
{
if ( bat_priv - > hna_local_hash )
return 1 ;
bat_priv - > hna_local_hash = hash_new ( 1024 ) ;
if ( ! bat_priv - > hna_local_hash )
return 0 ;
atomic_set ( & bat_priv - > hna_local_changed , 0 ) ;
hna_local_start_timer ( bat_priv ) ;
return 1 ;
}
/* announce a MAC address as locally reachable.
 * Refreshes last_seen if the address is already known, otherwise
 * creates a new local entry (subject to the packet-size limit) and
 * drops any conflicting global entry. */
void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct hna_local_entry *hna_local_entry;
	struct hna_global_entry *hna_global_entry;
	int required_bytes;

	spin_lock_bh(&bat_priv->hna_lhash_lock);
	hna_local_entry = hna_local_hash_find(bat_priv, addr);
	spin_unlock_bh(&bat_priv->hna_lhash_lock);

	if (hna_local_entry) {
		hna_local_entry->last_seen = jiffies;
		return;
	}

	/* only announce as many hosts as possible in the batman-packet and
	   space in batman_packet->num_hna That also should give a limit to
	   MAC-flooding. */
	required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN;
	required_bytes += BAT_PACKET_LEN;

	if ((required_bytes > ETH_DATA_LEN) ||
	    (atomic_read(&bat_priv->aggregated_ogms) &&
	     required_bytes > MAX_AGGREGATION_BYTES) ||
	    (bat_priv->num_local_hna + 1 > 255)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Can't add new local hna entry (%pM): "
			"number of local hna entries exceeds packet size\n",
			addr);
		return;
	}

	bat_dbg(DBG_ROUTES, bat_priv,
		"Creating new local hna entry: %pM\n", addr);

	hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
	if (!hna_local_entry)
		return;

	memcpy(hna_local_entry->addr, addr, ETH_ALEN);
	hna_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (compare_eth(addr, soft_iface->dev_addr))
		hna_local_entry->never_purge = 1;
	else
		hna_local_entry->never_purge = 0;

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	hash_add(bat_priv->hna_local_hash, compare_lhna, choose_orig,
		 hna_local_entry, &hna_local_entry->hash_entry);
	bat_priv->num_local_hna++;
	atomic_set(&bat_priv->hna_local_changed, 1);

	spin_unlock_bh(&bat_priv->hna_lhash_lock);

	/* remove address from global hash if present */
	spin_lock_bh(&bat_priv->hna_ghash_lock);

	hna_global_entry = hna_global_hash_find(bat_priv, addr);

	if (hna_global_entry)
		_hna_global_del_orig(bat_priv, hna_global_entry,
				     "local hna received");

	spin_unlock_bh(&bat_priv->hna_ghash_lock);
}
int hna_local_fill_buffer ( struct bat_priv * bat_priv ,
unsigned char * buff , int buff_len )
{
struct hashtable_t * hash = bat_priv - > hna_local_hash ;
struct hna_local_entry * hna_local_entry ;
2011-02-18 15:28:09 +03:00
struct hlist_node * node ;
2010-12-13 14:19:28 +03:00
struct hlist_head * head ;
2011-02-18 15:28:09 +03:00
int i , count = 0 ;
2010-12-13 14:19:28 +03:00
spin_lock_bh ( & bat_priv - > hna_lhash_lock ) ;
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-02-18 15:28:09 +03:00
rcu_read_lock ( ) ;
hlist_for_each_entry_rcu ( hna_local_entry , node ,
head , hash_entry ) {
2010-12-13 14:19:28 +03:00
if ( buff_len < ( count + 1 ) * ETH_ALEN )
break ;
memcpy ( buff + ( count * ETH_ALEN ) , hna_local_entry - > addr ,
ETH_ALEN ) ;
count + + ;
}
2011-02-18 15:28:09 +03:00
rcu_read_unlock ( ) ;
2010-12-13 14:19:28 +03:00
}
/* if we did not get all new local hnas see you next time ;-) */
if ( count = = bat_priv - > num_local_hna )
atomic_set ( & bat_priv - > hna_local_changed , 0 ) ;
spin_unlock_bh ( & bat_priv - > hna_lhash_lock ) ;
2010-12-20 21:32:03 +03:00
return count ;
2010-12-13 14:19:28 +03:00
}
int hna_local_seq_print_text ( struct seq_file * seq , void * offset )
{
struct net_device * net_dev = ( struct net_device * ) seq - > private ;
struct bat_priv * bat_priv = netdev_priv ( net_dev ) ;
struct hashtable_t * hash = bat_priv - > hna_local_hash ;
struct hna_local_entry * hna_local_entry ;
2011-04-20 17:40:58 +04:00
struct hard_iface * primary_if ;
2011-02-18 15:28:09 +03:00
struct hlist_node * node ;
2010-12-13 14:19:28 +03:00
struct hlist_head * head ;
size_t buf_size , pos ;
char * buff ;
2011-04-20 17:40:58 +04:00
int i , ret = 0 ;
primary_if = primary_if_get_selected ( bat_priv ) ;
if ( ! primary_if ) {
ret = seq_printf ( seq , " BATMAN mesh %s disabled - "
" please specify interfaces to enable it \n " ,
net_dev - > name ) ;
goto out ;
}
2010-12-13 14:19:28 +03:00
2011-04-20 17:40:58 +04:00
if ( primary_if - > if_status ! = IF_ACTIVE ) {
ret = seq_printf ( seq , " BATMAN mesh %s disabled - "
" primary interface not active \n " ,
net_dev - > name ) ;
goto out ;
2010-12-13 14:19:28 +03:00
}
seq_printf ( seq , " Locally retrieved addresses (from %s) "
" announced via HNA: \n " ,
net_dev - > name ) ;
spin_lock_bh ( & bat_priv - > hna_lhash_lock ) ;
buf_size = 1 ;
/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-02-18 15:28:09 +03:00
rcu_read_lock ( ) ;
__hlist_for_each_rcu ( node , head )
2010-12-13 14:19:28 +03:00
buf_size + = 21 ;
2011-02-18 15:28:09 +03:00
rcu_read_unlock ( ) ;
2010-12-13 14:19:28 +03:00
}
buff = kmalloc ( buf_size , GFP_ATOMIC ) ;
if ( ! buff ) {
spin_unlock_bh ( & bat_priv - > hna_lhash_lock ) ;
2011-04-20 17:40:58 +04:00
ret = - ENOMEM ;
goto out ;
2010-12-13 14:19:28 +03:00
}
2011-02-18 15:28:09 +03:00
2010-12-13 14:19:28 +03:00
buff [ 0 ] = ' \0 ' ;
pos = 0 ;
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-02-18 15:28:09 +03:00
rcu_read_lock ( ) ;
hlist_for_each_entry_rcu ( hna_local_entry , node ,
head , hash_entry ) {
2010-12-13 14:19:28 +03:00
pos + = snprintf ( buff + pos , 22 , " * %pM \n " ,
hna_local_entry - > addr ) ;
}
2011-02-18 15:28:09 +03:00
rcu_read_unlock ( ) ;
2010-12-13 14:19:28 +03:00
}
spin_unlock_bh ( & bat_priv - > hna_lhash_lock ) ;
seq_printf ( seq , " %s " , buff ) ;
kfree ( buff ) ;
2011-04-20 17:40:58 +04:00
out :
if ( primary_if )
hardif_free_ref ( primary_if ) ;
return ret ;
2010-12-13 14:19:28 +03:00
}
2011-02-18 15:28:09 +03:00
static void _hna_local_del ( struct hlist_node * node , void * arg )
2010-12-13 14:19:28 +03:00
{
struct bat_priv * bat_priv = ( struct bat_priv * ) arg ;
2011-02-18 15:28:09 +03:00
void * data = container_of ( node , struct hna_local_entry , hash_entry ) ;
2010-12-13 14:19:28 +03:00
kfree ( data ) ;
bat_priv - > num_local_hna - - ;
atomic_set ( & bat_priv - > hna_local_changed , 1 ) ;
}
static void hna_local_del ( struct bat_priv * bat_priv ,
struct hna_local_entry * hna_local_entry ,
char * message )
{
bat_dbg ( DBG_ROUTES , bat_priv , " Deleting local hna entry (%pM): %s \n " ,
hna_local_entry - > addr , message ) ;
2011-02-18 15:28:09 +03:00
hash_remove ( bat_priv - > hna_local_hash , compare_lhna , choose_orig ,
2010-12-13 14:19:28 +03:00
hna_local_entry - > addr ) ;
2011-02-18 15:28:09 +03:00
_hna_local_del ( & hna_local_entry - > hash_entry , bat_priv ) ;
2010-12-13 14:19:28 +03:00
}
/* delete the local entry for addr, if one exists */
void hna_local_remove(struct bat_priv *bat_priv,
		      uint8_t *addr, char *message)
{
	struct hna_local_entry *hna_local_entry;

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	hna_local_entry = hna_local_hash_find(bat_priv, addr);

	if (hna_local_entry)
		hna_local_del(bat_priv, hna_local_entry, message);

	spin_unlock_bh(&bat_priv->hna_lhash_lock);
}
static void hna_local_purge ( struct work_struct * work )
{
struct delayed_work * delayed_work =
container_of ( work , struct delayed_work , work ) ;
struct bat_priv * bat_priv =
container_of ( delayed_work , struct bat_priv , hna_work ) ;
struct hashtable_t * hash = bat_priv - > hna_local_hash ;
struct hna_local_entry * hna_local_entry ;
2011-02-18 15:28:09 +03:00
struct hlist_node * node , * node_tmp ;
2010-12-13 14:19:28 +03:00
struct hlist_head * head ;
unsigned long timeout ;
2011-02-18 15:28:09 +03:00
int i ;
2010-12-13 14:19:28 +03:00
spin_lock_bh ( & bat_priv - > hna_lhash_lock ) ;
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-02-18 15:28:09 +03:00
hlist_for_each_entry_safe ( hna_local_entry , node , node_tmp ,
head , hash_entry ) {
if ( hna_local_entry - > never_purge )
continue ;
2010-12-13 14:19:28 +03:00
timeout = hna_local_entry - > last_seen ;
timeout + = LOCAL_HNA_TIMEOUT * HZ ;
2011-02-18 15:28:09 +03:00
if ( time_before ( jiffies , timeout ) )
continue ;
hna_local_del ( bat_priv , hna_local_entry ,
" address timed out " ) ;
2010-12-13 14:19:28 +03:00
}
}
spin_unlock_bh ( & bat_priv - > hna_lhash_lock ) ;
hna_local_start_timer ( bat_priv ) ;
}
void hna_local_free ( struct bat_priv * bat_priv )
{
if ( ! bat_priv - > hna_local_hash )
return ;
cancel_delayed_work_sync ( & bat_priv - > hna_work ) ;
hash_delete ( bat_priv - > hna_local_hash , _hna_local_del , bat_priv ) ;
bat_priv - > hna_local_hash = NULL ;
}
int hna_global_init ( struct bat_priv * bat_priv )
{
if ( bat_priv - > hna_global_hash )
return 1 ;
bat_priv - > hna_global_hash = hash_new ( 1024 ) ;
if ( ! bat_priv - > hna_global_hash )
return 0 ;
return 1 ;
}
void hna_global_add_orig ( struct bat_priv * bat_priv ,
struct orig_node * orig_node ,
unsigned char * hna_buff , int hna_buff_len )
{
struct hna_global_entry * hna_global_entry ;
struct hna_local_entry * hna_local_entry ;
int hna_buff_count = 0 ;
unsigned char * hna_ptr ;
while ( ( hna_buff_count + 1 ) * ETH_ALEN < = hna_buff_len ) {
spin_lock_bh ( & bat_priv - > hna_ghash_lock ) ;
hna_ptr = hna_buff + ( hna_buff_count * ETH_ALEN ) ;
2011-02-18 15:28:09 +03:00
hna_global_entry = hna_global_hash_find ( bat_priv , hna_ptr ) ;
2010-12-13 14:19:28 +03:00
if ( ! hna_global_entry ) {
spin_unlock_bh ( & bat_priv - > hna_ghash_lock ) ;
hna_global_entry =
kmalloc ( sizeof ( struct hna_global_entry ) ,
GFP_ATOMIC ) ;
if ( ! hna_global_entry )
break ;
memcpy ( hna_global_entry - > addr , hna_ptr , ETH_ALEN ) ;
bat_dbg ( DBG_ROUTES , bat_priv ,
" Creating new global hna entry: "
" %pM (via %pM) \n " ,
hna_global_entry - > addr , orig_node - > orig ) ;
spin_lock_bh ( & bat_priv - > hna_ghash_lock ) ;
2011-02-18 15:28:09 +03:00
hash_add ( bat_priv - > hna_global_hash , compare_ghna ,
choose_orig , hna_global_entry ,
& hna_global_entry - > hash_entry ) ;
2010-12-13 14:19:28 +03:00
}
hna_global_entry - > orig_node = orig_node ;
spin_unlock_bh ( & bat_priv - > hna_ghash_lock ) ;
/* remove address from local hash if present */
spin_lock_bh ( & bat_priv - > hna_lhash_lock ) ;
hna_ptr = hna_buff + ( hna_buff_count * ETH_ALEN ) ;
2011-02-18 15:28:09 +03:00
hna_local_entry = hna_local_hash_find ( bat_priv , hna_ptr ) ;
2010-12-13 14:19:28 +03:00
if ( hna_local_entry )
hna_local_del ( bat_priv , hna_local_entry ,
" global hna received " ) ;
spin_unlock_bh ( & bat_priv - > hna_lhash_lock ) ;
hna_buff_count + + ;
}
/* initialize, and overwrite if malloc succeeds */
orig_node - > hna_buff = NULL ;
orig_node - > hna_buff_len = 0 ;
if ( hna_buff_len > 0 ) {
orig_node - > hna_buff = kmalloc ( hna_buff_len , GFP_ATOMIC ) ;
if ( orig_node - > hna_buff ) {
memcpy ( orig_node - > hna_buff , hna_buff , hna_buff_len ) ;
orig_node - > hna_buff_len = hna_buff_len ;
}
}
}
int hna_global_seq_print_text ( struct seq_file * seq , void * offset )
{
struct net_device * net_dev = ( struct net_device * ) seq - > private ;
struct bat_priv * bat_priv = netdev_priv ( net_dev ) ;
struct hashtable_t * hash = bat_priv - > hna_global_hash ;
struct hna_global_entry * hna_global_entry ;
2011-04-20 17:40:58 +04:00
struct hard_iface * primary_if ;
2011-02-18 15:28:09 +03:00
struct hlist_node * node ;
2010-12-13 14:19:28 +03:00
struct hlist_head * head ;
size_t buf_size , pos ;
char * buff ;
2011-04-20 17:40:58 +04:00
int i , ret = 0 ;
primary_if = primary_if_get_selected ( bat_priv ) ;
if ( ! primary_if ) {
ret = seq_printf ( seq , " BATMAN mesh %s disabled - please "
" specify interfaces to enable it \n " ,
net_dev - > name ) ;
goto out ;
}
2010-12-13 14:19:28 +03:00
2011-04-20 17:40:58 +04:00
if ( primary_if - > if_status ! = IF_ACTIVE ) {
ret = seq_printf ( seq , " BATMAN mesh %s disabled - "
" primary interface not active \n " ,
net_dev - > name ) ;
goto out ;
2010-12-13 14:19:28 +03:00
}
seq_printf ( seq , " Globally announced HNAs received via the mesh %s \n " ,
net_dev - > name ) ;
spin_lock_bh ( & bat_priv - > hna_ghash_lock ) ;
buf_size = 1 ;
/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-02-18 15:28:09 +03:00
rcu_read_lock ( ) ;
__hlist_for_each_rcu ( node , head )
2010-12-13 14:19:28 +03:00
buf_size + = 43 ;
2011-02-18 15:28:09 +03:00
rcu_read_unlock ( ) ;
2010-12-13 14:19:28 +03:00
}
buff = kmalloc ( buf_size , GFP_ATOMIC ) ;
if ( ! buff ) {
spin_unlock_bh ( & bat_priv - > hna_ghash_lock ) ;
2011-04-20 17:40:58 +04:00
ret = - ENOMEM ;
goto out ;
2010-12-13 14:19:28 +03:00
}
buff [ 0 ] = ' \0 ' ;
pos = 0 ;
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-02-18 15:28:09 +03:00
rcu_read_lock ( ) ;
hlist_for_each_entry_rcu ( hna_global_entry , node ,
head , hash_entry ) {
2010-12-13 14:19:28 +03:00
pos + = snprintf ( buff + pos , 44 ,
" * %pM via %pM \n " ,
hna_global_entry - > addr ,
hna_global_entry - > orig_node - > orig ) ;
}
2011-02-18 15:28:09 +03:00
rcu_read_unlock ( ) ;
2010-12-13 14:19:28 +03:00
}
spin_unlock_bh ( & bat_priv - > hna_ghash_lock ) ;
seq_printf ( seq , " %s " , buff ) ;
kfree ( buff ) ;
2011-04-20 17:40:58 +04:00
out :
if ( primary_if )
hardif_free_ref ( primary_if ) ;
return ret ;
2010-12-13 14:19:28 +03:00
}
static void _hna_global_del_orig ( struct bat_priv * bat_priv ,
struct hna_global_entry * hna_global_entry ,
char * message )
{
bat_dbg ( DBG_ROUTES , bat_priv ,
" Deleting global hna entry %pM (via %pM): %s \n " ,
hna_global_entry - > addr , hna_global_entry - > orig_node - > orig ,
message ) ;
2011-02-18 15:28:09 +03:00
hash_remove ( bat_priv - > hna_global_hash , compare_ghna , choose_orig ,
2010-12-13 14:19:28 +03:00
hna_global_entry - > addr ) ;
kfree ( hna_global_entry ) ;
}
void hna_global_del_orig ( struct bat_priv * bat_priv ,
struct orig_node * orig_node , char * message )
{
struct hna_global_entry * hna_global_entry ;
int hna_buff_count = 0 ;
unsigned char * hna_ptr ;
if ( orig_node - > hna_buff_len = = 0 )
return ;
spin_lock_bh ( & bat_priv - > hna_ghash_lock ) ;
while ( ( hna_buff_count + 1 ) * ETH_ALEN < = orig_node - > hna_buff_len ) {
hna_ptr = orig_node - > hna_buff + ( hna_buff_count * ETH_ALEN ) ;
2011-02-18 15:28:09 +03:00
hna_global_entry = hna_global_hash_find ( bat_priv , hna_ptr ) ;
2010-12-13 14:19:28 +03:00
if ( ( hna_global_entry ) & &
( hna_global_entry - > orig_node = = orig_node ) )
_hna_global_del_orig ( bat_priv , hna_global_entry ,
message ) ;
hna_buff_count + + ;
}
spin_unlock_bh ( & bat_priv - > hna_ghash_lock ) ;
orig_node - > hna_buff_len = 0 ;
kfree ( orig_node - > hna_buff ) ;
orig_node - > hna_buff = NULL ;
}
2011-02-18 15:28:09 +03:00
static void hna_global_del ( struct hlist_node * node , void * arg )
2010-12-13 14:19:28 +03:00
{
2011-02-18 15:28:09 +03:00
void * data = container_of ( node , struct hna_global_entry , hash_entry ) ;
2010-12-13 14:19:28 +03:00
kfree ( data ) ;
}
void hna_global_free ( struct bat_priv * bat_priv )
{
if ( ! bat_priv - > hna_global_hash )
return ;
hash_delete ( bat_priv - > hna_global_hash , hna_global_del , NULL ) ;
bat_priv - > hna_global_hash = NULL ;
}
/* find the originator announcing the given MAC address.
 * Returns the orig_node with its refcount incremented (caller must
 * release it), or NULL if unknown or the originator is going away. */
struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
{
	struct hna_global_entry *hna_global_entry;
	struct orig_node *orig_node = NULL;

	spin_lock_bh(&bat_priv->hna_ghash_lock);
	hna_global_entry = hna_global_hash_find(bat_priv, addr);

	if (!hna_global_entry)
		goto out;

	/* refcount may already have dropped to zero on a dying node;
	 * in that case behave as if no entry was found */
	if (!atomic_inc_not_zero(&hna_global_entry->orig_node->refcount))
		goto out;

	orig_node = hna_global_entry->orig_node;

out:
	spin_unlock_bh(&bat_priv->hna_ghash_lock);
	return orig_node;
}