/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
		    const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warning("Interface %s is not up - can't send packet via that interface!\n",
			   hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
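
/* Illustrative only (not a call site in this file): a caller is expected to
 * hand over a fully built batman-adv payload together with the outgoing
 * hard_iface, e.g.
 *
 *	skb = skb_copy(orig_skb, GFP_ATOMIC);
 *	if (skb)
 *		send_skb_packet(skb, hard_iface, neigh_node->addr);
 *
 * send_skb_packet() consumes the skb in every case - it is either handed to
 * dev_queue_xmit() or freed on the error path. neigh_node->addr stands for
 * whatever next-hop address the caller resolved.
 */

/* realloc_packet_buffer() grows or shrinks the per-interface OGM buffer
 * while preserving the OGM header at its start; the TT change area behind
 * the header is rebuilt by the callers below. */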

static void realloc_packet_buffer(struct hard_iface *hard_iface,
				  int new_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, hard_iface->packet_buff,
		       BATMAN_OGM_HLEN);

		kfree(hard_iface->packet_buff);
		hard_iface->packet_buff = new_buff;
		hard_iface->packet_len = new_len;
	}
}
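
/* prepare_packet_buffer() sizes the buffer for the pending local translation
 * table changes, appends them behind the OGM header and returns the number
 * of changes that were packed. */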

/* when calling this function (hard_iface == primary_if) has to be true */
static int prepare_packet_buffer(struct bat_priv *bat_priv,
				 struct hard_iface *hard_iface)
{
	int new_len;

	new_len = BATMAN_OGM_HLEN +
		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));

	/* if we have too many changes for one packet don't send any
	 * and wait for the tt table request which will be fragmented */
	if (new_len > hard_iface->soft_iface->mtu)
		new_len = BATMAN_OGM_HLEN;

	realloc_packet_buffer(hard_iface, new_len);

	atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));

	/* reset the sending counter */
	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

	return tt_changes_fill_buffer(bat_priv,
				      hard_iface->packet_buff + BATMAN_OGM_HLEN,
				      hard_iface->packet_len - BATMAN_OGM_HLEN);
}
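
/* reset_packet_buffer() shrinks the buffer back to the bare OGM header,
 * i.e. the next OGM carries no TT change entries. */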

static int reset_packet_buffer(struct bat_priv *bat_priv,
			       struct hard_iface *hard_iface)
{
	realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN);
	return 0;
}
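
/* refresh the TT payload on the primary interface (if needed) and hand the
 * interface over to the active routing algorithm for OGM scheduling */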

void schedule_bat_ogm(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hard_iface *primary_if;
	int tt_num_changes = -1;

	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
	    (hard_iface->if_status == IF_TO_BE_REMOVED))
		return;

	/**
	 * the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
		hard_iface->if_status = IF_ACTIVE;

	primary_if = primary_if_get_selected(bat_priv);

	if (hard_iface == primary_if) {
		/* if at least one change happened */
		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
			tt_commit_changes(bat_priv);
			tt_num_changes = prepare_packet_buffer(bat_priv,
							       hard_iface);
		}

		/* if the changes have been sent often enough */
		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
			tt_num_changes = reset_packet_buffer(bat_priv,
							     hard_iface);
	}

	if (primary_if)
		hardif_free_ref(primary_if);

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes);
}

static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}
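
/* queue the forw_packet on the broadcast list and arm its delayed work so
 * that send_outstanding_bcast_packet() fires after send_time jiffies */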

static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase probability for being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
			     const struct sk_buff *skb, unsigned long delay)
{
	struct hard_iface *primary_if = NULL;
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
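
/* Illustrative sketch (not taken from this file): a transmit-path caller
 * would typically queue a broadcast and stay responsible for its own skb,
 * as documented above:
 *
 *	if (add_bcast_packet_to_list(bat_priv, skb, 1) != NETDEV_TX_OK)
 *		goto dropped;		(hypothetical label in the caller)
 *	consume_skb(skb);		(the queued copy lives on independently)
 */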

static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, hard_iface, broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  ((5 * HZ) / 1000));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
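
/* delayed work callback for queued OGMs: take the packet off the forwarding
 * list, let the routing algorithm emit it and, for our own OGMs, schedule
 * the next one right away */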

void send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queues wake up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}
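
/* cancel and free all queued broadcast and OGM packets, or only those
 * belonging to the given interface if one is passed */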

void purge_outstanding_packets(struct bat_priv *bat_priv,
			       const struct hard_iface *hard_iface)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			hard_iface->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);

		spin_lock_bh(&bat_priv->forw_bcast_list_lock);
		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * send_outstanding_bat_ogm_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);

		spin_lock_bh(&bat_priv->forw_bat_list_lock);
		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}