/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(BATADV_ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(BATADV_ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
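
/* Usage sketch (illustrative only, not part of this file): the function
 * takes ownership of the skb and frees it on failure, so a caller that
 * wants to keep its own reference should hand over a clone. "iface" and
 * "orig_addr" below are hypothetical names for this example:
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *	if (clone)
 *		batadv_send_skb_packet(clone, iface, orig_addr);
 *
 * This mirrors how batadv_send_outstanding_bcast_packet() below clones the
 * saved skb once per matching hard interface.
 */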
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() (where the originator mac is set) and
	 * outdated packets (especially ones with uninitialized mac addresses)
	 * still sitting in the packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}
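
/* Timing sketch (assumption stated for illustration): send_time is a delay
 * in jiffies, as queue_delayed_work() expects. A caller converting from
 * milliseconds would write something like:
 *
 *	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
 *					 msecs_to_jiffies(5));
 *
 * which is exactly how the rebroadcast path below re-arms the timer.
 */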
/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
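
/* Caller-side sketch (illustrative only): since the skb is copied rather
 * than consumed, the transmit path is expected to free its own reference
 * after queueing. The delay value of 1 jiffy is an assumption made for
 * this example, not a requirement of the API:
 *
 *	batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
 *	kfree_skb(skb);		// a copy is stored in the bcast list
 */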
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
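
/* Rebroadcast schedule in effect above: each broadcast is transmitted
 * three times per matching hard interface (num_packets 0, 1, 2), roughly
 * 5 ms apart, before the forw_packet is freed and its queue slot is
 * returned via bcast_queue_left.
 */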
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue
	 * to determine the queue's wake up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}
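
/* Queue accounting note: only forwarded OGMs (forw_packet->own == false)
 * count against batman_queue_left; self-generated OGMs instead re-arm the
 * scheduler via batadv_schedule_bat_ogm() so the queue always holds a
 * packet from which to derive its next wake-up time.
 */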
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
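
/* Cancellation rationale for the loops above: cancel_delayed_work_sync()
 * returns true if the work was still pending, i.e. the send callback never
 * ran for this entry. Only then is the entry still linked into the list
 * and must be unlinked and freed here; if it returns false, the callback
 * already removed and freed the packet itself.
 */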