2017-11-19 17:05:11 +03:00
// SPDX-License-Identifier: GPL-2.0
2021-01-01 02:00:01 +03:00
/* Copyright (C) B.A.T.M.A.N. contributors:
2011-07-30 14:04:12 +04:00
*
* Marek Lindner , Simon Wunderlich
*/
2016-05-15 12:07:46 +03:00
# include "bat_iv_ogm.h"
2011-07-30 14:04:12 +04:00
# include "main.h"
2015-04-17 20:40:28 +03:00
# include <linux/atomic.h>
# include <linux/bitmap.h>
# include <linux/bitops.h>
# include <linux/bug.h>
# include <linux/byteorder/generic.h>
# include <linux/cache.h>
# include <linux/errno.h>
# include <linux/etherdevice.h>
2017-11-19 19:12:02 +03:00
# include <linux/gfp.h>
2015-04-17 20:40:28 +03:00
# include <linux/if_ether.h>
# include <linux/init.h>
# include <linux/jiffies.h>
2016-05-02 20:45:34 +03:00
# include <linux/kernel.h>
2016-01-16 12:29:53 +03:00
# include <linux/kref.h>
2016-05-15 12:07:42 +03:00
# include <linux/list.h>
2019-10-03 18:02:01 +03:00
# include <linux/lockdep.h>
# include <linux/mutex.h>
2015-04-17 20:40:28 +03:00
# include <linux/netdevice.h>
2016-07-03 14:31:40 +03:00
# include <linux/netlink.h>
2015-04-17 20:40:28 +03:00
# include <linux/pkt_sched.h>
2020-08-17 09:09:48 +03:00
# include <linux/prandom.h>
2015-04-17 20:40:28 +03:00
# include <linux/printk.h>
# include <linux/random.h>
# include <linux/rculist.h>
# include <linux/rcupdate.h>
# include <linux/skbuff.h>
# include <linux/slab.h>
# include <linux/spinlock.h>
# include <linux/stddef.h>
# include <linux/string.h>
# include <linux/types.h>
# include <linux/workqueue.h>
2016-07-03 14:31:40 +03:00
# include <net/genetlink.h>
# include <net/netlink.h>
2017-12-21 12:17:41 +03:00
# include <uapi/linux/batadv_packet.h>
2016-07-03 14:31:40 +03:00
# include <uapi/linux/batman_adv.h>
2015-04-17 20:40:28 +03:00
2016-05-15 12:07:46 +03:00
# include "bat_algo.h"
2015-04-17 20:40:28 +03:00
# include "bitarray.h"
2016-07-03 13:46:33 +03:00
# include "gateway_client.h"
2015-04-17 20:40:28 +03:00
# include "hard-interface.h"
# include "hash.h"
2016-05-16 00:48:31 +03:00
# include "log.h"
2016-07-03 14:31:40 +03:00
# include "netlink.h"
2015-04-17 20:40:28 +03:00
# include "network-coding.h"
2011-07-30 14:04:12 +04:00
# include "originator.h"
# include "routing.h"
# include "send.h"
2015-04-17 20:40:28 +03:00
# include "translation-table.h"
2016-05-15 12:07:43 +03:00
# include "tvlv.h"
2011-07-30 14:04:12 +04:00
2016-05-02 20:45:34 +03:00
/* forward declaration: worker callback used by INIT_DELAYED_WORK() below */
static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work);
2013-08-17 14:44:44 +04:00
/**
 * enum batadv_dup_status - duplicate status
 */
enum batadv_dup_status {
	/** @BATADV_NO_DUP: the packet is no duplicate */
	BATADV_NO_DUP = 0,

	/**
	 * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for
	 * the neighbor)
	 */
	BATADV_ORIG_DUP,

	/** @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor */
	BATADV_NEIGH_DUP,

	/**
	 * @BATADV_PROTECTED: originator is currently protected (after reboot)
	 */
	BATADV_PROTECTED,
};
2013-04-08 11:38:12 +04:00
/**
2017-12-02 21:51:47 +03:00
* batadv_ring_buffer_set ( ) - update the ring buffer with the given value
2013-04-08 11:38:12 +04:00
* @ lq_recv : pointer to the ring buffer
* @ lq_index : index to store the value at
* @ value : value to store in the ring buffer
*/
2015-05-26 19:34:26 +03:00
static void batadv_ring_buffer_set ( u8 lq_recv [ ] , u8 * lq_index , u8 value )
2013-04-08 11:38:12 +04:00
{
lq_recv [ * lq_index ] = value ;
* lq_index = ( * lq_index + 1 ) % BATADV_TQ_GLOBAL_WINDOW_SIZE ;
}
/**
2017-12-02 21:51:47 +03:00
* batadv_ring_buffer_avg ( ) - compute the average of all non - zero values stored
2013-04-08 11:38:12 +04:00
* in the given ring buffer
* @ lq_recv : pointer to the ring buffer
*
2015-09-15 20:00:48 +03:00
* Return : computed average value .
2013-04-08 11:38:12 +04:00
*/
2015-05-26 19:34:26 +03:00
static u8 batadv_ring_buffer_avg ( const u8 lq_recv [ ] )
2013-04-08 11:38:12 +04:00
{
2015-05-26 19:34:26 +03:00
const u8 * ptr ;
u16 count = 0 ;
u16 i = 0 ;
u16 sum = 0 ;
2013-04-08 11:38:12 +04:00
ptr = lq_recv ;
while ( i < BATADV_TQ_GLOBAL_WINDOW_SIZE ) {
if ( * ptr ! = 0 ) {
count + + ;
sum + = * ptr ;
}
i + + ;
ptr + + ;
}
if ( count = = 0 )
return 0 ;
2015-05-26 19:34:26 +03:00
return ( u8 ) ( sum / count ) ;
2013-04-08 11:38:12 +04:00
}
2013-06-20 03:49:39 +04:00
2013-09-02 14:15:02 +04:00
/**
2017-12-02 21:51:47 +03:00
* batadv_iv_ogm_orig_get ( ) - retrieve or create ( if does not exist ) an
* originator
2013-09-02 14:15:02 +04:00
* @ bat_priv : the bat priv with all the soft interface information
* @ addr : mac address of the originator
*
2015-09-15 20:00:48 +03:00
* Return : the originator object corresponding to the passed mac address or NULL
2013-09-02 14:15:02 +04:00
* on failure .
2020-06-01 21:13:21 +03:00
* If the object does not exist , it is created and initialised .
2013-09-02 14:15:02 +04:00
*/
static struct batadv_orig_node *
2015-05-26 19:34:26 +03:00
batadv_iv_ogm_orig_get ( struct batadv_priv * bat_priv , const u8 * addr )
2013-09-02 14:15:02 +04:00
{
struct batadv_orig_node * orig_node ;
2017-12-26 17:14:01 +03:00
int hash_added ;
2013-09-02 14:15:02 +04:00
orig_node = batadv_orig_hash_find ( bat_priv , addr ) ;
if ( orig_node )
return orig_node ;
orig_node = batadv_orig_node_new ( bat_priv , addr ) ;
if ( ! orig_node )
return NULL ;
spin_lock_init ( & orig_node - > bat_iv . ogm_cnt_lock ) ;
2016-07-15 18:39:21 +03:00
kref_get ( & orig_node - > refcount ) ;
2013-09-02 14:15:02 +04:00
hash_added = batadv_hash_add ( bat_priv - > orig_hash , batadv_compare_orig ,
batadv_choose_orig , orig_node ,
& orig_node - > hash_entry ) ;
if ( hash_added ! = 0 )
2016-07-15 18:39:21 +03:00
goto free_orig_node_hash ;
2013-09-02 14:15:02 +04:00
return orig_node ;
2016-07-15 18:39:21 +03:00
free_orig_node_hash :
2018-08-16 17:54:45 +03:00
/* reference for batadv_hash_add */
2016-01-17 13:01:09 +03:00
batadv_orig_node_put ( orig_node ) ;
2018-08-16 17:54:45 +03:00
/* reference from batadv_orig_node_new */
2016-01-17 13:01:09 +03:00
batadv_orig_node_put ( orig_node ) ;
2013-09-02 14:15:02 +04:00
return NULL ;
}
2012-06-06 00:31:31 +04:00
static struct batadv_neigh_node *
batadv_iv_ogm_neigh_new ( struct batadv_hard_iface * hard_iface ,
2015-05-26 19:34:26 +03:00
const u8 * neigh_addr ,
2012-06-06 00:31:31 +04:00
struct batadv_orig_node * orig_node ,
2013-03-25 16:49:46 +04:00
struct batadv_orig_node * orig_neigh )
2012-03-01 11:35:21 +04:00
{
2015-07-25 23:57:43 +03:00
struct batadv_neigh_node * neigh_node ;
2012-03-01 11:35:21 +04:00
2016-05-02 20:52:08 +03:00
neigh_node = batadv_neigh_node_get_or_create ( orig_node ,
hard_iface , neigh_addr ) ;
2012-03-01 11:35:21 +04:00
if ( ! neigh_node )
goto out ;
2013-11-13 22:14:46 +04:00
neigh_node - > orig_node = orig_neigh ;
2012-03-01 11:35:21 +04:00
out :
return neigh_node ;
}
2012-06-06 00:31:31 +04:00
static int batadv_iv_ogm_iface_enable ( struct batadv_hard_iface * hard_iface )
2011-07-30 14:33:33 +04:00
{
2012-06-06 00:31:30 +04:00
struct batadv_ogm_packet * batadv_ogm_packet ;
2012-08-02 19:20:26 +04:00
unsigned char * ogm_buff ;
2015-05-26 19:34:26 +03:00
u32 random_seqno ;
2012-02-07 13:20:46 +04:00
2019-10-03 18:02:01 +03:00
mutex_lock ( & hard_iface - > bat_iv . ogm_buff_mutex ) ;
2012-02-07 13:20:46 +04:00
/* randomize initial seqno to avoid collision */
get_random_bytes ( & random_seqno , sizeof ( random_seqno ) ) ;
2012-08-02 19:20:26 +04:00
atomic_set ( & hard_iface - > bat_iv . ogm_seqno , random_seqno ) ;
2011-07-30 14:33:33 +04:00
2012-08-02 19:20:26 +04:00
hard_iface - > bat_iv . ogm_buff_len = BATADV_OGM_HLEN ;
ogm_buff = kmalloc ( hard_iface - > bat_iv . ogm_buff_len , GFP_ATOMIC ) ;
2019-10-03 18:02:01 +03:00
if ( ! ogm_buff ) {
mutex_unlock ( & hard_iface - > bat_iv . ogm_buff_mutex ) ;
2014-12-26 14:41:26 +03:00
return - ENOMEM ;
2019-10-03 18:02:01 +03:00
}
2012-02-07 13:20:48 +04:00
2012-08-02 19:20:26 +04:00
hard_iface - > bat_iv . ogm_buff = ogm_buff ;
batadv_ogm_packet = ( struct batadv_ogm_packet * ) ogm_buff ;
2013-12-02 23:38:31 +04:00
batadv_ogm_packet - > packet_type = BATADV_IV_OGM ;
batadv_ogm_packet - > version = BATADV_COMPAT_VERSION ;
batadv_ogm_packet - > ttl = 2 ;
2012-06-06 00:31:30 +04:00
batadv_ogm_packet - > flags = BATADV_NO_FLAGS ;
2013-04-23 17:39:58 +04:00
batadv_ogm_packet - > reserved = 0 ;
2012-06-06 00:31:30 +04:00
batadv_ogm_packet - > tq = BATADV_TQ_MAX_VALUE ;
2012-02-07 13:20:48 +04:00
2019-10-03 18:02:01 +03:00
mutex_unlock ( & hard_iface - > bat_iv . ogm_buff_mutex ) ;
2014-12-26 14:41:26 +03:00
return 0 ;
2011-07-30 14:33:33 +04:00
}
2012-06-06 00:31:31 +04:00
static void batadv_iv_ogm_iface_disable ( struct batadv_hard_iface * hard_iface )
2012-02-07 13:20:47 +04:00
{
2019-10-03 18:02:01 +03:00
mutex_lock ( & hard_iface - > bat_iv . ogm_buff_mutex ) ;
2012-08-02 19:20:26 +04:00
kfree ( hard_iface - > bat_iv . ogm_buff ) ;
hard_iface - > bat_iv . ogm_buff = NULL ;
2019-10-03 18:02:01 +03:00
mutex_unlock ( & hard_iface - > bat_iv . ogm_buff_mutex ) ;
2012-02-07 13:20:47 +04:00
}
2012-06-06 00:31:31 +04:00
static void batadv_iv_ogm_iface_update_mac ( struct batadv_hard_iface * hard_iface )
2011-07-30 14:33:33 +04:00
{
2012-06-06 00:31:30 +04:00
struct batadv_ogm_packet * batadv_ogm_packet ;
2019-10-03 18:02:01 +03:00
void * ogm_buff ;
2011-07-30 14:33:33 +04:00
2019-10-03 18:02:01 +03:00
mutex_lock ( & hard_iface - > bat_iv . ogm_buff_mutex ) ;
ogm_buff = hard_iface - > bat_iv . ogm_buff ;
if ( ! ogm_buff )
goto unlock ;
batadv_ogm_packet = ogm_buff ;
2014-01-22 03:42:11 +04:00
ether_addr_copy ( batadv_ogm_packet - > orig ,
hard_iface - > net_dev - > dev_addr ) ;
ether_addr_copy ( batadv_ogm_packet - > prev_sender ,
hard_iface - > net_dev - > dev_addr ) ;
2019-10-03 18:02:01 +03:00
unlock :
mutex_unlock ( & hard_iface - > bat_iv . ogm_buff_mutex ) ;
2011-07-30 14:33:33 +04:00
}
2012-06-06 00:31:31 +04:00
static void
batadv_iv_ogm_primary_iface_set ( struct batadv_hard_iface * hard_iface )
2011-07-30 14:33:33 +04:00
{
2012-06-06 00:31:30 +04:00
struct batadv_ogm_packet * batadv_ogm_packet ;
2019-10-03 18:02:01 +03:00
void * ogm_buff ;
2011-07-30 14:33:33 +04:00
2019-10-03 18:02:01 +03:00
mutex_lock ( & hard_iface - > bat_iv . ogm_buff_mutex ) ;
ogm_buff = hard_iface - > bat_iv . ogm_buff ;
if ( ! ogm_buff )
goto unlock ;
batadv_ogm_packet = ogm_buff ;
2013-12-02 23:38:31 +04:00
batadv_ogm_packet - > ttl = BATADV_TTL ;
2019-10-03 18:02:01 +03:00
unlock :
mutex_unlock ( & hard_iface - > bat_iv . ogm_buff_mutex ) ;
2011-07-30 14:33:33 +04:00
}
2011-08-03 11:09:30 +04:00
/* when do we schedule our own ogm to be sent */
2012-05-12 20:33:51 +04:00
static unsigned long
2012-06-06 00:31:31 +04:00
batadv_iv_ogm_emit_send_time ( const struct batadv_priv * bat_priv )
2011-08-03 11:09:30 +04:00
{
2012-06-04 00:19:17 +04:00
unsigned int msecs ;
msecs = atomic_read ( & bat_priv - > orig_interval ) - BATADV_JITTER ;
2020-04-13 22:23:29 +03:00
msecs + = prandom_u32_max ( 2 * BATADV_JITTER ) ;
2012-06-04 00:19:17 +04:00
return jiffies + msecs_to_jiffies ( msecs ) ;
2011-08-03 11:09:30 +04:00
}
/* when do we schedule a ogm packet to be sent */
2012-05-12 20:33:51 +04:00
static unsigned long batadv_iv_ogm_fwd_send_time ( void )
2011-08-03 11:09:30 +04:00
{
2020-04-13 22:23:29 +03:00
return jiffies + msecs_to_jiffies ( prandom_u32_max ( BATADV_JITTER / 2 ) ) ;
2011-08-03 11:09:30 +04:00
}
/* apply hop penalty for a normal link */
2015-05-26 19:34:26 +03:00
static u8 batadv_hop_penalty ( u8 tq , const struct batadv_priv * bat_priv )
2011-08-03 11:09:30 +04:00
{
int hop_penalty = atomic_read ( & bat_priv - > hop_penalty ) ;
2012-06-04 00:19:17 +04:00
int new_tq ;
new_tq = tq * ( BATADV_TQ_MAX_VALUE - hop_penalty ) ;
new_tq / = BATADV_TQ_MAX_VALUE ;
return new_tq ;
2011-08-03 11:09:30 +04:00
}
2015-11-23 21:57:21 +03:00
/**
2017-12-02 21:51:47 +03:00
* batadv_iv_ogm_aggr_packet ( ) - checks if there is another OGM attached
2015-11-23 21:57:21 +03:00
* @ buff_pos : current position in the skb
* @ packet_len : total length of the skb
2019-08-22 09:55:36 +03:00
* @ ogm_packet : potential OGM in buffer
2015-11-23 21:57:21 +03:00
*
* Return : true if there is enough space for another OGM , false otherwise .
*/
2019-08-22 09:55:36 +03:00
static bool
batadv_iv_ogm_aggr_packet ( int buff_pos , int packet_len ,
const struct batadv_ogm_packet * ogm_packet )
2011-07-30 14:04:12 +04:00
{
2012-05-12 04:09:39 +04:00
int next_buff_pos = 0 ;
2019-08-22 09:55:36 +03:00
/* check if there is enough space for the header */
next_buff_pos + = buff_pos + sizeof ( * ogm_packet ) ;
if ( next_buff_pos > packet_len )
return false ;
/* check if there is enough space for the optional TVLV */
next_buff_pos + = ntohs ( ogm_packet - > tvlv_len ) ;
2011-07-30 14:04:12 +04:00
return ( next_buff_pos < = packet_len ) & &
2012-06-04 00:19:17 +04:00
( next_buff_pos < = BATADV_MAX_AGGREGATION_BYTES ) ;
2011-07-30 14:04:12 +04:00
}
2011-08-03 11:09:30 +04:00
/* send a batman ogm to a given interface */
2012-06-06 00:31:31 +04:00
static void batadv_iv_ogm_send_to_if ( struct batadv_forw_packet * forw_packet ,
struct batadv_hard_iface * hard_iface )
2011-08-03 11:09:30 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_priv * bat_priv = netdev_priv ( hard_iface - > soft_iface ) ;
2014-12-26 14:41:28 +03:00
const char * fwd_str ;
2015-05-26 19:34:26 +03:00
u8 packet_num ;
s16 buff_pos ;
2012-06-06 00:31:30 +04:00
struct batadv_ogm_packet * batadv_ogm_packet ;
2011-08-03 11:09:30 +04:00
struct sk_buff * skb ;
2015-05-26 19:34:26 +03:00
u8 * packet_pos ;
2011-08-03 11:09:30 +04:00
2012-06-04 00:19:19 +04:00
if ( hard_iface - > if_status ! = BATADV_IF_ACTIVE )
2011-08-03 11:09:30 +04:00
return ;
packet_num = 0 ;
buff_pos = 0 ;
2012-07-08 20:33:51 +04:00
packet_pos = forw_packet - > skb - > data ;
batadv_ogm_packet = ( struct batadv_ogm_packet * ) packet_pos ;
2011-08-03 11:09:30 +04:00
/* adjust all flags and log packets */
2012-05-12 20:33:51 +04:00
while ( batadv_iv_ogm_aggr_packet ( buff_pos , forw_packet - > packet_len ,
2019-08-22 09:55:36 +03:00
batadv_ogm_packet ) ) {
2011-08-03 11:09:30 +04:00
/* we might have aggregated direct link packets with an
2012-05-12 04:09:43 +04:00
* ordinary base packet
*/
2012-07-08 18:32:09 +04:00
if ( forw_packet - > direct_link_flags & BIT ( packet_num ) & &
forw_packet - > if_incoming = = hard_iface )
2012-06-06 00:31:30 +04:00
batadv_ogm_packet - > flags | = BATADV_DIRECTLINK ;
2011-08-03 11:09:30 +04:00
else
2012-06-06 00:31:30 +04:00
batadv_ogm_packet - > flags & = ~ BATADV_DIRECTLINK ;
2011-08-03 11:09:30 +04:00
2012-07-08 20:33:51 +04:00
if ( packet_num > 0 | | ! forw_packet - > own )
fwd_str = " Forwarding " ;
else
fwd_str = " Sending own " ;
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_BATMAN , bat_priv ,
2013-04-23 17:40:01 +04:00
" %s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s) on interface %s [%pM] \n " ,
2012-05-12 15:48:58 +04:00
fwd_str , ( packet_num > 0 ? " aggregated " : " " ) ,
2012-06-06 00:31:30 +04:00
batadv_ogm_packet - > orig ,
ntohl ( batadv_ogm_packet - > seqno ) ,
2013-12-02 23:38:31 +04:00
batadv_ogm_packet - > tq , batadv_ogm_packet - > ttl ,
2015-04-23 19:22:24 +03:00
( ( batadv_ogm_packet - > flags & BATADV_DIRECTLINK ) ?
2012-05-12 15:48:58 +04:00
" on " : " off " ) ,
2013-04-23 17:40:01 +04:00
hard_iface - > net_dev - > name ,
2012-05-12 15:48:58 +04:00
hard_iface - > net_dev - > dev_addr ) ;
2011-08-03 11:09:30 +04:00
2012-06-04 00:19:13 +04:00
buff_pos + = BATADV_OGM_HLEN ;
2013-04-23 17:39:57 +04:00
buff_pos + = ntohs ( batadv_ogm_packet - > tvlv_len ) ;
2011-08-03 11:09:30 +04:00
packet_num + + ;
2012-07-08 20:33:51 +04:00
packet_pos = forw_packet - > skb - > data + buff_pos ;
batadv_ogm_packet = ( struct batadv_ogm_packet * ) packet_pos ;
2011-08-03 11:09:30 +04:00
}
/* create clone because function is called more than once */
skb = skb_clone ( forw_packet - > skb , GFP_ATOMIC ) ;
2012-04-20 19:02:45 +04:00
if ( skb ) {
2012-06-04 00:19:20 +04:00
batadv_inc_counter ( bat_priv , BATADV_CNT_MGMT_TX ) ;
batadv_add_counter ( bat_priv , BATADV_CNT_MGMT_TX_BYTES ,
2012-04-20 19:02:45 +04:00
skb - > len + ETH_HLEN ) ;
2016-01-16 11:40:15 +03:00
batadv_send_broadcast_skb ( skb , hard_iface ) ;
2012-04-20 19:02:45 +04:00
}
2011-08-03 11:09:30 +04:00
}
/* send a batman ogm packet */
2012-06-06 00:31:31 +04:00
static void batadv_iv_ogm_emit ( struct batadv_forw_packet * forw_packet )
2011-08-03 11:09:30 +04:00
{
struct net_device * soft_iface ;
if ( ! forw_packet - > if_incoming ) {
2012-03-07 12:07:45 +04:00
pr_err ( " Error - can't forward packet: incoming iface not specified \n " ) ;
2016-06-14 23:56:50 +03:00
return ;
2011-08-03 11:09:30 +04:00
}
soft_iface = forw_packet - > if_incoming - > soft_iface ;
2013-11-13 22:14:49 +04:00
if ( WARN_ON ( ! forw_packet - > if_outgoing ) )
2016-06-14 23:56:50 +03:00
return ;
2011-08-03 11:09:30 +04:00
2021-05-18 22:00:27 +03:00
if ( forw_packet - > if_outgoing - > soft_iface ! = soft_iface ) {
pr_warn ( " %s: soft interface switch for queued OGM \n " , __func__ ) ;
2016-06-14 23:56:50 +03:00
return ;
2021-05-18 22:00:27 +03:00
}
2011-08-03 11:09:30 +04:00
2013-11-13 22:14:49 +04:00
if ( forw_packet - > if_incoming - > if_status ! = BATADV_IF_ACTIVE )
2016-06-14 23:56:50 +03:00
return ;
2011-08-03 11:09:30 +04:00
2013-11-13 22:14:49 +04:00
/* only for one specific outgoing interface */
batadv_iv_ogm_send_to_if ( forw_packet , forw_packet - > if_outgoing ) ;
2011-08-03 11:09:30 +04:00
}
2013-11-13 22:14:49 +04:00
/**
2017-12-02 21:51:47 +03:00
* batadv_iv_ogm_can_aggregate ( ) - find out if an OGM can be aggregated on an
2013-11-13 22:14:49 +04:00
* existing forward packet
* @ new_bat_ogm_packet : OGM packet to be aggregated
* @ bat_priv : the bat priv with all the soft interface information
* @ packet_len : ( total ) length of the OGM
* @ send_time : timestamp ( jiffies ) when the packet is to be sent
2014-07-15 11:41:05 +04:00
* @ directlink : true if this is a direct link packet
2013-11-13 22:14:49 +04:00
* @ if_incoming : interface where the packet was received
* @ if_outgoing : interface for which the retransmission should be considered
* @ forw_packet : the forwarded packet which should be checked
*
2015-09-15 20:00:48 +03:00
* Return : true if new_packet can be aggregated with forw_packet
2013-11-13 22:14:49 +04:00
*/
2012-05-12 20:33:51 +04:00
static bool
2012-06-06 00:31:30 +04:00
batadv_iv_ogm_can_aggregate ( const struct batadv_ogm_packet * new_bat_ogm_packet ,
2012-06-06 00:31:31 +04:00
struct batadv_priv * bat_priv ,
2012-05-12 20:33:51 +04:00
int packet_len , unsigned long send_time ,
bool directlink ,
2012-06-06 00:31:31 +04:00
const struct batadv_hard_iface * if_incoming ,
2013-11-13 22:14:49 +04:00
const struct batadv_hard_iface * if_outgoing ,
2012-06-06 00:31:31 +04:00
const struct batadv_forw_packet * forw_packet )
2011-08-03 11:09:30 +04:00
{
2012-06-06 00:31:30 +04:00
struct batadv_ogm_packet * batadv_ogm_packet ;
2011-08-03 11:09:30 +04:00
int aggregated_bytes = forw_packet - > packet_len + packet_len ;
2012-06-06 00:31:31 +04:00
struct batadv_hard_iface * primary_if = NULL ;
2011-08-03 11:09:30 +04:00
bool res = false ;
2012-06-04 00:19:17 +04:00
unsigned long aggregation_end_time ;
2011-08-03 11:09:30 +04:00
2012-06-06 00:31:30 +04:00
batadv_ogm_packet = ( struct batadv_ogm_packet * ) forw_packet - > skb - > data ;
2012-06-04 00:19:17 +04:00
aggregation_end_time = send_time ;
aggregation_end_time + = msecs_to_jiffies ( BATADV_MAX_AGGREGATION_MS ) ;
2011-08-03 11:09:30 +04:00
2012-05-12 04:09:43 +04:00
/* we can aggregate the current packet to this aggregated packet
2011-08-03 11:09:30 +04:00
* if :
*
* - the send time is within our MAX_AGGREGATION_MS time
2021-03-30 22:15:26 +03:00
* - the resulting packet won ' t be bigger than
2011-08-03 11:09:30 +04:00
* MAX_AGGREGATION_BYTES
2014-12-26 14:41:29 +03:00
* otherwise aggregation is not possible
2011-08-03 11:09:30 +04:00
*/
2014-12-26 14:41:29 +03:00
if ( ! time_before ( send_time , forw_packet - > send_time ) | |
! time_after_eq ( aggregation_end_time , forw_packet - > send_time ) )
return false ;
if ( aggregated_bytes > BATADV_MAX_AGGREGATION_BYTES )
return false ;
/* packet is not leaving on the same interface. */
if ( forw_packet - > if_outgoing ! = if_outgoing )
return false ;
/* check aggregation compatibility
* - > direct link packets are broadcasted on
* their interface only
* - > aggregate packet if the current packet is
* a " global " packet as well as the base
* packet
*/
primary_if = batadv_primary_if_get_selected ( bat_priv ) ;
if ( ! primary_if )
return false ;
2013-11-13 22:14:49 +04:00
2014-12-26 14:41:29 +03:00
/* packets without direct link flag and high TTL
* are flooded through the net
*/
if ( ! directlink & &
! ( batadv_ogm_packet - > flags & BATADV_DIRECTLINK ) & &
batadv_ogm_packet - > ttl ! = 1 & &
/* own packets originating non-primary
* interfaces leave only that interface
*/
( ! forw_packet - > own | |
forw_packet - > if_incoming = = primary_if ) ) {
res = true ;
goto out ;
}
2011-08-03 11:09:30 +04:00
2014-12-26 14:41:29 +03:00
/* if the incoming packet is sent via this one
* interface only - we still can aggregate
*/
if ( directlink & &
new_bat_ogm_packet - > ttl = = 1 & &
forw_packet - > if_incoming = = if_incoming & &
/* packets from direct neighbors or
* own secondary interface packets
* ( = secondary interface packets in general )
*/
( batadv_ogm_packet - > flags & BATADV_DIRECTLINK | |
( forw_packet - > own & &
forw_packet - > if_incoming ! = primary_if ) ) ) {
res = true ;
goto out ;
2011-08-03 11:09:30 +04:00
}
out :
2021-08-08 20:11:08 +03:00
batadv_hardif_put ( primary_if ) ;
2011-08-03 11:09:30 +04:00
return res ;
}
2014-01-16 00:17:54 +04:00
/**
2017-12-02 21:51:47 +03:00
* batadv_iv_ogm_aggregate_new ( ) - create a new aggregated packet and add this
2013-11-13 22:14:49 +04:00
* packet to it .
* @ packet_buff : pointer to the OGM
* @ packet_len : ( total ) length of the OGM
* @ send_time : timestamp ( jiffies ) when the packet is to be sent
* @ direct_link : whether this OGM has direct link status
* @ if_incoming : interface where the packet was received
* @ if_outgoing : interface for which the retransmission should be considered
* @ own_packet : true if it is a self - generated ogm
*/
2012-05-12 20:33:51 +04:00
static void batadv_iv_ogm_aggregate_new ( const unsigned char * packet_buff ,
int packet_len , unsigned long send_time ,
bool direct_link ,
2012-06-06 00:31:31 +04:00
struct batadv_hard_iface * if_incoming ,
2013-11-13 22:14:49 +04:00
struct batadv_hard_iface * if_outgoing ,
2012-05-12 20:33:51 +04:00
int own_packet )
2011-08-03 11:09:30 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_priv * bat_priv = netdev_priv ( if_incoming - > soft_iface ) ;
struct batadv_forw_packet * forw_packet_aggr ;
2017-02-17 13:17:06 +03:00
struct sk_buff * skb ;
2011-08-03 11:09:30 +04:00
unsigned char * skb_buff ;
2012-06-04 00:19:17 +04:00
unsigned int skb_size ;
2016-06-20 22:39:54 +03:00
atomic_t * queue_left = own_packet ? NULL : & bat_priv - > batman_queue_left ;
2011-08-03 11:09:30 +04:00
2014-12-26 14:41:31 +03:00
if ( atomic_read ( & bat_priv - > aggregated_ogms ) & &
packet_len < BATADV_MAX_AGGREGATION_BYTES )
2012-11-04 20:11:45 +04:00
skb_size = BATADV_MAX_AGGREGATION_BYTES ;
2011-08-03 11:09:30 +04:00
else
2012-11-04 20:11:45 +04:00
skb_size = packet_len ;
2013-04-03 00:28:44 +04:00
skb_size + = ETH_HLEN ;
2011-08-03 11:09:30 +04:00
2017-02-17 13:17:06 +03:00
skb = netdev_alloc_skb_ip_align ( NULL , skb_size ) ;
if ( ! skb )
return ;
forw_packet_aggr = batadv_forw_packet_alloc ( if_incoming , if_outgoing ,
queue_left , bat_priv , skb ) ;
if ( ! forw_packet_aggr ) {
kfree_skb ( skb ) ;
2016-06-20 22:39:54 +03:00
return ;
}
2013-07-29 19:56:44 +04:00
forw_packet_aggr - > skb - > priority = TC_PRIO_CONTROL ;
2013-04-03 00:28:44 +04:00
skb_reserve ( forw_packet_aggr - > skb , ETH_HLEN ) ;
2011-08-03 11:09:30 +04:00
skb_buff = skb_put ( forw_packet_aggr - > skb , packet_len ) ;
forw_packet_aggr - > packet_len = packet_len ;
memcpy ( skb_buff , packet_buff , packet_len ) ;
forw_packet_aggr - > own = own_packet ;
2012-06-04 00:19:17 +04:00
forw_packet_aggr - > direct_link_flags = BATADV_NO_FLAGS ;
2011-08-03 11:09:30 +04:00
forw_packet_aggr - > send_time = send_time ;
/* save packet direct link flag status */
if ( direct_link )
forw_packet_aggr - > direct_link_flags | = 1 ;
INIT_DELAYED_WORK ( & forw_packet_aggr - > delayed_work ,
2016-05-02 20:45:34 +03:00
batadv_iv_send_outstanding_bat_ogm_packet ) ;
batman-adv: fix rare race conditions on interface removal
In rare cases during shutdown the following general protection fault can
happen:
general protection fault: 0000 [#1] SMP
Modules linked in: batman_adv(O-) [...]
CPU: 3 PID: 1714 Comm: rmmod Tainted: G O 4.6.0-rc6+ #1
[...]
Call Trace:
[<ffffffffa0363294>] batadv_hardif_disable_interface+0x29a/0x3a6 [batman_adv]
[<ffffffffa0373db4>] batadv_softif_destroy_netlink+0x4b/0xa4 [batman_adv]
[<ffffffff813b52f3>] __rtnl_link_unregister+0x48/0x92
[<ffffffff813b9240>] rtnl_link_unregister+0xc1/0xdb
[<ffffffff8108547c>] ? bit_waitqueue+0x87/0x87
[<ffffffffa03850d2>] batadv_exit+0x1a/0xf48 [batman_adv]
[<ffffffff810c26f9>] SyS_delete_module+0x136/0x1b0
[<ffffffff8144dc65>] entry_SYSCALL_64_fastpath+0x18/0xa8
[<ffffffff8108aaca>] ? trace_hardirqs_off_caller+0x37/0xa6
Code: 89 f7 e8 21 bd 0d e1 4d 85 e4 75 0e 31 f6 48 c7 c7 50 d7 3b a0 e8 50 16 f2 e0 49 8b 9c 24 28 01 00 00 48 85 db 0f 84 b2 00 00 00 <48> 8b 03 4d 85 ed 48 89 45 c8 74 09 4c 39 ab f8 00 00 00 75 1c
RIP [<ffffffffa0371852>] batadv_purge_outstanding_packets+0x1c8/0x291 [batman_adv]
RSP <ffff88001da5fd78>
---[ end trace 803b9bdc6a4a952b ]---
Kernel panic - not syncing: Fatal exception in interrupt
Kernel Offset: disabled
---[ end Kernel panic - not syncing: Fatal exception in interrupt
It does not happen often, but may potentially happen when frequently
shutting down and reinitializing an interface. With some carefully
placed msleep()s/mdelay()s it can be reproduced easily.
The issue is, that on interface removal, any still running worker thread
of a forwarding packet will race with the interface purging routine to
free a forwarding packet. Temporarily giving up a spin-lock to be able
to sleep in the purging routine is not safe.
Furthermore, there is a potential general protection fault not just for
the purging side shown above, but also on the worker side: Temporarily
removing a forw_packet from the according forw_{bcast,bat}_list will make
it impossible for the purging routine to catch and cancel it.
# How this patch tries to fix it:
With this patch we split the queue purging into three steps: Step 1),
removing forward packets from the queue of an interface and by that
claim it as our responsibility to free.
Step 2), we are either lucky to cancel a pending worker before it starts
to run. Or if it is already running, we wait and let it do its thing,
except two things:
Through the claiming in step 1) we prevent workers from a) re-arming
themselves. And b) prevent workers from freeing packets which we still
hold in the interface purging routine.
Finally, step 3, we are sure that no forwarding packets are pending or
even running anymore on the interface to remove. We can then safely free
the claimed forwarding packets.
Signed-off-by: Linus Lüssing <linus.luessing@c0d3.blue>
Signed-off-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
2016-11-01 11:44:44 +03:00
batadv_forw_packet_ogmv1_queue ( bat_priv , forw_packet_aggr , send_time ) ;
2011-08-03 11:09:30 +04:00
}
/* aggregate a new packet into the existing ogm packet */
2012-06-06 00:31:31 +04:00
static void batadv_iv_ogm_aggregate ( struct batadv_forw_packet * forw_packet_aggr ,
2012-05-12 20:33:51 +04:00
const unsigned char * packet_buff ,
int packet_len , bool direct_link )
2011-08-03 11:09:30 +04:00
{
2012-07-08 18:32:09 +04:00
unsigned long new_direct_link_flag ;
2011-08-03 11:09:30 +04:00
2017-06-18 10:59:28 +03:00
skb_put_data ( forw_packet_aggr - > skb , packet_buff , packet_len ) ;
2011-08-03 11:09:30 +04:00
forw_packet_aggr - > packet_len + = packet_len ;
forw_packet_aggr - > num_packets + + ;
/* save packet direct link flag status */
2012-07-08 18:32:09 +04:00
if ( direct_link ) {
new_direct_link_flag = BIT ( forw_packet_aggr - > num_packets ) ;
forw_packet_aggr - > direct_link_flags | = new_direct_link_flag ;
}
2011-08-03 11:09:30 +04:00
}
2013-11-13 22:14:49 +04:00
/**
2017-12-02 21:51:47 +03:00
* batadv_iv_ogm_queue_add ( ) - queue up an OGM for transmission
2013-11-13 22:14:49 +04:00
* @ bat_priv : the bat priv with all the soft interface information
* @ packet_buff : pointer to the OGM
* @ packet_len : ( total ) length of the OGM
* @ if_incoming : interface where the packet was received
* @ if_outgoing : interface for which the retransmission should be considered
* @ own_packet : true if it is a self - generated ogm
* @ send_time : timestamp ( jiffies ) when the packet is to be sent
*/
2012-06-06 00:31:31 +04:00
static void batadv_iv_ogm_queue_add ( struct batadv_priv * bat_priv ,
2012-05-12 20:33:51 +04:00
unsigned char * packet_buff ,
int packet_len ,
2012-06-06 00:31:31 +04:00
struct batadv_hard_iface * if_incoming ,
2013-11-13 22:14:49 +04:00
struct batadv_hard_iface * if_outgoing ,
2012-05-12 20:33:51 +04:00
int own_packet , unsigned long send_time )
2011-08-03 11:09:30 +04:00
{
2012-05-12 04:09:43 +04:00
/* _aggr -> pointer to the packet we want to aggregate with
2011-08-03 11:09:30 +04:00
* _pos - > pointer to the position in the queue
*/
2012-06-06 00:31:31 +04:00
struct batadv_forw_packet * forw_packet_aggr = NULL ;
struct batadv_forw_packet * forw_packet_pos = NULL ;
2012-06-06 00:31:30 +04:00
struct batadv_ogm_packet * batadv_ogm_packet ;
2011-08-03 11:09:30 +04:00
bool direct_link ;
2012-06-04 00:19:17 +04:00
unsigned long max_aggregation_jiffies ;
2011-08-03 11:09:30 +04:00
2012-06-06 00:31:30 +04:00
batadv_ogm_packet = ( struct batadv_ogm_packet * ) packet_buff ;
2014-12-26 14:41:32 +03:00
direct_link = ! ! ( batadv_ogm_packet - > flags & BATADV_DIRECTLINK ) ;
2012-06-04 00:19:17 +04:00
max_aggregation_jiffies = msecs_to_jiffies ( BATADV_MAX_AGGREGATION_MS ) ;
2011-08-03 11:09:30 +04:00
/* find position for the packet in the forward queue */
spin_lock_bh ( & bat_priv - > forw_bat_list_lock ) ;
/* own packets are not to be aggregated */
2014-12-26 14:41:32 +03:00
if ( atomic_read ( & bat_priv - > aggregated_ogms ) & & ! own_packet ) {
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for each entry iterators were conceived
list_for_each_entry(pos, head, member)
The hlist ones were greedy and wanted an extra parameter:
hlist_for_each_entry(tpos, pos, head, member)
Why did they need an extra pos parameter? I'm not quite sure. Not only
they don't really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.
Besides the semantic patch, there was some manual work required:
- Fix up the actual hlist iterators in linux/list.h
- Fix up the declaration of other iterators based on the hlist ones.
- A very small amount of places were using the 'node' parameter, this
was modified to use 'obj->member' instead.
- Coccinelle didn't handle the hlist_for_each_entry_safe iterator
properly, so those had to be fixed up manually.
The semantic patch which is mostly the work of Peter Senna Tschudin is here:
@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@
-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>
[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foudnation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-02-28 05:06:00 +04:00
hlist_for_each_entry ( forw_packet_pos ,
2011-08-03 11:09:30 +04:00
& bat_priv - > forw_bat_list , list ) {
2012-06-06 00:31:30 +04:00
if ( batadv_iv_ogm_can_aggregate ( batadv_ogm_packet ,
2012-05-12 20:33:51 +04:00
bat_priv , packet_len ,
send_time , direct_link ,
if_incoming ,
2013-11-13 22:14:49 +04:00
if_outgoing ,
2012-05-12 20:33:51 +04:00
forw_packet_pos ) ) {
2011-08-03 11:09:30 +04:00
forw_packet_aggr = forw_packet_pos ;
break ;
}
}
}
/* nothing to aggregate with - either aggregation disabled or no
2012-05-12 04:09:43 +04:00
* suitable aggregation packet found
*/
2011-08-03 11:09:30 +04:00
if ( ! forw_packet_aggr ) {
/* the following section can run without the lock */
spin_unlock_bh ( & bat_priv - > forw_bat_list_lock ) ;
2012-05-12 04:09:43 +04:00
/* if we could not aggregate this packet with one of the others
2011-08-03 11:09:30 +04:00
* we hold it back for a while , so that it might be aggregated
* later on
*/
2012-06-04 00:19:17 +04:00
if ( ! own_packet & & atomic_read ( & bat_priv - > aggregated_ogms ) )
send_time + = max_aggregation_jiffies ;
2011-08-03 11:09:30 +04:00
2012-05-12 20:33:51 +04:00
batadv_iv_ogm_aggregate_new ( packet_buff , packet_len ,
send_time , direct_link ,
2013-11-13 22:14:49 +04:00
if_incoming , if_outgoing ,
own_packet ) ;
2011-08-03 11:09:30 +04:00
} else {
2012-05-12 20:33:51 +04:00
batadv_iv_ogm_aggregate ( forw_packet_aggr , packet_buff ,
packet_len , direct_link ) ;
2011-08-03 11:09:30 +04:00
spin_unlock_bh ( & bat_priv - > forw_bat_list_lock ) ;
}
}
2012-06-06 00:31:31 +04:00
/**
 * batadv_iv_ogm_forward() - prepare a received OGM for rebroadcast and queue it
 * @orig_node: originator which emitted the OGM (not referenced by this
 *  function's body)
 * @ethhdr: Ethernet header of the received OGM (source becomes prev_sender)
 * @batadv_ogm_packet: the OGM to forward; modified in place (ttl, flags, tq,
 *  prev_sender)
 * @is_single_hop_neigh: true if the OGM was received from a direct neighbor
 * @is_from_best_next_hop: true if the OGM came in via our best next hop
 *  towards its originator
 * @if_incoming: interface where the packet was received
 * @if_outgoing: interface for which the retransmission should be considered
 *
 * Drops the OGM when its TTL is exhausted or when it is neither from the best
 * next hop nor needed for neighbor link quality detection; otherwise applies
 * the hop penalty and hands the packet to the aggregation/send queue.
 */
static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
				  const struct ethhdr *ethhdr,
				  struct batadv_ogm_packet *batadv_ogm_packet,
				  bool is_single_hop_neigh,
				  bool is_from_best_next_hop,
				  struct batadv_hard_iface *if_incoming,
				  struct batadv_hard_iface *if_outgoing)
{
	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	u16 tvlv_len;

	/* a TTL of 1 (or less) would reach 0 after the decrement below, so the
	 * packet must not travel any further
	 */
	if (batadv_ogm_packet->ttl <= 1) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
		return;
	}

	if (!is_from_best_next_hop) {
		/* Mark the forwarded packet when it is not coming from our
		 * best next hop. We still need to forward the packet for our
		 * neighbor link quality detection to work in case the packet
		 * originated from a single hop neighbor. Otherwise we can
		 * simply drop the ogm.
		 */
		if (is_single_hop_neigh)
			batadv_ogm_packet->flags |= BATADV_NOT_BEST_NEXT_HOP;
		else
			return;
	}

	/* remember the TVLV payload length before mutating the header; it is
	 * needed for the total frame length passed to the queue below
	 */
	tvlv_len = ntohs(batadv_ogm_packet->tvlv_len);

	batadv_ogm_packet->ttl--;
	ether_addr_copy(batadv_ogm_packet->prev_sender, ethhdr->h_source);

	/* apply hop penalty */
	batadv_ogm_packet->tq = batadv_hop_penalty(batadv_ogm_packet->tq,
						   bat_priv);

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Forwarding packet: tq: %i, ttl: %i\n",
		   batadv_ogm_packet->tq, batadv_ogm_packet->ttl);

	/* the DIRECTLINK flag reflects whether WE received the OGM from a
	 * direct neighbor, so it must be rewritten on every hop
	 */
	if (is_single_hop_neigh)
		batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
	else
		batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;

	batadv_iv_ogm_queue_add(bat_priv, (unsigned char *)batadv_ogm_packet,
				BATADV_OGM_HLEN + tvlv_len,
				if_incoming, if_outgoing, 0,
				batadv_iv_ogm_fwd_send_time());
}
2013-04-17 19:44:43 +04:00
/**
 * batadv_iv_ogm_slide_own_bcast_window() - bitshift own OGM broadcast windows
 *  for the given interface
 * @hard_iface: the interface for which the windows have to be shifted
 *
 * Walks every originator in the hash table and, for each of its per-interface
 * infos matching @hard_iface, shifts the bcast_own bitmap by one sequence
 * number and recomputes the cached popcount (bcast_own_sum).
 */
static void
batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	struct batadv_orig_ifinfo *orig_ifinfo;
	unsigned long *word;
	u32 i;
	u8 *w;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		/* both list walks are RCU-protected; the bitmap itself is
		 * guarded by the per-originator ogm_cnt_lock
		 */
		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			hlist_for_each_entry_rcu(orig_ifinfo,
						 &orig_node->ifinfo_list,
						 list) {
				/* only the windows of the requested outgoing
				 * interface are slid
				 */
				if (orig_ifinfo->if_outgoing != hard_iface)
					continue;

				spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
				word = orig_ifinfo->bat_iv.bcast_own;
				batadv_bit_get_packet(bat_priv, word, 1, 0);
				/* keep the cached sum consistent with the
				 * freshly shifted bitmap
				 */
				w = &orig_ifinfo->bat_iv.bcast_own_sum;
				*w = bitmap_weight(word,
						   BATADV_TQ_LOCAL_WINDOW_SIZE);
				spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
			}
		}
		rcu_read_unlock();
	}
}
2019-10-03 18:02:01 +03:00
/**
 * batadv_iv_ogm_schedule_buff() - schedule submission of hardif ogm buffer
 * @hard_iface: interface whose ogm buffer should be transmitted
 *
 * Finalizes the interface's OGM buffer (TVLV container append for the primary
 * interface, sequence number assignment) and enqueues it for transmission:
 * once per interface for secondary interfaces, on every interface of the same
 * soft interface for the primary one.
 *
 * Caller must hold hard_iface->bat_iv.ogm_buff_mutex.
 */
static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *primary_if, *tmp_hard_iface;
	int *ogm_buff_len = &hard_iface->bat_iv.ogm_buff_len;
	u32 seqno;
	u16 tvlv_len = 0;
	unsigned long send_time;

	lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);

	/* interface already disabled by batadv_iv_ogm_iface_disable */
	if (!*ogm_buff)
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (hard_iface == primary_if) {
		/* tt changes have to be committed before the tvlv data is
		 * appended as it may alter the tt tvlv container
		 */
		batadv_tt_local_commit_changes(bat_priv);
		/* may reallocate *ogm_buff and grow *ogm_buff_len */
		tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, ogm_buff,
							    ogm_buff_len,
							    BATADV_OGM_HLEN);
	}

	batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
	batadv_ogm_packet->tvlv_len = htons(tvlv_len);

	/* change sequence number to network order */
	seqno = (u32)atomic_read(&hard_iface->bat_iv.ogm_seqno);
	batadv_ogm_packet->seqno = htonl(seqno);
	atomic_inc(&hard_iface->bat_iv.ogm_seqno);

	batadv_iv_ogm_slide_own_bcast_window(hard_iface);

	send_time = batadv_iv_ogm_emit_send_time(bat_priv);

	if (hard_iface != primary_if) {
		/* OGMs from secondary interfaces are only scheduled on their
		 * respective interfaces.
		 */
		batadv_iv_ogm_queue_add(bat_priv, *ogm_buff, *ogm_buff_len,
					hard_iface, hard_iface, 1, send_time);
		goto out;
	}

	/* OGMs from primary interfaces are scheduled on all
	 * interfaces.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(tmp_hard_iface, &batadv_hardif_list, list) {
		if (tmp_hard_iface->soft_iface != hard_iface->soft_iface)
			continue;

		/* hold a reference across the queue_add call; skip interfaces
		 * that are already being freed
		 */
		if (!kref_get_unless_zero(&tmp_hard_iface->refcount))
			continue;

		batadv_iv_ogm_queue_add(bat_priv, *ogm_buff,
					*ogm_buff_len, hard_iface,
					tmp_hard_iface, 1, send_time);

		batadv_hardif_put(tmp_hard_iface);
	}
	rcu_read_unlock();

out:
	/* called unconditionally even though primary_if may be NULL —
	 * presumably batadv_hardif_put() tolerates NULL; confirm against its
	 * definition
	 */
	batadv_hardif_put(primary_if);
}
2019-10-03 18:02:01 +03:00
static void batadv_iv_ogm_schedule ( struct batadv_hard_iface * hard_iface )
{
if ( hard_iface - > if_status = = BATADV_IF_NOT_IN_USE | |
hard_iface - > if_status = = BATADV_IF_TO_BE_REMOVED )
return ;
mutex_lock ( & hard_iface - > bat_iv . ogm_buff_mutex ) ;
batadv_iv_ogm_schedule_buff ( hard_iface ) ;
mutex_unlock ( & hard_iface - > bat_iv . ogm_buff_mutex ) ;
}
2018-08-16 17:54:45 +03:00
/**
2020-06-01 21:13:21 +03:00
* batadv_iv_orig_ifinfo_sum ( ) - Get bcast_own sum for originator over interface
2018-08-16 17:54:45 +03:00
* @ orig_node : originator which reproadcasted the OGMs directly
* @ if_outgoing : interface which transmitted the original OGM and received the
* direct rebroadcast
*
* Return : Number of replied ( rebroadcasted ) OGMs which were transmitted by
* an originator and directly ( without intermediate hop ) received by a specific
* interface
*/
static u8 batadv_iv_orig_ifinfo_sum ( struct batadv_orig_node * orig_node ,
struct batadv_hard_iface * if_outgoing )
{
struct batadv_orig_ifinfo * orig_ifinfo ;
u8 sum ;
orig_ifinfo = batadv_orig_ifinfo_get ( orig_node , if_outgoing ) ;
if ( ! orig_ifinfo )
return 0 ;
spin_lock_bh ( & orig_node - > bat_iv . ogm_cnt_lock ) ;
sum = orig_ifinfo - > bat_iv . bcast_own_sum ;
spin_unlock_bh ( & orig_node - > bat_iv . ogm_cnt_lock ) ;
batadv_orig_ifinfo_put ( orig_ifinfo ) ;
return sum ;
}
2013-11-13 22:14:46 +04:00
/**
 * batadv_iv_ogm_orig_update() - use OGM to update corresponding data in an
 *  originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: the orig node who originally emitted the ogm packet
 * @orig_ifinfo: ifinfo for the outgoing interface of the orig_node
 * @ethhdr: Ethernet header of the OGM
 * @batadv_ogm_packet: the ogm packet
 * @if_incoming: interface where the packet was received
 * @if_outgoing: interface for which the retransmission should be considered
 * @dup_status: the duplicate status of this ogm packet.
 *
 * Finds (or creates) the neighbor entry matching the OGM's sender, feeds the
 * received TQ into that neighbor's ring buffer and, if the neighbor now offers
 * a better (or equally good but more symmetric) path than the current router,
 * switches the route for @orig_node on @if_outgoing to it.
 */
static void
batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
			  struct batadv_orig_node *orig_node,
			  struct batadv_orig_ifinfo *orig_ifinfo,
			  const struct ethhdr *ethhdr,
			  const struct batadv_ogm_packet *batadv_ogm_packet,
			  struct batadv_hard_iface *if_incoming,
			  struct batadv_hard_iface *if_outgoing,
			  enum batadv_dup_status dup_status)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
	struct batadv_neigh_node *neigh_node = NULL;
	struct batadv_neigh_node *tmp_neigh_node = NULL;
	struct batadv_neigh_node *router = NULL;
	u8 sum_orig, sum_neigh;
	u8 *neigh_addr;
	u8 tq_avg;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "%s(): Searching and updating originator entry of received packet\n",
		   __func__);

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node,
				 &orig_node->neigh_list, list) {
		neigh_addr = tmp_neigh_node->addr;
		if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
		    tmp_neigh_node->if_incoming == if_incoming &&
		    kref_get_unless_zero(&tmp_neigh_node->refcount)) {
			/* at most one neighbor should match addr+interface;
			 * drop the reference of a previous (unexpected) match
			 */
			if (WARN(neigh_node, "too many matching neigh_nodes"))
				batadv_neigh_node_put(neigh_node);
			neigh_node = tmp_neigh_node;
			continue;
		}

		/* non-matching neighbors only need their TQ window padded
		 * with a zero when this OGM is not a duplicate
		 */
		if (dup_status != BATADV_NO_DUP)
			continue;

		/* only update the entry for this outgoing interface */
		neigh_ifinfo = batadv_neigh_ifinfo_get(tmp_neigh_node,
						       if_outgoing);
		if (!neigh_ifinfo)
			continue;

		spin_lock_bh(&tmp_neigh_node->ifinfo_lock);
		batadv_ring_buffer_set(neigh_ifinfo->bat_iv.tq_recv,
				       &neigh_ifinfo->bat_iv.tq_index, 0);
		tq_avg = batadv_ring_buffer_avg(neigh_ifinfo->bat_iv.tq_recv);
		neigh_ifinfo->bat_iv.tq_avg = tq_avg;
		spin_unlock_bh(&tmp_neigh_node->ifinfo_lock);

		batadv_neigh_ifinfo_put(neigh_ifinfo);
		/* reset so the final cleanup does not double-put */
		neigh_ifinfo = NULL;
	}

	if (!neigh_node) {
		struct batadv_orig_node *orig_tmp;

		orig_tmp = batadv_iv_ogm_orig_get(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
						     ethhdr->h_source,
						     orig_node, orig_tmp);

		batadv_orig_node_put(orig_tmp);
		if (!neigh_node)
			goto unlock;
	} else {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Updating existing last-hop neighbor of originator\n");
	}

	rcu_read_unlock();

	neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
	if (!neigh_ifinfo)
		goto out;

	neigh_node->last_seen = jiffies;

	/* record the received TQ for the matching neighbor on this outgoing
	 * interface and refresh its running average
	 */
	spin_lock_bh(&neigh_node->ifinfo_lock);
	batadv_ring_buffer_set(neigh_ifinfo->bat_iv.tq_recv,
			       &neigh_ifinfo->bat_iv.tq_index,
			       batadv_ogm_packet->tq);
	tq_avg = batadv_ring_buffer_avg(neigh_ifinfo->bat_iv.tq_recv);
	neigh_ifinfo->bat_iv.tq_avg = tq_avg;
	spin_unlock_bh(&neigh_node->ifinfo_lock);

	if (dup_status == BATADV_NO_DUP) {
		orig_ifinfo->last_ttl = batadv_ogm_packet->ttl;
		neigh_ifinfo->last_ttl = batadv_ogm_packet->ttl;
	}

	/* if this neighbor already is our next hop there is nothing
	 * to change
	 */
	router = batadv_orig_router_get(orig_node, if_outgoing);
	if (router == neigh_node)
		goto out;

	if (router) {
		router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing);
		if (!router_ifinfo)
			goto out;

		/* if this neighbor does not offer a better TQ we won't
		 * consider it
		 */
		if (router_ifinfo->bat_iv.tq_avg > neigh_ifinfo->bat_iv.tq_avg)
			goto out;
	}

	/* if the TQ is the same and the link not more symmetric we
	 * won't consider it either
	 */
	if (router_ifinfo &&
	    neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg) {
		sum_orig = batadv_iv_orig_ifinfo_sum(router->orig_node,
						     router->if_incoming);
		sum_neigh = batadv_iv_orig_ifinfo_sum(neigh_node->orig_node,
						      neigh_node->if_incoming);
		if (sum_orig >= sum_neigh)
			goto out;
	}

	batadv_update_route(bat_priv, orig_node, if_outgoing, neigh_node);
	goto out;

unlock:
	rcu_read_unlock();
out:
	/* the _put helpers are called without NULL checks here — presumably
	 * they tolerate NULL pointers; confirm against their definitions
	 */
	batadv_neigh_node_put(neigh_node);
	batadv_neigh_node_put(router);
	batadv_neigh_ifinfo_put(neigh_ifinfo);
	batadv_neigh_ifinfo_put(router_ifinfo);
}
2013-11-13 22:14:46 +04:00
/**
 * batadv_iv_ogm_calc_tq() - calculate tq for current received ogm packet
 * @orig_node: the orig node who originally emitted the ogm packet
 * @orig_neigh_node: the orig node struct of the neighbor who sent the packet
 * @batadv_ogm_packet: the ogm packet; its tq field is rewritten in place with
 *  the combined local metric
 * @if_incoming: interface where the packet was received
 * @if_outgoing: interface for which the retransmission should be considered
 *
 * Return: true if the link can be considered bidirectional, false otherwise
 */
static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
				  struct batadv_orig_node *orig_neigh_node,
				  struct batadv_ogm_packet *batadv_ogm_packet,
				  struct batadv_hard_iface *if_incoming,
				  struct batadv_hard_iface *if_outgoing)
{
	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	u8 total_count;
	u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
	unsigned int tq_iface_hop_penalty = BATADV_TQ_MAX_VALUE;
	unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
	unsigned int tq_asym_penalty, inv_asym_penalty;
	unsigned int combined_tq;
	bool ret = false;

	/* find corresponding one hop neighbor */
	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node,
				 &orig_neigh_node->neigh_list, list) {
		if (!batadv_compare_eth(tmp_neigh_node->addr,
					orig_neigh_node->orig))
			continue;

		if (tmp_neigh_node->if_incoming != if_incoming)
			continue;

		/* skip entries that are already being freed */
		if (!kref_get_unless_zero(&tmp_neigh_node->refcount))
			continue;

		neigh_node = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	if (!neigh_node)
		neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
						     orig_neigh_node->orig,
						     orig_neigh_node,
						     orig_neigh_node);

	if (!neigh_node)
		goto out;

	/* if orig_node is direct neighbor update neigh_node last_seen */
	if (orig_node == orig_neigh_node)
		neigh_node->last_seen = jiffies;

	orig_node->last_seen = jiffies;

	/* find packet count of corresponding one hop neighbor */
	orig_eq_count = batadv_iv_orig_ifinfo_sum(orig_neigh_node, if_incoming);
	neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
	if (neigh_ifinfo) {
		neigh_rq_count = neigh_ifinfo->bat_iv.real_packet_count;
		batadv_neigh_ifinfo_put(neigh_ifinfo);
	} else {
		neigh_rq_count = 0;
	}

	/* pay attention to not get a value bigger than 100% */
	if (orig_eq_count > neigh_rq_count)
		total_count = neigh_rq_count;
	else
		total_count = orig_eq_count;

	/* if we have too few packets (too less data) we set tq_own to zero
	 * if we receive too few packets it is not considered bidirectional
	 */
	if (total_count < BATADV_TQ_LOCAL_BIDRECT_SEND_MINIMUM ||
	    neigh_rq_count < BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM)
		tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information
		 */
		tq_own = (BATADV_TQ_MAX_VALUE * total_count) / neigh_rq_count;

	/* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more. This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	neigh_rq_inv = BATADV_TQ_LOCAL_WINDOW_SIZE - neigh_rq_count;
	neigh_rq_inv_cube = neigh_rq_inv * neigh_rq_inv * neigh_rq_inv;
	neigh_rq_max_cube = BATADV_TQ_LOCAL_WINDOW_SIZE *
			    BATADV_TQ_LOCAL_WINDOW_SIZE *
			    BATADV_TQ_LOCAL_WINDOW_SIZE;
	inv_asym_penalty = BATADV_TQ_MAX_VALUE * neigh_rq_inv_cube;
	inv_asym_penalty /= neigh_rq_max_cube;
	tq_asym_penalty = BATADV_TQ_MAX_VALUE - inv_asym_penalty;
	/* per-interface hop penalty configured on the receiving interface */
	tq_iface_hop_penalty -= atomic_read(&if_incoming->hop_penalty);

	/* penalize if the OGM is forwarded on the same interface. WiFi
	 * interfaces and other half duplex devices suffer from throughput
	 * drops as they can't send and receive at the same time.
	 */
	if (if_outgoing && if_incoming == if_outgoing &&
	    batadv_is_wifi_hardif(if_outgoing))
		tq_iface_hop_penalty = batadv_hop_penalty(tq_iface_hop_penalty,
							  bat_priv);

	/* combine the remote TQ with all local penalty factors; each factor
	 * is scaled by BATADV_TQ_MAX_VALUE, hence the cubic divisor
	 */
	combined_tq = batadv_ogm_packet->tq *
		      tq_own *
		      tq_asym_penalty *
		      tq_iface_hop_penalty;
	combined_tq /= BATADV_TQ_MAX_VALUE *
		       BATADV_TQ_MAX_VALUE *
		       BATADV_TQ_MAX_VALUE;
	batadv_ogm_packet->tq = combined_tq;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "bidirectional: orig = %pM neigh = %pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, iface_hop_penalty: %3i, total tq: %3i, if_incoming = %s, if_outgoing = %s\n",
		   orig_node->orig, orig_neigh_node->orig, total_count,
		   neigh_rq_count, tq_own, tq_asym_penalty,
		   tq_iface_hop_penalty, batadv_ogm_packet->tq,
		   if_incoming->net_dev->name,
		   if_outgoing ? if_outgoing->net_dev->name : "DEFAULT");

	/* if link has the minimum required transmission quality
	 * consider it bidirectional
	 */
	if (batadv_ogm_packet->tq >= BATADV_TQ_TOTAL_BIDRECT_LIMIT)
		ret = true;

out:
	/* called without a NULL check — presumably batadv_neigh_node_put()
	 * tolerates NULL; confirm against its definition
	 */
	batadv_neigh_node_put(neigh_node);
	return ret;
}
2013-05-23 15:07:42 +04:00
/**
2017-12-02 21:51:47 +03:00
* batadv_iv_ogm_update_seqnos ( ) - process a batman packet for all interfaces ,
2013-05-23 15:07:42 +04:00
* adjust the sequence number and find out whether it is a duplicate
* @ ethhdr : ethernet header of the packet
* @ batadv_ogm_packet : OGM packet to be considered
* @ if_incoming : interface on which the OGM packet was received
2013-11-13 22:14:46 +04:00
* @ if_outgoing : interface for which the retransmission should be considered
2013-05-23 15:07:42 +04:00
*
2015-09-15 20:00:48 +03:00
* Return : duplicate status as enum batadv_dup_status
2011-07-30 14:04:12 +04:00
*/
2013-05-23 15:07:42 +04:00
static enum batadv_dup_status
2012-05-12 20:33:51 +04:00
batadv_iv_ogm_update_seqnos ( const struct ethhdr * ethhdr ,
2012-06-06 00:31:30 +04:00
const struct batadv_ogm_packet * batadv_ogm_packet ,
2013-11-13 22:14:46 +04:00
const struct batadv_hard_iface * if_incoming ,
struct batadv_hard_iface * if_outgoing )
2011-07-30 14:04:12 +04:00
{
2012-06-06 00:31:31 +04:00
struct batadv_priv * bat_priv = netdev_priv ( if_incoming - > soft_iface ) ;
struct batadv_orig_node * orig_node ;
2013-11-13 22:14:47 +04:00
struct batadv_orig_ifinfo * orig_ifinfo = NULL ;
2013-11-13 22:14:46 +04:00
struct batadv_neigh_node * neigh_node ;
struct batadv_neigh_ifinfo * neigh_ifinfo ;
2016-02-22 23:02:39 +03:00
bool is_dup ;
2015-05-26 19:34:26 +03:00
s32 seq_diff ;
2016-02-22 23:02:39 +03:00
bool need_update = false ;
2013-05-23 15:07:42 +04:00
int set_mark ;
enum batadv_dup_status ret = BATADV_NO_DUP ;
2015-05-26 19:34:26 +03:00
u32 seqno = ntohl ( batadv_ogm_packet - > seqno ) ;
u8 * neigh_addr ;
u8 packet_count ;
2013-09-02 14:15:01 +04:00
unsigned long * bitmap ;
2011-07-30 14:04:12 +04:00
2013-09-02 14:15:02 +04:00
orig_node = batadv_iv_ogm_orig_get ( bat_priv , batadv_ogm_packet - > orig ) ;
2011-07-30 14:04:12 +04:00
if ( ! orig_node )
2013-05-23 15:07:42 +04:00
return BATADV_NO_DUP ;
2011-07-30 14:04:12 +04:00
2013-11-13 22:14:47 +04:00
orig_ifinfo = batadv_orig_ifinfo_new ( orig_node , if_outgoing ) ;
if ( WARN_ON ( ! orig_ifinfo ) ) {
2016-01-17 13:01:09 +03:00
batadv_orig_node_put ( orig_node ) ;
2013-11-13 22:14:47 +04:00
return 0 ;
}
2013-09-02 14:15:02 +04:00
spin_lock_bh ( & orig_node - > bat_iv . ogm_cnt_lock ) ;
2013-11-13 22:14:47 +04:00
seq_diff = seqno - orig_ifinfo - > last_real_seqno ;
2011-07-30 14:04:12 +04:00
/* signalize caller that the packet is to be dropped. */
2012-02-26 18:39:42 +04:00
if ( ! hlist_empty ( & orig_node - > neigh_list ) & &
2012-05-12 04:09:36 +04:00
batadv_window_protected ( bat_priv , seq_diff ,
2015-11-23 21:57:22 +03:00
BATADV_TQ_LOCAL_WINDOW_SIZE ,
& orig_ifinfo - > batman_seqno_reset , NULL ) ) {
2013-05-23 15:07:42 +04:00
ret = BATADV_PROTECTED ;
2011-07-30 14:04:12 +04:00
goto out ;
2013-05-23 15:07:42 +04:00
}
2011-07-30 14:04:12 +04:00
rcu_read_lock ( ) ;
2013-11-13 22:14:46 +04:00
hlist_for_each_entry_rcu ( neigh_node , & orig_node - > neigh_list , list ) {
neigh_ifinfo = batadv_neigh_ifinfo_new ( neigh_node ,
if_outgoing ) ;
if ( ! neigh_ifinfo )
continue ;
neigh_addr = neigh_node - > addr ;
is_dup = batadv_test_bit ( neigh_ifinfo - > bat_iv . real_bits ,
2013-11-13 22:14:47 +04:00
orig_ifinfo - > last_real_seqno ,
2013-05-23 15:07:42 +04:00
seqno ) ;
2012-05-12 15:48:58 +04:00
if ( batadv_compare_eth ( neigh_addr , ethhdr - > h_source ) & &
2013-11-13 22:14:46 +04:00
neigh_node - > if_incoming = = if_incoming ) {
2011-07-30 14:04:12 +04:00
set_mark = 1 ;
2013-05-23 15:07:42 +04:00
if ( is_dup )
ret = BATADV_NEIGH_DUP ;
} else {
2011-07-30 14:04:12 +04:00
set_mark = 0 ;
2017-08-23 22:52:13 +03:00
if ( is_dup & & ret ! = BATADV_NEIGH_DUP )
2013-05-23 15:07:42 +04:00
ret = BATADV_ORIG_DUP ;
}
2011-07-30 14:04:12 +04:00
/* if the window moved, set the update flag. */
2013-11-13 22:14:46 +04:00
bitmap = neigh_ifinfo - > bat_iv . real_bits ;
2013-09-02 14:15:01 +04:00
need_update | = batadv_bit_get_packet ( bat_priv , bitmap ,
2012-05-12 04:09:25 +04:00
seq_diff , set_mark ) ;
2011-07-30 14:04:12 +04:00
2013-11-13 22:14:46 +04:00
packet_count = bitmap_weight ( bitmap ,
2012-07-08 19:13:15 +04:00
BATADV_TQ_LOCAL_WINDOW_SIZE ) ;
2013-11-13 22:14:46 +04:00
neigh_ifinfo - > bat_iv . real_packet_count = packet_count ;
2016-01-17 13:01:12 +03:00
batadv_neigh_ifinfo_put ( neigh_ifinfo ) ;
2011-07-30 14:04:12 +04:00
}
rcu_read_unlock ( ) ;
if ( need_update ) {
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_BATMAN , bat_priv ,
2013-11-13 22:14:47 +04:00
" %s updating last_seqno: old %u, new %u \n " ,
if_outgoing ? if_outgoing - > net_dev - > name : " DEFAULT " ,
orig_ifinfo - > last_real_seqno , seqno ) ;
orig_ifinfo - > last_real_seqno = seqno ;
2011-07-30 14:04:12 +04:00
}
out :
2013-09-02 14:15:02 +04:00
spin_unlock_bh ( & orig_node - > bat_iv . ogm_cnt_lock ) ;
2016-01-17 13:01:09 +03:00
batadv_orig_node_put ( orig_node ) ;
2016-01-17 13:01:13 +03:00
batadv_orig_ifinfo_put ( orig_ifinfo ) ;
2011-07-30 14:04:12 +04:00
return ret ;
}
2013-11-13 22:14:47 +04:00
/**
 * batadv_iv_ogm_process_per_outif() - process a batman iv OGM for an outgoing
 *  interface
 * @skb: the skb containing the OGM
 * @ogm_offset: offset from skb->data to start of ogm header
 * @orig_node: the (cached) orig node for the originator of this OGM
 * @if_incoming: the interface where this packet was received
 * @if_outgoing: the interface for which the packet should be considered
 */
static void
batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset,
				struct batadv_orig_node *orig_node,
				struct batadv_hard_iface *if_incoming,
				struct batadv_hard_iface *if_outgoing)
{
	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct batadv_hardif_neigh_node *hardif_neigh = NULL;
	struct batadv_neigh_node *router = NULL;
	struct batadv_neigh_node *router_router = NULL;
	struct batadv_orig_node *orig_neigh_node;
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *orig_neigh_router = NULL;
	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
	struct batadv_ogm_packet *ogm_packet;
	enum batadv_dup_status dup_status;
	bool is_from_best_next_hop = false;
	bool is_single_hop_neigh = false;
	bool sameseq, similar_ttl;
	struct sk_buff *skb_priv;
	struct ethhdr *ethhdr;
	u8 *prev_sender;
	bool is_bidirect;

	/* create a private copy of the skb, as some functions change tq value
	 * and/or flags.
	 */
	skb_priv = skb_copy(skb, GFP_ATOMIC);
	if (!skb_priv)
		return;

	ethhdr = eth_hdr(skb_priv);
	ogm_packet = (struct batadv_ogm_packet *)(skb_priv->data + ogm_offset);

	/* classify the packet (new / neighbor dup / originator dup /
	 * seqno-protection drop) and update the per-neighbor seqno windows
	 */
	dup_status = batadv_iv_ogm_update_seqnos(ethhdr, ogm_packet,
						 if_incoming, if_outgoing);
	/* a direct neighbor sends OGMs with its own address as originator */
	if (batadv_compare_eth(ethhdr->h_source, ogm_packet->orig))
		is_single_hop_neigh = true;

	if (dup_status == BATADV_PROTECTED) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: packet within seqno protection time (sender: %pM)\n",
			   ethhdr->h_source);
		goto out;
	}

	if (ogm_packet->tq == 0) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: originator packet with tq equal 0\n");
		goto out;
	}

	/* refresh last_seen for the hardif neighbor entry of a direct sender */
	if (is_single_hop_neigh) {
		hardif_neigh = batadv_hardif_neigh_get(if_incoming,
						       ethhdr->h_source);
		if (hardif_neigh)
			hardif_neigh->last_seen = jiffies;
	}

	router = batadv_orig_router_get(orig_node, if_outgoing);
	if (router) {
		router_router = batadv_orig_router_get(router->orig_node,
						       if_outgoing);
		router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing);
	}

	/* the rebroadcast came from our currently selected next hop */
	if ((router_ifinfo && router_ifinfo->bat_iv.tq_avg != 0) &&
	    (batadv_compare_eth(router->addr, ethhdr->h_source)))
		is_from_best_next_hop = true;

	prev_sender = ogm_packet->prev_sender;
	/* avoid temporary routing loops */
	if (router && router_router &&
	    (batadv_compare_eth(router->addr, prev_sender)) &&
	    !(batadv_compare_eth(ogm_packet->orig, prev_sender)) &&
	    (batadv_compare_eth(router->addr, router_router->addr))) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n",
			   ethhdr->h_source);
		goto out;
	}

	/* TVLV containers are only parsed once, for the default interface */
	if (if_outgoing == BATADV_IF_DEFAULT)
		batadv_tvlv_ogm_receive(bat_priv, ogm_packet, orig_node);

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac
	 */
	if (is_single_hop_neigh)
		orig_neigh_node = orig_node;
	else
		orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
							 ethhdr->h_source);

	if (!orig_neigh_node)
		goto out;

	/* Update nc_nodes of the originator */
	batadv_nc_update_nc_node(bat_priv, orig_node, orig_neigh_node,
				 ogm_packet, is_single_hop_neigh);

	orig_neigh_router = batadv_orig_router_get(orig_neigh_node,
						   if_outgoing);

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it
	 */
	if (!is_single_hop_neigh && !orig_neigh_router) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: OGM via unknown neighbor!\n");
		goto out_neigh;
	}

	is_bidirect = batadv_iv_ogm_calc_tq(orig_node, orig_neigh_node,
					    ogm_packet, if_incoming,
					    if_outgoing);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate
	 */
	orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
	if (!orig_ifinfo)
		goto out_neigh;

	sameseq = orig_ifinfo->last_real_seqno == ntohl(ogm_packet->seqno);
	similar_ttl = (orig_ifinfo->last_ttl - 3) <= ogm_packet->ttl;

	if (is_bidirect && (dup_status == BATADV_NO_DUP ||
			    (sameseq && similar_ttl))) {
		batadv_iv_ogm_orig_update(bat_priv, orig_node,
					  orig_ifinfo, ethhdr,
					  ogm_packet, if_incoming,
					  if_outgoing, dup_status);
	}
	batadv_orig_ifinfo_put(orig_ifinfo);

	/* only forward for specific interface, not for the default one. */
	if (if_outgoing == BATADV_IF_DEFAULT)
		goto out_neigh;

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {
		/* OGMs from secondary interfaces should only scheduled once
		 * per interface where it has been received, not multiple times
		 */
		if (ogm_packet->ttl <= 2 &&
		    if_incoming != if_outgoing) {
			batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
				   "Drop packet: OGM from secondary interface and wrong outgoing interface\n");
			goto out_neigh;
		}
		/* mark direct link on incoming interface */
		batadv_iv_ogm_forward(orig_node, ethhdr, ogm_packet,
				      is_single_hop_neigh,
				      is_from_best_next_hop, if_incoming,
				      if_outgoing);

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
		goto out_neigh;
	}

	/* multihop originator */
	if (!is_bidirect) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: not received via bidirectional link\n");
		goto out_neigh;
	}

	if (dup_status == BATADV_NEIGH_DUP) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: duplicate packet received\n");
		goto out_neigh;
	}

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Forwarding packet: rebroadcast originator packet\n");
	batadv_iv_ogm_forward(orig_node, ethhdr, ogm_packet,
			      is_single_hop_neigh, is_from_best_next_hop,
			      if_incoming, if_outgoing);

out_neigh:
	/* for single hop neighbors orig_neigh_node aliases orig_node, whose
	 * reference is owned by the caller - only drop our own reference
	 */
	if (orig_neigh_node && !is_single_hop_neigh)
		batadv_orig_node_put(orig_neigh_node);
out:
	/* the *_put helpers tolerate NULL pointers */
	batadv_neigh_ifinfo_put(router_ifinfo);
	batadv_neigh_node_put(router);
	batadv_neigh_node_put(router_router);
	batadv_neigh_node_put(orig_neigh_router);
	batadv_hardif_neigh_put(hardif_neigh);

	consume_skb(skb_priv);
}
2018-08-16 17:54:45 +03:00
/**
 * batadv_iv_ogm_process_reply() - Check OGM for direct reply and process it
 * @ogm_packet: rebroadcast OGM packet to process
 * @if_incoming: the interface where this packet was received
 * @orig_node: originator which reproadcasted the OGMs
 * @if_incoming_seqno: OGM sequence number when rebroadcast was received
 */
static void batadv_iv_ogm_process_reply(struct batadv_ogm_packet *ogm_packet,
					struct batadv_hard_iface *if_incoming,
					struct batadv_orig_node *orig_node,
					u32 if_incoming_seqno)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	s32 bit_pos;
	u8 *weight;

	/* neighbor has to indicate direct link and it has to
	 * come via the corresponding interface
	 */
	if (!(ogm_packet->flags & BATADV_DIRECTLINK))
		return;

	if (!batadv_compare_eth(if_incoming->net_dev->dev_addr,
				ogm_packet->orig))
		return;

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_incoming);
	if (!orig_ifinfo)
		return;

	/* save packet seqno for bidirectional check */
	spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
	/* offset of the echoed seqno relative to our current one, minus 2
	 * (the interface seqno has already advanced since the rebroadcast)
	 */
	bit_pos = if_incoming_seqno - 2;
	bit_pos -= ntohl(ogm_packet->seqno);
	batadv_set_bit(orig_ifinfo->bat_iv.bcast_own, bit_pos);
	/* cache the number of own OGMs echoed back within the window */
	weight = &orig_ifinfo->bat_iv.bcast_own_sum;
	*weight = bitmap_weight(orig_ifinfo->bat_iv.bcast_own,
				BATADV_TQ_LOCAL_WINDOW_SIZE);
	spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);

	batadv_orig_ifinfo_put(orig_ifinfo);
}
2013-11-13 22:14:47 +04:00
/**
 * batadv_iv_ogm_process() - process an incoming batman iv OGM
 * @skb: the skb containing the OGM
 * @ogm_offset: offset to the OGM which should be processed (for aggregates)
 * @if_incoming: the interface where this packet was received
 */
static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
				  struct batadv_hard_iface *if_incoming)
{
	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct batadv_orig_node *orig_neigh_node, *orig_node;
	struct batadv_hard_iface *hard_iface;
	struct batadv_ogm_packet *ogm_packet;
	u32 if_incoming_seqno;
	bool has_directlink_flag;
	struct ethhdr *ethhdr;
	bool is_my_oldorig = false;
	bool is_my_addr = false;
	bool is_my_orig = false;

	ogm_packet = (struct batadv_ogm_packet *)(skb->data + ogm_offset);
	ethhdr = eth_hdr(skb);

	/* Silently drop when the batman packet is actually not a
	 * correct packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 byte) and the aggregation interprets
	 * it as an additional length.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batadv_ogm_packet to detect whether the packet is the last
	 * packet in an aggregation. Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
	if (ogm_packet->packet_type != BATADV_IV_OGM)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);

	if (ogm_packet->flags & BATADV_DIRECTLINK)
		has_directlink_flag = true;
	else
		has_directlink_flag = false;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, tq %d, TTL %d, V %d, IDF %d)\n",
		   ethhdr->h_source, if_incoming->net_dev->name,
		   if_incoming->net_dev->dev_addr, ogm_packet->orig,
		   ogm_packet->prev_sender, ntohl(ogm_packet->seqno),
		   ogm_packet->tq, ogm_packet->ttl,
		   ogm_packet->version, has_directlink_flag);

	/* check whether sender/originator/prev_sender matches any of our own
	 * active interface addresses on the same soft interface
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != if_incoming->soft_iface)
			continue;

		if (batadv_compare_eth(ethhdr->h_source,
				       hard_iface->net_dev->dev_addr))
			is_my_addr = true;

		if (batadv_compare_eth(ogm_packet->orig,
				       hard_iface->net_dev->dev_addr))
			is_my_orig = true;

		if (batadv_compare_eth(ogm_packet->prev_sender,
				       hard_iface->net_dev->dev_addr))
			is_my_oldorig = true;
	}
	rcu_read_unlock();

	if (is_my_addr) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: received my own broadcast (sender: %pM)\n",
			   ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		/* our own OGM echoed back by a neighbor: feed it into the
		 * bidirectional link check, then drop it
		 */
		orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
							 ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		batadv_iv_ogm_process_reply(ogm_packet, if_incoming,
					    orig_neigh_node, if_incoming_seqno);

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: originator packet from myself (via neighbor)\n");
		batadv_orig_node_put(orig_neigh_node);
		return;
	}

	if (is_my_oldorig) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n",
			   ethhdr->h_source);
		return;
	}

	if (ogm_packet->flags & BATADV_NOT_BEST_NEXT_HOP) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n",
			   ethhdr->h_source);
		return;
	}

	orig_node = batadv_iv_ogm_orig_get(bat_priv, ogm_packet->orig);
	if (!orig_node)
		return;

	/* process once for the default interface ... */
	batadv_iv_ogm_process_per_outif(skb, ogm_offset, orig_node,
					if_incoming, BATADV_IF_DEFAULT);

	/* ... and once per active outgoing hard interface */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		/* hold a reference so the interface cannot vanish while it is
		 * being processed
		 */
		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		batadv_iv_ogm_process_per_outif(skb, ogm_offset, orig_node,
						if_incoming, hard_iface);

		batadv_hardif_put(hard_iface);
	}
	rcu_read_unlock();

	batadv_orig_node_put(orig_node);
}
2016-05-02 20:45:34 +03:00
/**
 * batadv_iv_send_outstanding_bat_ogm_packet() - transmit a queued OGM
 * @work: work queue item embedded in the forwarding packet
 *
 * Worker callback: emits the OGM unless the mesh is shutting down, reschedules
 * the next period for the "original" copy, and finally frees the forwarding
 * packet if this worker - not the interface purging routine - wins the race
 * to claim it.
 */
static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;
	bool dropped = false;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);

	/* skip transmission when the mesh is going down */
	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
		dropped = true;
		goto out;
	}

	batadv_iv_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queues wake up time unless we are shutting down.
	 *
	 * only re-schedule if this is the "original" copy, e.g. the OGM of the
	 * primary interface should only be rescheduled once per period, but
	 * this function will be called for the forw_packet instances of the
	 * other secondary interfaces as well.
	 */
	if (forw_packet->own &&
	    forw_packet->if_incoming == forw_packet->if_outgoing)
		batadv_iv_ogm_schedule(forw_packet->if_incoming);

out:
	/* do we get something for free()? */
	/* stealing the packet from the queue claims the responsibility to
	 * free it; if the purging routine already claimed it, it will free
	 * the packet instead (avoids a use-after-free race on shutdown)
	 */
	if (batadv_forw_packet_steal(forw_packet,
				     &bat_priv->forw_bat_list_lock))
		batadv_forw_packet_free(forw_packet, dropped);
}
2012-05-12 20:33:51 +04:00
/**
 * batadv_iv_ogm_receive() - handle an incoming (possibly aggregated) OGM skb
 * @skb: the received packet; ownership is taken (consumed or freed here)
 * @if_incoming: the interface where this packet was received
 *
 * Return: NET_RX_SUCCESS when the packet was processed, NET_RX_DROP otherwise.
 */
static int batadv_iv_ogm_receive(struct sk_buff *skb,
				 struct batadv_hard_iface *if_incoming)
{
	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct batadv_ogm_packet *ogm_packet;
	u8 *packet_pos;
	int ogm_offset;
	bool res;
	int ret = NET_RX_DROP;

	res = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
	if (!res)
		goto free_skb;

	/* did we receive a B.A.T.M.A.N. IV OGM packet on an interface
	 * that does not have B.A.T.M.A.N. IV enabled ?
	 */
	if (bat_priv->algo_ops->iface.enable != batadv_iv_ogm_iface_enable)
		goto free_skb;

	batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
			   skb->len + ETH_HLEN);

	ogm_offset = 0;
	ogm_packet = (struct batadv_ogm_packet *)skb->data;

	/* unpack the aggregated packets and process them one by one */
	while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
					 ogm_packet)) {
		batadv_iv_ogm_process(skb, ogm_offset, if_incoming);

		/* advance past this OGM header and its TVLV payload */
		ogm_offset += BATADV_OGM_HLEN;
		ogm_offset += ntohs(ogm_packet->tvlv_len);

		packet_pos = skb->data + ogm_offset;
		ogm_packet = (struct batadv_ogm_packet *)packet_pos;
	}

	ret = NET_RX_SUCCESS;

free_skb:
	/* consume_skb() for accepted packets keeps drop-monitoring accurate */
	if (ret == NET_RX_SUCCESS)
		consume_skb(skb);
	else
		kfree_skb(skb);

	return ret;
}
2011-11-28 13:40:17 +04:00
2016-07-03 14:31:40 +03:00
/**
 * batadv_iv_ogm_neigh_get_tq_avg() - Get the TQ average for a neighbour on a
 *  given outgoing interface.
 * @neigh_node: Neighbour of interest
 * @if_outgoing: Outgoing interface of interest
 * @tq_avg: Pointer of where to store the TQ average
 *
 * Return: False if no average TQ available, otherwise true.
 */
static bool
batadv_iv_ogm_neigh_get_tq_avg(struct batadv_neigh_node *neigh_node,
			       struct batadv_hard_iface *if_outgoing,
			       u8 *tq_avg)
{
	struct batadv_neigh_ifinfo *ifinfo;
	bool found = false;

	ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
	if (ifinfo) {
		/* copy out the cached average before dropping the reference */
		*tq_avg = ifinfo->bat_iv.tq_avg;
		batadv_neigh_ifinfo_put(ifinfo);
		found = true;
	}

	return found;
}
/**
 * batadv_iv_ogm_orig_dump_subentry() - Dump an originator subentry into a
 *  message
 * @msg: Netlink message to dump into
 * @portid: Port making netlink request
 * @seq: Sequence number of netlink message
 * @bat_priv: The bat priv with all the soft interface information
 * @if_outgoing: Limit dump to entries with this outgoing interface
 * @orig_node: Originator to dump
 * @neigh_node: Single hops neighbour
 * @best: Is the best originator
 *
 * Return: Error code, or 0 on success
 */
static int
batadv_iv_ogm_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
				 struct batadv_priv *bat_priv,
				 struct batadv_hard_iface *if_outgoing,
				 struct batadv_orig_node *orig_node,
				 struct batadv_neigh_node *neigh_node,
				 bool best)
{
	void *hdr;
	u8 tq_avg;
	unsigned int last_seen_msecs;

	last_seen_msecs = jiffies_to_msecs(jiffies - orig_node->last_seen);

	/* skip (not fail) entries without a TQ average for this interface */
	if (!batadv_iv_ogm_neigh_get_tq_avg(neigh_node, if_outgoing, &tq_avg))
		return 0;

	/* honor the requested outgoing-interface filter */
	if (if_outgoing != BATADV_IF_DEFAULT &&
	    if_outgoing != neigh_node->if_incoming)
		return 0;

	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
			  NLM_F_MULTI, BATADV_CMD_GET_ORIGINATORS);
	if (!hdr)
		return -ENOBUFS;

	if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
		    orig_node->orig) ||
	    nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
		    neigh_node->addr) ||
	    nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
			   neigh_node->if_incoming->net_dev->name) ||
	    nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
			neigh_node->if_incoming->net_dev->ifindex) ||
	    nla_put_u8(msg, BATADV_ATTR_TQ, tq_avg) ||
	    nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS,
			last_seen_msecs))
		goto nla_put_failure;

	if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	/* roll back the partially constructed message entry */
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}
/**
 * batadv_iv_ogm_orig_dump_entry() - Dump an originator entry into a message
 * @msg: Netlink message to dump into
 * @portid: Port making netlink request
 * @seq: Sequence number of netlink message
 * @bat_priv: The bat priv with all the soft interface information
 * @if_outgoing: Limit dump to entries with this outgoing interface
 * @orig_node: Originator to dump
 * @sub_s: Number of sub entries to skip
 *
 * This function assumes the caller holds rcu_read_lock().
 *
 * Return: Error code, or 0 on success
 */
static int
batadv_iv_ogm_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
			      struct batadv_priv *bat_priv,
			      struct batadv_hard_iface *if_outgoing,
			      struct batadv_orig_node *orig_node, int *sub_s)
{
	struct batadv_neigh_node *neigh_node_best;
	struct batadv_neigh_node *neigh_node;
	int sub = 0;
	bool best;
	u8 tq_avg_best;

	/* originators without a usable best route are not dumped at all */
	neigh_node_best = batadv_orig_router_get(orig_node, if_outgoing);
	if (!neigh_node_best)
		goto out;

	if (!batadv_iv_ogm_neigh_get_tq_avg(neigh_node_best, if_outgoing,
					    &tq_avg_best))
		goto out;

	if (tq_avg_best == 0)
		goto out;

	hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) {
		/* resume after the sub entries already dumped in a previous
		 * (truncated) netlink message
		 */
		if (sub++ < *sub_s)
			continue;

		best = (neigh_node == neigh_node_best);

		if (batadv_iv_ogm_orig_dump_subentry(msg, portid, seq,
						     bat_priv, if_outgoing,
						     orig_node, neigh_node,
						     best)) {
			batadv_neigh_node_put(neigh_node_best);

			/* remember where to resume on the next dump call */
			*sub_s = sub - 1;
			return -EMSGSIZE;
		}
	}

 out:
	/* put helper tolerates NULL (early-exit paths above) */
	batadv_neigh_node_put(neigh_node_best);

	*sub_s = 0;
	return 0;
}
/**
 * batadv_iv_ogm_orig_dump_bucket() - Dump an originator bucket into a
 *  message
 * @msg: Netlink message to dump into
 * @portid: Port making netlink request
 * @seq: Sequence number of netlink message
 * @bat_priv: The bat priv with all the soft interface information
 * @if_outgoing: Limit dump to entries with this outgoing interface
 * @head: Bucket to be dumped
 * @idx_s: Number of entries to be skipped
 * @sub: Number of sub entries to be skipped
 *
 * Return: Error code, or 0 on success
 */
static int
batadv_iv_ogm_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
			       struct batadv_priv *bat_priv,
			       struct batadv_hard_iface *if_outgoing,
			       struct hlist_head *head, int *idx_s, int *sub)
{
	struct batadv_orig_node *orig_node;
	int pos = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
		/* skip entries already dumped in an earlier, truncated pass */
		if (pos++ < *idx_s)
			continue;

		if (batadv_iv_ogm_orig_dump_entry(msg, portid, seq, bat_priv,
						  if_outgoing, orig_node,
						  sub)) {
			rcu_read_unlock();

			/* message full: record the resume position */
			*idx_s = pos - 1;
			return -EMSGSIZE;
		}
	}
	rcu_read_unlock();

	/* bucket fully dumped - reset the resume cursors */
	*idx_s = 0;
	*sub = 0;

	return 0;
}
/**
2017-12-02 21:51:47 +03:00
* batadv_iv_ogm_orig_dump ( ) - Dump the originators into a message
2016-07-03 14:31:40 +03:00
* @ msg : Netlink message to dump into
* @ cb : Control block containing additional options
* @ bat_priv : The bat priv with all the soft interface information
* @ if_outgoing : Limit dump to entries with this outgoing interface
*/
static void
batadv_iv_ogm_orig_dump ( struct sk_buff * msg , struct netlink_callback * cb ,
struct batadv_priv * bat_priv ,
struct batadv_hard_iface * if_outgoing )
{
struct batadv_hashtable * hash = bat_priv - > orig_hash ;
struct hlist_head * head ;
int bucket = cb - > args [ 0 ] ;
int idx = cb - > args [ 1 ] ;
int sub = cb - > args [ 2 ] ;
int portid = NETLINK_CB ( cb - > skb ) . portid ;
while ( bucket < hash - > size ) {
head = & hash - > table [ bucket ] ;
if ( batadv_iv_ogm_orig_dump_bucket ( msg , portid ,
cb - > nlh - > nlmsg_seq ,
bat_priv , if_outgoing , head ,
& idx , & sub ) )
break ;
bucket + + ;
}
cb - > args [ 0 ] = bucket ;
cb - > args [ 1 ] = idx ;
cb - > args [ 2 ] = sub ;
}
2013-09-02 14:15:04 +04:00
/**
2017-12-02 21:51:47 +03:00
* batadv_iv_ogm_neigh_diff ( ) - calculate tq difference of two neighbors
2013-09-02 14:15:04 +04:00
* @ neigh1 : the first neighbor object of the comparison
2013-11-13 22:14:46 +04:00
* @ if_outgoing1 : outgoing interface for the first neighbor
2013-09-02 14:15:04 +04:00
* @ neigh2 : the second neighbor object of the comparison
2013-11-13 22:14:46 +04:00
* @ if_outgoing2 : outgoing interface for the second neighbor
2016-07-03 12:07:14 +03:00
* @ diff : pointer to integer receiving the calculated difference
2013-09-02 14:15:04 +04:00
*
2016-07-03 12:07:14 +03:00
* The content of * @ diff is only valid when this function returns true .
* It is less , equal to or greater than 0 if the metric via neigh1 is lower ,
* the same as or higher than the metric via neigh2
*
* Return : true when the difference could be calculated , false otherwise
2013-09-02 14:15:04 +04:00
*/
2016-07-03 12:07:14 +03:00
static bool batadv_iv_ogm_neigh_diff ( struct batadv_neigh_node * neigh1 ,
struct batadv_hard_iface * if_outgoing1 ,
struct batadv_neigh_node * neigh2 ,
struct batadv_hard_iface * if_outgoing2 ,
int * diff )
2013-09-02 14:15:04 +04:00
{
2013-11-13 22:14:46 +04:00
struct batadv_neigh_ifinfo * neigh1_ifinfo , * neigh2_ifinfo ;
2015-05-26 19:34:26 +03:00
u8 tq1 , tq2 ;
2016-07-03 12:07:14 +03:00
bool ret = true ;
2013-11-13 22:14:46 +04:00
neigh1_ifinfo = batadv_neigh_ifinfo_get ( neigh1 , if_outgoing1 ) ;
neigh2_ifinfo = batadv_neigh_ifinfo_get ( neigh2 , if_outgoing2 ) ;
2013-09-02 14:15:04 +04:00
2013-11-13 22:14:46 +04:00
if ( ! neigh1_ifinfo | | ! neigh2_ifinfo ) {
2016-07-03 12:07:14 +03:00
ret = false ;
2013-11-13 22:14:46 +04:00
goto out ;
}
2013-09-02 14:15:04 +04:00
2013-11-13 22:14:46 +04:00
tq1 = neigh1_ifinfo - > bat_iv . tq_avg ;
tq2 = neigh2_ifinfo - > bat_iv . tq_avg ;
2016-07-03 12:07:14 +03:00
* diff = ( int ) tq1 - ( int ) tq2 ;
2013-11-13 22:14:46 +04:00
out :
2021-08-08 20:11:08 +03:00
batadv_neigh_ifinfo_put ( neigh1_ifinfo ) ;
batadv_neigh_ifinfo_put ( neigh2_ifinfo ) ;
2013-11-13 22:14:46 +04:00
2016-07-03 12:07:14 +03:00
return ret ;
}
2016-07-03 14:31:40 +03:00
/**
2017-12-02 21:51:47 +03:00
* batadv_iv_ogm_neigh_dump_neigh ( ) - Dump a neighbour into a netlink message
2016-07-03 14:31:40 +03:00
* @ msg : Netlink message to dump into
* @ portid : Port making netlink request
* @ seq : Sequence number of netlink message
* @ hardif_neigh : Neighbour to be dumped
*
* Return : Error code , or 0 on success
*/
static int
batadv_iv_ogm_neigh_dump_neigh ( struct sk_buff * msg , u32 portid , u32 seq ,
struct batadv_hardif_neigh_node * hardif_neigh )
{
void * hdr ;
unsigned int last_seen_msecs ;
last_seen_msecs = jiffies_to_msecs ( jiffies - hardif_neigh - > last_seen ) ;
hdr = genlmsg_put ( msg , portid , seq , & batadv_netlink_family ,
NLM_F_MULTI , BATADV_CMD_GET_NEIGHBORS ) ;
if ( ! hdr )
return - ENOBUFS ;
if ( nla_put ( msg , BATADV_ATTR_NEIGH_ADDRESS , ETH_ALEN ,
hardif_neigh - > addr ) | |
2021-05-10 16:05:42 +03:00
nla_put_string ( msg , BATADV_ATTR_HARD_IFNAME ,
hardif_neigh - > if_incoming - > net_dev - > name ) | |
2016-07-03 14:31:40 +03:00
nla_put_u32 ( msg , BATADV_ATTR_HARD_IFINDEX ,
hardif_neigh - > if_incoming - > net_dev - > ifindex ) | |
nla_put_u32 ( msg , BATADV_ATTR_LAST_SEEN_MSECS ,
last_seen_msecs ) )
goto nla_put_failure ;
genlmsg_end ( msg , hdr ) ;
return 0 ;
nla_put_failure :
genlmsg_cancel ( msg , hdr ) ;
return - EMSGSIZE ;
}
/**
 * batadv_iv_ogm_neigh_dump_hardif() - Dump the neighbours of a hard interface
 *  into a message
 * @msg: Netlink message to dump into
 * @portid: Port making netlink request
 * @seq: Sequence number of netlink message
 * @bat_priv: The bat priv with all the soft interface information
 * @hard_iface: Hard interface to dump the neighbours for
 * @idx_s: Number of entries to skip
 *
 * This function assumes the caller holds rcu_read_lock().
 *
 * Return: Error code, or 0 on success
 */
static int
batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
				struct batadv_priv *bat_priv,
				struct batadv_hard_iface *hard_iface,
				int *idx_s)
{
	struct batadv_hardif_neigh_node *neigh;
	int pos = 0;

	hlist_for_each_entry_rcu(neigh, &hard_iface->neigh_list, list) {
		/* resume after the entries dumped by a previous call */
		if (pos++ < *idx_s)
			continue;

		if (batadv_iv_ogm_neigh_dump_neigh(msg, portid, seq, neigh)) {
			*idx_s = pos - 1;
			return -EMSGSIZE;
		}
	}

	*idx_s = 0;
	return 0;
}
/**
2017-12-02 21:51:47 +03:00
* batadv_iv_ogm_neigh_dump ( ) - Dump the neighbours into a message
2016-07-03 14:31:40 +03:00
* @ msg : Netlink message to dump into
* @ cb : Control block containing additional options
* @ bat_priv : The bat priv with all the soft interface information
2020-06-01 21:13:21 +03:00
* @ single_hardif : Limit dump to this hard interface
2016-07-03 14:31:40 +03:00
*/
static void
batadv_iv_ogm_neigh_dump ( struct sk_buff * msg , struct netlink_callback * cb ,
struct batadv_priv * bat_priv ,
struct batadv_hard_iface * single_hardif )
{
struct batadv_hard_iface * hard_iface ;
int i_hardif = 0 ;
int i_hardif_s = cb - > args [ 0 ] ;
int idx = cb - > args [ 1 ] ;
int portid = NETLINK_CB ( cb - > skb ) . portid ;
rcu_read_lock ( ) ;
if ( single_hardif ) {
if ( i_hardif_s = = 0 ) {
if ( batadv_iv_ogm_neigh_dump_hardif ( msg , portid ,
cb - > nlh - > nlmsg_seq ,
bat_priv ,
single_hardif ,
& idx ) = = 0 )
i_hardif + + ;
}
} else {
list_for_each_entry_rcu ( hard_iface , & batadv_hardif_list ,
list ) {
if ( hard_iface - > soft_iface ! = bat_priv - > soft_iface )
continue ;
if ( i_hardif + + < i_hardif_s )
continue ;
if ( batadv_iv_ogm_neigh_dump_hardif ( msg , portid ,
cb - > nlh - > nlmsg_seq ,
bat_priv ,
hard_iface , & idx ) ) {
i_hardif - - ;
break ;
}
}
}
rcu_read_unlock ( ) ;
cb - > args [ 0 ] = i_hardif ;
cb - > args [ 1 ] = idx ;
}
/**
 * batadv_iv_ogm_neigh_cmp() - compare the metrics of two neighbors
 * @neigh1: the first neighbor object of the comparison
 * @if_outgoing1: outgoing interface for the first neighbor
 * @neigh2: the second neighbor object of the comparison
 * @if_outgoing2: outgoing interface for the second neighbor
 *
 * Return: a value less, equal to or greater than 0 if the metric via neigh1 is
 * lower, the same as or higher than the metric via neigh2
 */
static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
				   struct batadv_hard_iface *if_outgoing1,
				   struct batadv_neigh_node *neigh2,
				   struct batadv_hard_iface *if_outgoing2)
{
	int diff;

	/* treat an incomparable pair of neighbors as equal */
	if (!batadv_iv_ogm_neigh_diff(neigh1, if_outgoing1, neigh2,
				      if_outgoing2, &diff))
		return 0;

	return diff;
}
2013-09-02 14:15:05 +04:00
/**
2017-12-02 21:51:47 +03:00
* batadv_iv_ogm_neigh_is_sob ( ) - check if neigh1 is similarly good or better
2015-08-08 03:01:50 +03:00
* than neigh2 from the metric prospective
2013-09-02 14:15:05 +04:00
* @ neigh1 : the first neighbor object of the comparison
2014-07-15 11:41:05 +04:00
* @ if_outgoing1 : outgoing interface for the first neighbor
2013-09-02 14:15:05 +04:00
* @ neigh2 : the second neighbor object of the comparison
2013-11-13 22:14:46 +04:00
* @ if_outgoing2 : outgoing interface for the second neighbor
2014-07-15 11:41:05 +04:00
*
2015-09-15 20:00:48 +03:00
* Return : true if the metric via neigh1 is equally good or better than
2013-11-13 22:14:46 +04:00
* the metric via neigh2 , false otherwise .
2013-09-02 14:15:05 +04:00
*/
2013-11-13 22:14:46 +04:00
static bool
2015-08-08 03:01:50 +03:00
batadv_iv_ogm_neigh_is_sob ( struct batadv_neigh_node * neigh1 ,
2013-11-13 22:14:46 +04:00
struct batadv_hard_iface * if_outgoing1 ,
struct batadv_neigh_node * neigh2 ,
struct batadv_hard_iface * if_outgoing2 )
2013-09-02 14:15:05 +04:00
{
2013-11-13 22:14:46 +04:00
bool ret ;
2016-07-03 12:07:14 +03:00
int diff ;
2013-09-02 14:15:05 +04:00
2016-07-03 12:07:14 +03:00
ret = batadv_iv_ogm_neigh_diff ( neigh1 , if_outgoing1 , neigh2 ,
if_outgoing2 , & diff ) ;
if ( ! ret )
return false ;
2013-11-13 22:14:46 +04:00
2016-07-03 12:07:14 +03:00
ret = diff > - BATADV_TQ_SIMILARITY_THRESHOLD ;
2013-11-13 22:14:46 +04:00
return ret ;
2013-09-02 14:15:05 +04:00
}
/**
 * batadv_iv_iface_enabled() - hook called after a hard interface was enabled
 * @hard_iface: the interface which was enabled
 */
static void batadv_iv_iface_enabled(struct batadv_hard_iface *hard_iface)
{
	/* begin scheduling originator messages on that interface */
	batadv_iv_ogm_schedule(hard_iface);
}
2017-03-04 17:48:50 +03:00
/**
2017-12-02 21:51:47 +03:00
* batadv_iv_init_sel_class ( ) - initialize GW selection class
2017-03-04 17:48:50 +03:00
* @ bat_priv : the bat priv with all the soft interface information
*/
static void batadv_iv_init_sel_class ( struct batadv_priv * bat_priv )
{
/* set default TQ difference threshold to 20 */
atomic_set ( & bat_priv - > gw . sel_class , 20 ) ;
}
2016-07-03 13:46:33 +03:00
static struct batadv_gw_node *
batadv_iv_gw_get_best_gw_node ( struct batadv_priv * bat_priv )
{
struct batadv_neigh_node * router ;
struct batadv_neigh_ifinfo * router_ifinfo ;
struct batadv_gw_node * gw_node , * curr_gw = NULL ;
u64 max_gw_factor = 0 ;
u64 tmp_gw_factor = 0 ;
u8 max_tq = 0 ;
u8 tq_avg ;
struct batadv_orig_node * orig_node ;
rcu_read_lock ( ) ;
2016-07-27 13:31:08 +03:00
hlist_for_each_entry_rcu ( gw_node , & bat_priv - > gw . gateway_list , list ) {
2016-07-03 13:46:33 +03:00
orig_node = gw_node - > orig_node ;
router = batadv_orig_router_get ( orig_node , BATADV_IF_DEFAULT ) ;
if ( ! router )
continue ;
router_ifinfo = batadv_neigh_ifinfo_get ( router ,
BATADV_IF_DEFAULT ) ;
if ( ! router_ifinfo )
goto next ;
if ( ! kref_get_unless_zero ( & gw_node - > refcount ) )
goto next ;
tq_avg = router_ifinfo - > bat_iv . tq_avg ;
switch ( atomic_read ( & bat_priv - > gw . sel_class ) ) {
case 1 : /* fast connection */
tmp_gw_factor = tq_avg * tq_avg ;
tmp_gw_factor * = gw_node - > bandwidth_down ;
tmp_gw_factor * = 100 * 100 ;
tmp_gw_factor > > = 18 ;
2017-08-23 22:52:13 +03:00
if ( tmp_gw_factor > max_gw_factor | |
( tmp_gw_factor = = max_gw_factor & &
tq_avg > max_tq ) ) {
2021-08-08 20:11:08 +03:00
batadv_gw_node_put ( curr_gw ) ;
2016-07-03 13:46:33 +03:00
curr_gw = gw_node ;
kref_get ( & curr_gw - > refcount ) ;
}
break ;
default : /* 2: stable connection (use best statistic)
* 3 : fast - switch ( use best statistic but change as
* soon as a better gateway appears )
* XX : late - switch ( use best statistic but change as
* soon as a better gateway appears which has
* $ routing_class more tq points )
*/
if ( tq_avg > max_tq ) {
2021-08-08 20:11:08 +03:00
batadv_gw_node_put ( curr_gw ) ;
2016-07-03 13:46:33 +03:00
curr_gw = gw_node ;
kref_get ( & curr_gw - > refcount ) ;
}
break ;
}
if ( tq_avg > max_tq )
max_tq = tq_avg ;
if ( tmp_gw_factor > max_gw_factor )
max_gw_factor = tmp_gw_factor ;
batadv_gw_node_put ( gw_node ) ;
next :
batadv_neigh_node_put ( router ) ;
2021-08-08 20:11:08 +03:00
batadv_neigh_ifinfo_put ( router_ifinfo ) ;
2016-07-03 13:46:33 +03:00
}
rcu_read_unlock ( ) ;
return curr_gw ;
}
static bool batadv_iv_gw_is_eligible ( struct batadv_priv * bat_priv ,
struct batadv_orig_node * curr_gw_orig ,
struct batadv_orig_node * orig_node )
{
struct batadv_neigh_ifinfo * router_orig_ifinfo = NULL ;
struct batadv_neigh_ifinfo * router_gw_ifinfo = NULL ;
struct batadv_neigh_node * router_gw = NULL ;
struct batadv_neigh_node * router_orig = NULL ;
u8 gw_tq_avg , orig_tq_avg ;
bool ret = false ;
/* dynamic re-election is performed only on fast or late switch */
if ( atomic_read ( & bat_priv - > gw . sel_class ) < = 2 )
return false ;
router_gw = batadv_orig_router_get ( curr_gw_orig , BATADV_IF_DEFAULT ) ;
if ( ! router_gw ) {
ret = true ;
goto out ;
}
router_gw_ifinfo = batadv_neigh_ifinfo_get ( router_gw ,
BATADV_IF_DEFAULT ) ;
if ( ! router_gw_ifinfo ) {
ret = true ;
goto out ;
}
router_orig = batadv_orig_router_get ( orig_node , BATADV_IF_DEFAULT ) ;
if ( ! router_orig )
goto out ;
router_orig_ifinfo = batadv_neigh_ifinfo_get ( router_orig ,
BATADV_IF_DEFAULT ) ;
if ( ! router_orig_ifinfo )
goto out ;
gw_tq_avg = router_gw_ifinfo - > bat_iv . tq_avg ;
orig_tq_avg = router_orig_ifinfo - > bat_iv . tq_avg ;
/* the TQ value has to be better */
if ( orig_tq_avg < gw_tq_avg )
goto out ;
/* if the routing class is greater than 3 the value tells us how much
* greater the TQ value of the new gateway must be
*/
if ( ( atomic_read ( & bat_priv - > gw . sel_class ) > 3 ) & &
( orig_tq_avg - gw_tq_avg < atomic_read ( & bat_priv - > gw . sel_class ) ) )
goto out ;
batadv_dbg ( BATADV_DBG_BATMAN , bat_priv ,
" Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i) \n " ,
gw_tq_avg , orig_tq_avg ) ;
ret = true ;
out :
2021-08-08 20:11:08 +03:00
batadv_neigh_ifinfo_put ( router_gw_ifinfo ) ;
batadv_neigh_ifinfo_put ( router_orig_ifinfo ) ;
batadv_neigh_node_put ( router_gw ) ;
batadv_neigh_node_put ( router_orig ) ;
2016-07-03 13:46:33 +03:00
return ret ;
}
2016-07-03 14:31:43 +03:00
/**
2017-12-02 21:51:47 +03:00
* batadv_iv_gw_dump_entry ( ) - Dump a gateway into a message
2016-07-03 14:31:43 +03:00
* @ msg : Netlink message to dump into
* @ portid : Port making netlink request
2018-10-31 00:01:23 +03:00
* @ cb : Control block containing additional options
2016-07-03 14:31:43 +03:00
* @ bat_priv : The bat priv with all the soft interface information
* @ gw_node : Gateway to be dumped
*
* Return : Error code , or 0 on success
*/
2018-10-31 00:01:23 +03:00
static int batadv_iv_gw_dump_entry ( struct sk_buff * msg , u32 portid ,
struct netlink_callback * cb ,
2016-07-03 14:31:43 +03:00
struct batadv_priv * bat_priv ,
struct batadv_gw_node * gw_node )
{
struct batadv_neigh_ifinfo * router_ifinfo = NULL ;
struct batadv_neigh_node * router ;
2018-06-02 18:26:34 +03:00
struct batadv_gw_node * curr_gw = NULL ;
2018-02-19 16:08:52 +03:00
int ret = 0 ;
2016-07-03 14:31:43 +03:00
void * hdr ;
router = batadv_orig_router_get ( gw_node - > orig_node , BATADV_IF_DEFAULT ) ;
if ( ! router )
goto out ;
router_ifinfo = batadv_neigh_ifinfo_get ( router , BATADV_IF_DEFAULT ) ;
if ( ! router_ifinfo )
goto out ;
curr_gw = batadv_gw_get_selected_gw_node ( bat_priv ) ;
2018-10-31 00:01:23 +03:00
hdr = genlmsg_put ( msg , portid , cb - > nlh - > nlmsg_seq ,
& batadv_netlink_family , NLM_F_MULTI ,
BATADV_CMD_GET_GATEWAYS ) ;
2016-07-03 14:31:43 +03:00
if ( ! hdr ) {
ret = - ENOBUFS ;
goto out ;
}
2018-10-31 00:01:23 +03:00
genl_dump_check_consistent ( cb , hdr ) ;
2016-07-03 14:31:43 +03:00
ret = - EMSGSIZE ;
if ( curr_gw = = gw_node )
if ( nla_put_flag ( msg , BATADV_ATTR_FLAG_BEST ) ) {
genlmsg_cancel ( msg , hdr ) ;
goto out ;
}
if ( nla_put ( msg , BATADV_ATTR_ORIG_ADDRESS , ETH_ALEN ,
gw_node - > orig_node - > orig ) | |
nla_put_u8 ( msg , BATADV_ATTR_TQ , router_ifinfo - > bat_iv . tq_avg ) | |
nla_put ( msg , BATADV_ATTR_ROUTER , ETH_ALEN ,
router - > addr ) | |
nla_put_string ( msg , BATADV_ATTR_HARD_IFNAME ,
router - > if_incoming - > net_dev - > name ) | |
2021-05-10 16:05:42 +03:00
nla_put_u32 ( msg , BATADV_ATTR_HARD_IFINDEX ,
router - > if_incoming - > net_dev - > ifindex ) | |
2016-07-03 14:31:43 +03:00
nla_put_u32 ( msg , BATADV_ATTR_BANDWIDTH_DOWN ,
gw_node - > bandwidth_down ) | |
nla_put_u32 ( msg , BATADV_ATTR_BANDWIDTH_UP ,
gw_node - > bandwidth_up ) ) {
genlmsg_cancel ( msg , hdr ) ;
goto out ;
}
genlmsg_end ( msg , hdr ) ;
ret = 0 ;
out :
2021-08-08 20:11:08 +03:00
batadv_gw_node_put ( curr_gw ) ;
batadv_neigh_ifinfo_put ( router_ifinfo ) ;
batadv_neigh_node_put ( router ) ;
2016-07-03 14:31:43 +03:00
return ret ;
}
/**
2017-12-02 21:51:47 +03:00
* batadv_iv_gw_dump ( ) - Dump gateways into a message
2016-07-03 14:31:43 +03:00
* @ msg : Netlink message to dump into
* @ cb : Control block containing additional options
* @ bat_priv : The bat priv with all the soft interface information
*/
static void batadv_iv_gw_dump ( struct sk_buff * msg , struct netlink_callback * cb ,
struct batadv_priv * bat_priv )
{
int portid = NETLINK_CB ( cb - > skb ) . portid ;
struct batadv_gw_node * gw_node ;
int idx_skip = cb - > args [ 0 ] ;
int idx = 0 ;
2018-10-31 00:01:23 +03:00
spin_lock_bh ( & bat_priv - > gw . list_lock ) ;
cb - > seq = bat_priv - > gw . generation < < 1 | 1 ;
hlist_for_each_entry ( gw_node , & bat_priv - > gw . gateway_list , list ) {
2016-07-03 14:31:43 +03:00
if ( idx + + < idx_skip )
continue ;
2018-10-31 00:01:23 +03:00
if ( batadv_iv_gw_dump_entry ( msg , portid , cb , bat_priv ,
gw_node ) ) {
2016-07-03 14:31:43 +03:00
idx_skip = idx - 1 ;
goto unlock ;
}
}
idx_skip = idx ;
unlock :
2018-10-31 00:01:23 +03:00
spin_unlock_bh ( & bat_priv - > gw . list_lock ) ;
2016-07-03 14:31:43 +03:00
cb - > args [ 0 ] = idx_skip ;
}
2012-06-06 00:31:31 +04:00
static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
2012-04-18 13:15:57 +04:00
. name = " BATMAN_IV " ,
2016-05-25 18:27:31 +03:00
. iface = {
. enable = batadv_iv_ogm_iface_enable ,
2019-06-02 11:57:31 +03:00
. enabled = batadv_iv_iface_enabled ,
2016-05-25 18:27:31 +03:00
. disable = batadv_iv_ogm_iface_disable ,
. update_mac = batadv_iv_ogm_iface_update_mac ,
. primary_set = batadv_iv_ogm_primary_iface_set ,
} ,
. neigh = {
. cmp = batadv_iv_ogm_neigh_cmp ,
. is_similar_or_better = batadv_iv_ogm_neigh_is_sob ,
2016-07-03 14:31:40 +03:00
. dump = batadv_iv_ogm_neigh_dump ,
2016-05-25 18:27:31 +03:00
} ,
. orig = {
2016-07-03 14:31:40 +03:00
. dump = batadv_iv_ogm_orig_dump ,
2016-05-25 18:27:31 +03:00
} ,
2016-07-03 13:46:33 +03:00
. gw = {
2017-03-04 17:48:50 +03:00
. init_sel_class = batadv_iv_init_sel_class ,
2016-07-03 13:46:33 +03:00
. get_best_gw_node = batadv_iv_gw_get_best_gw_node ,
. is_eligible = batadv_iv_gw_is_eligible ,
2016-07-03 14:31:43 +03:00
. dump = batadv_iv_gw_dump ,
2016-07-03 13:46:33 +03:00
} ,
2011-11-28 13:40:17 +04:00
} ;
2017-12-02 21:51:53 +03:00
/**
* batadv_iv_init ( ) - B . A . T . M . A . N . IV initialization function
*
* Return : 0 on success or negative error number in case of failure
*/
2012-05-12 04:09:22 +04:00
int __init batadv_iv_init ( void )
2011-11-28 13:40:17 +04:00
{
2012-03-04 12:56:25 +04:00
int ret ;
/* batman originator packet */
2012-06-04 00:19:21 +04:00
ret = batadv_recv_handler_register ( BATADV_IV_OGM ,
batadv_iv_ogm_receive ) ;
2012-03-04 12:56:25 +04:00
if ( ret < 0 )
goto out ;
2012-05-12 20:33:51 +04:00
ret = batadv_algo_register ( & batadv_batman_iv ) ;
2012-03-04 12:56:25 +04:00
if ( ret < 0 )
goto handler_unregister ;
goto out ;
handler_unregister :
2012-06-04 00:19:21 +04:00
batadv_recv_handler_unregister ( BATADV_IV_OGM ) ;
2012-03-04 12:56:25 +04:00
out :
return ret ;
2011-11-28 13:40:17 +04:00
}