// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 */

#include "originator.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "bat_algo.h"
#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "multicast.h"
#include "netlink.h"
#include "network-coding.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

/**
 * batadv_orig_hash_find() - Find and return originator from orig_hash
 * @bat_priv: the bat priv with all the soft interface information
 * @data: mac address of the originator
 *
 * Return: orig_node (with increased refcnt), NULL on errors
 */
struct batadv_orig_node *
batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = batadv_choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
		if (!batadv_compare_eth(orig_node, data))
			continue;

		if (!kref_get_unless_zero(&orig_node->refcount))
			continue;

		orig_node_tmp = orig_node;
		break;
	}
	rcu_read_unlock();

	return orig_node_tmp;
}
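
/* Illustrative usage sketch (an assumption, not code taken from this file):
 * a caller is expected to balance the reference taken by
 * batadv_orig_hash_find() with batadv_orig_node_put() once it is done with
 * the originator. The "ethhdr" source address below is only a hypothetical
 * example input.
 *
 *	orig_node = batadv_orig_hash_find(bat_priv, ethhdr->h_source);
 *	if (!orig_node)
 *		return;
 *	... use orig_node under the acquired reference ...
 *	batadv_orig_node_put(orig_node);
 */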

static void batadv_purge_orig(struct work_struct *work);

/**
 * batadv_compare_orig() - comparing function used in the originator hash table
 * @node: node in the local table
 * @data2: second object to compare the node to
 *
 * Return: true if they are the same originator
 */
bool batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return batadv_compare_eth(data1, data2);
}

/**
 * batadv_orig_node_vlan_get() - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Return: the vlan object identified by vid and belonging to orig_node or
 * NULL if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;

		if (!kref_get_unless_zero(&tmp->refcount))
			continue;

		vlan = tmp;
		break;
	}
	rcu_read_unlock();

	return vlan;
}

/**
 * batadv_orig_node_vlan_new() - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Return: NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;

	kref_init(&vlan->refcount);
	vlan->vid = vid;

	kref_get(&vlan->refcount);
	hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}
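
/* Usage sketch (an assumption, not code from this file): references obtained
 * through batadv_orig_node_vlan_get()/_new() have to be dropped by the caller
 * with batadv_orig_node_vlan_put() once the per-VLAN state is no longer
 * needed, e.g.:
 *
 *	vlan = batadv_orig_node_vlan_new(orig_node, vid);
 *	if (!vlan)
 *		return -ENOMEM;
 *	... read or update the per-VLAN state ...
 *	batadv_orig_node_vlan_put(vlan);
 */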

/**
 * batadv_orig_node_vlan_release() - release originator-vlan object from lists
 *  and queue for free after rcu grace period
 * @ref: kref pointer of the originator-vlan object
 */
void batadv_orig_node_vlan_release(struct kref *ref)
{
	struct batadv_orig_node_vlan *orig_vlan;

	orig_vlan = container_of(ref, struct batadv_orig_node_vlan, refcount);

	kfree_rcu(orig_vlan, rcu);
}

/**
 * batadv_originator_init() - Initialize all originator structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: 0 on success or negative error number in case of failure
 */
int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}

/**
 * batadv_neigh_ifinfo_release() - release neigh_ifinfo from lists and queue
 *  for free after rcu grace period
 * @ref: kref pointer of the neigh_ifinfo
 */
void batadv_neigh_ifinfo_release(struct kref *ref)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	neigh_ifinfo = container_of(ref, struct batadv_neigh_ifinfo, refcount);

	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_put(neigh_ifinfo->if_outgoing);

	kfree_rcu(neigh_ifinfo, rcu);
}

/**
 * batadv_hardif_neigh_release() - release hardif neigh node from lists and
 *  queue for free after rcu grace period
 * @ref: kref pointer of the neigh_node
 */
void batadv_hardif_neigh_release(struct kref *ref)
{
	struct batadv_hardif_neigh_node *hardif_neigh;

	hardif_neigh = container_of(ref, struct batadv_hardif_neigh_node,
				    refcount);

	spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
	hlist_del_init_rcu(&hardif_neigh->list);
	spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);

	batadv_hardif_put(hardif_neigh->if_incoming);
	kfree_rcu(hardif_neigh, rcu);
}

/**
 * batadv_neigh_node_release() - release neigh_node from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the neigh_node
 */
void batadv_neigh_node_release(struct kref *ref)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	neigh_node = container_of(ref, struct batadv_neigh_node, refcount);

	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh_node->ifinfo_list, list) {
		batadv_neigh_ifinfo_put(neigh_ifinfo);
	}

	batadv_hardif_neigh_put(neigh_node->hardif_neigh);

	batadv_hardif_put(neigh_node->if_incoming);

	kfree_rcu(neigh_node, rcu);
}

/**
 * batadv_orig_router_get() - router to the originator depending on iface
 * @orig_node: the orig node for the router
 * @if_outgoing: the interface where the payload packet has been received or
 *  the OGM should be sent to
 *
 * Return: the neighbor which should be the router for this orig_node/iface.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
		       const struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
		if (orig_ifinfo->if_outgoing != if_outgoing)
			continue;

		router = rcu_dereference(orig_ifinfo->router);
		break;
	}

	if (router && !kref_get_unless_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}
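
/* Sketch of the expected calling pattern (an assumption, not part of this
 * file): "recv_if" stands for whatever hard interface the caller is handling.
 *
 *	router = batadv_orig_router_get(orig_node, recv_if);
 *	if (!router)
 *		goto out;
 *	... forward the packet towards router->addr via router->if_incoming ...
 *	batadv_neigh_node_put(router);
 */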

/**
 * batadv_orig_ifinfo_get() - find the ifinfo from an orig_node
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Return: the requested orig_ifinfo or NULL if not found.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
				 list) {
		if (tmp->if_outgoing != if_outgoing)
			continue;

		if (!kref_get_unless_zero(&tmp->refcount))
			continue;

		orig_ifinfo = tmp;
		break;
	}
	rcu_read_unlock();

	return orig_ifinfo;
}

/**
 * batadv_orig_ifinfo_new() - search and possibly create an orig_ifinfo object
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Return: NULL in case of failure or the orig_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	unsigned long reset_time;

	spin_lock_bh(&orig_node->neigh_list_lock);

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
	if (orig_ifinfo)
		goto out;

	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
	if (!orig_ifinfo)
		goto out;

	if (if_outgoing != BATADV_IF_DEFAULT)
		kref_get(&if_outgoing->refcount);

	reset_time = jiffies - 1;
	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_ifinfo->batman_seqno_reset = reset_time;
	orig_ifinfo->if_outgoing = if_outgoing;
	INIT_HLIST_NODE(&orig_ifinfo->list);
	kref_init(&orig_ifinfo->refcount);

	kref_get(&orig_ifinfo->refcount);
	hlist_add_head_rcu(&orig_ifinfo->list,
			   &orig_node->ifinfo_list);
out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return orig_ifinfo;
}
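
/* Illustrative pairing (an assumption, not code from this file): a reference
 * obtained through batadv_orig_ifinfo_get()/_new() is released with
 * batadv_orig_ifinfo_put():
 *
 *	orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
 *	if (!orig_ifinfo)
 *		return;
 *	... update the per-interface routing state of this originator ...
 *	batadv_orig_ifinfo_put(orig_ifinfo);
 */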

/**
 * batadv_neigh_ifinfo_get() - find the ifinfo from a neigh_node
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Return: the requested neigh_ifinfo or NULL if not found
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
				   *tmp_neigh_ifinfo;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
				 list) {
		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
			continue;

		if (!kref_get_unless_zero(&tmp_neigh_ifinfo->refcount))
			continue;

		neigh_ifinfo = tmp_neigh_ifinfo;
		break;
	}
	rcu_read_unlock();

	return neigh_ifinfo;
}

/**
 * batadv_neigh_ifinfo_new() - search and possibly create a neigh_ifinfo object
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Return: NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	spin_lock_bh(&neigh->ifinfo_lock);

	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
	if (neigh_ifinfo)
		goto out;

	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
	if (!neigh_ifinfo)
		goto out;

	if (if_outgoing)
		kref_get(&if_outgoing->refcount);

	INIT_HLIST_NODE(&neigh_ifinfo->list);
	kref_init(&neigh_ifinfo->refcount);
	neigh_ifinfo->if_outgoing = if_outgoing;

	kref_get(&neigh_ifinfo->refcount);
	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

out:
	spin_unlock_bh(&neigh->ifinfo_lock);

	return neigh_ifinfo;
}
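
/* Illustrative pairing (an assumption, not code from this file): neigh_ifinfo
 * references taken via batadv_neigh_ifinfo_get()/_new() are dropped with
 * batadv_neigh_ifinfo_put():
 *
 *	neigh_ifinfo = batadv_neigh_ifinfo_new(neigh, if_outgoing);
 *	if (!neigh_ifinfo)
 *		return;
 *	... update the per-interface metric of this neighbour ...
 *	batadv_neigh_ifinfo_put(neigh_ifinfo);
 */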

/**
 * batadv_neigh_node_get() - retrieve a neighbour from the list
 * @orig_node: originator which the neighbour belongs to
 * @hard_iface: the interface where this neighbour is connected to
 * @addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this originator
 * list which is connected through the provided hard interface.
 *
 * Return: neighbor when found. Otherwise NULL
 */
static struct batadv_neigh_node *
batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
		      const struct batadv_hard_iface *hard_iface,
		      const u8 *addr)
{
	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
		if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
			continue;

		if (tmp_neigh_node->if_incoming != hard_iface)
			continue;

		if (!kref_get_unless_zero(&tmp_neigh_node->refcount))
			continue;

		res = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	return res;
}

/**
 * batadv_hardif_neigh_create() - create a hardif neighbour node
 * @hard_iface: the interface this neighbour is connected to
 * @neigh_addr: the interface address of the neighbour to retrieve
 * @orig_node: originator object representing the neighbour
 *
 * Return: the hardif neighbour node if found or created or NULL otherwise.
 */
static struct batadv_hardif_neigh_node *
batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
			   const u8 *neigh_addr,
			   struct batadv_orig_node *orig_node)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hardif_neigh_node *hardif_neigh;

	spin_lock_bh(&hard_iface->neigh_list_lock);

	/* check if neighbor hasn't been added in the meantime */
	hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
	if (hardif_neigh)
		goto out;

	hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
	if (!hardif_neigh)
		goto out;

	kref_get(&hard_iface->refcount);
	INIT_HLIST_NODE(&hardif_neigh->list);
	ether_addr_copy(hardif_neigh->addr, neigh_addr);
	ether_addr_copy(hardif_neigh->orig, orig_node->orig);
	hardif_neigh->if_incoming = hard_iface;
	hardif_neigh->last_seen = jiffies;

	kref_init(&hardif_neigh->refcount);

	if (bat_priv->algo_ops->neigh.hardif_init)
		bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);

	hlist_add_head_rcu(&hardif_neigh->list, &hard_iface->neigh_list);

out:
	spin_unlock_bh(&hard_iface->neigh_list_lock);
	return hardif_neigh;
}

/**
 * batadv_hardif_neigh_get_or_create() - retrieve or create a hardif neighbour
 *  node
 * @hard_iface: the interface this neighbour is connected to
 * @neigh_addr: the interface address of the neighbour to retrieve
 * @orig_node: originator object representing the neighbour
 *
 * Return: the hardif neighbour node if found or created or NULL otherwise.
 */
static struct batadv_hardif_neigh_node *
batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
				  const u8 *neigh_addr,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_hardif_neigh_node *hardif_neigh;

	/* first check without locking to avoid the overhead */
	hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
	if (hardif_neigh)
		return hardif_neigh;

	return batadv_hardif_neigh_create(hard_iface, neigh_addr, orig_node);
}

/**
 * batadv_hardif_neigh_get() - retrieve a hardif neighbour from the list
 * @hard_iface: the interface where this neighbour is connected to
 * @neigh_addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this hard interface.
 *
 * Return: neighbor when found. Otherwise NULL
 */
struct batadv_hardif_neigh_node *
batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
			const u8 *neigh_addr)
{
	struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_hardif_neigh,
				 &hard_iface->neigh_list, list) {
		if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr))
			continue;

		if (!kref_get_unless_zero(&tmp_hardif_neigh->refcount))
			continue;

		hardif_neigh = tmp_hardif_neigh;
		break;
	}
	rcu_read_unlock();

	return hardif_neigh;
}
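
/* Usage sketch (an assumption, not code from this file): the "ethhdr" source
 * address is only a hypothetical example input.
 *
 *	hardif_neigh = batadv_hardif_neigh_get(hard_iface, ethhdr->h_source);
 *	if (!hardif_neigh)
 *		goto out;
 *	hardif_neigh->last_seen = jiffies;
 *	batadv_hardif_neigh_put(hardif_neigh);
 */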

/**
 * batadv_neigh_node_create() - create a neigh node object
 * @orig_node: originator object representing the neighbour
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 *
 * Return: the neighbour node if found or created or NULL otherwise.
 */
static struct batadv_neigh_node *
batadv_neigh_node_create(struct batadv_orig_node *orig_node,
			 struct batadv_hard_iface *hard_iface,
			 const u8 *neigh_addr)
{
	struct batadv_neigh_node *neigh_node;
	struct batadv_hardif_neigh_node *hardif_neigh = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
	if (neigh_node)
		goto out;

	hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
							 neigh_addr, orig_node);
	if (!hardif_neigh)
		goto out;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
	spin_lock_init(&neigh_node->ifinfo_lock);

	kref_get(&hard_iface->refcount);
	ether_addr_copy(neigh_node->addr, neigh_addr);
	neigh_node->if_incoming = hard_iface;
	neigh_node->orig_node = orig_node;
	neigh_node->last_seen = jiffies;

	/* increment unique neighbor refcount */
	kref_get(&hardif_neigh->refcount);
	neigh_node->hardif_neigh = hardif_neigh;

	/* extra reference for return */
	kref_init(&neigh_node->refcount);

	kref_get(&neigh_node->refcount);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);

	batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);

	batadv_hardif_neigh_put(hardif_neigh);

	return neigh_node;
}

/**
 * batadv_neigh_node_get_or_create() - retrieve or create a neigh node object
 * @orig_node: originator object representing the neighbour
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 *
 * Return: the neighbour node if found or created or NULL otherwise.
 */
struct batadv_neigh_node *
batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node,
				struct batadv_hard_iface *hard_iface,
				const u8 *neigh_addr)
{
	struct batadv_neigh_node *neigh_node;

	/* first check without locking to avoid the overhead */
	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
	if (neigh_node)
		return neigh_node;

	return batadv_neigh_node_create(orig_node, hard_iface, neigh_addr);
}
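
/* Usage sketch (an assumption, not code from this file): protocol code would
 * typically look up or create the neighbour once per received packet and drop
 * the reference afterwards. "if_incoming" and "ethhdr" are hypothetical
 * example inputs.
 *
 *	neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming,
 *						      ethhdr->h_source);
 *	if (!neigh_node)
 *		goto out;
 *	neigh_node->last_seen = jiffies;
 *	batadv_neigh_node_put(neigh_node);
 */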

/**
 * batadv_hardif_neigh_dump() - Dump to netlink the neighbor infos for a
 *  specific outgoing interface
 * @msg: message to dump into
 * @cb: parameters for the dump
 *
 * Return: 0 or error value
 */
int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct net_device *hard_iface = NULL;
	struct batadv_hard_iface *hardif = BATADV_IF_DEFAULT;
	struct batadv_priv *bat_priv;
	struct batadv_hard_iface *primary_if = NULL;
	int ret;
	int ifindex, hard_ifindex;

	ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

	hard_ifindex = batadv_netlink_get_ifindex(cb->nlh,
						  BATADV_ATTR_HARD_IFINDEX);
	if (hard_ifindex) {
		hard_iface = dev_get_by_index(net, hard_ifindex);
		if (hard_iface)
			hardif = batadv_hardif_get_by_netdev(hard_iface);

		if (!hardif) {
			ret = -ENODEV;
			goto out;
		}

		if (hardif->soft_iface != soft_iface) {
			ret = -ENOENT;
			goto out;
		}
	}

	if (!bat_priv->algo_ops->neigh.dump) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	bat_priv->algo_ops->neigh.dump(msg, cb, bat_priv, hardif);

	ret = msg->len;

out:
	batadv_hardif_put(hardif);
	dev_put(hard_iface);
	batadv_hardif_put(primary_if);
	dev_put(soft_iface);

	return ret;
}

/**
 * batadv_orig_ifinfo_release() - release orig_ifinfo from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the orig_ifinfo
 */
void batadv_orig_ifinfo_release(struct kref *ref)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router;

	orig_ifinfo = container_of(ref, struct batadv_orig_ifinfo, refcount);

	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_put(orig_ifinfo->if_outgoing);

	/* this is the last reference to this object */
	router = rcu_dereference_protected(orig_ifinfo->router, true);
	batadv_neigh_node_put(router);

	kfree_rcu(orig_ifinfo, rcu);
}

/**
 * batadv_orig_node_free_rcu() - free the orig_node
 * @rcu: rcu pointer of the orig_node
 */
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	batadv_mcast_purge_orig(orig_node);

	batadv_frag_purge_orig(orig_node, NULL);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}

/**
 * batadv_orig_node_release() - release orig_node from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the orig_node
 */
void batadv_orig_node_release(struct kref *ref)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_orig_node *orig_node;
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_orig_node_vlan *vlan;
	struct batadv_orig_ifinfo *last_candidate;

	orig_node = container_of(ref, struct batadv_orig_node, refcount);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_put(neigh_node);
	}

	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_put(orig_ifinfo);
	}

	last_candidate = orig_node->last_bonding_candidate;
	orig_node->last_bonding_candidate = NULL;
	spin_unlock_bh(&orig_node->neigh_list_lock);

	batadv_orig_ifinfo_put(last_candidate);

	spin_lock_bh(&orig_node->vlan_list_lock);
	hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) {
		hlist_del_rcu(&vlan->list);
		batadv_orig_node_vlan_put(vlan);
	}
	spin_unlock_bh(&orig_node->vlan_list_lock);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_originator_free() - Free all originator structures
 * @bat_priv: the bat priv with all the soft interface information
 */
void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_put(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}
2013-09-02 14:15:02 +04:00
/**
2017-12-02 21:51:47 +03:00
* batadv_orig_node_new ( ) - creates a new orig_node
2013-09-02 14:15:02 +04:00
* @ bat_priv : the bat priv with all the soft interface information
* @ addr : the mac address of the originator
*
2020-06-01 21:13:21 +03:00
* Creates a new originator object and initialises all the generic fields .
2013-09-02 14:15:02 +04:00
* The new object is not added to the originator list .
2015-09-15 20:00:48 +03:00
*
* Return : the newly created object or NULL on failure .
2012-05-12 04:09:43 +04:00
*/
2013-09-02 14:15:02 +04:00
struct batadv_orig_node * batadv_orig_node_new ( struct batadv_priv * bat_priv ,
2015-05-26 19:34:26 +03:00
const u8 * addr )
2010-12-13 14:19:28 +03:00
{
2012-06-06 00:31:31 +04:00
struct batadv_orig_node * orig_node ;
2013-07-31 00:16:25 +04:00
struct batadv_orig_node_vlan * vlan ;
2012-06-04 00:19:17 +04:00
unsigned long reset_time ;
2013-09-02 14:15:02 +04:00
int i ;
2010-12-13 14:19:28 +03:00
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_BATMAN , bat_priv ,
" Creating new originator: %pM \n " , addr ) ;
2010-12-13 14:19:28 +03:00
2011-05-15 01:14:54 +04:00
orig_node = kzalloc ( sizeof ( * orig_node ) , GFP_ATOMIC ) ;
2010-12-13 14:19:28 +03:00
if ( ! orig_node )
return NULL ;
2010-12-13 00:57:11 +03:00
INIT_HLIST_HEAD ( & orig_node - > neigh_list ) ;
2015-06-21 19:30:22 +03:00
INIT_HLIST_HEAD ( & orig_node - > vlan_list ) ;
2013-11-13 22:14:47 +04:00
INIT_HLIST_HEAD ( & orig_node - > ifinfo_list ) ;
2011-01-26 00:52:11 +03:00
spin_lock_init ( & orig_node - > bcast_seqno_lock ) ;
2010-12-13 00:57:12 +03:00
spin_lock_init ( & orig_node - > neigh_list_lock ) ;
2011-04-27 16:27:44 +04:00
spin_lock_init ( & orig_node - > tt_buff_lock ) ;
2013-07-31 00:16:24 +04:00
spin_lock_init ( & orig_node - > tt_lock ) ;
2013-07-31 00:16:25 +04:00
spin_lock_init ( & orig_node - > vlan_list_lock ) ;
2011-02-18 15:28:10 +03:00
2013-01-25 14:12:39 +04:00
batadv_nc_init_orig ( orig_node ) ;
2011-02-18 15:28:10 +03:00
/* extra reference for return */
2016-01-16 12:29:56 +03:00
kref_init ( & orig_node - > refcount ) ;
2010-12-13 14:19:28 +03:00
2011-01-19 23:01:42 +03:00
orig_node - > bat_priv = bat_priv ;
2014-01-22 03:42:11 +04:00
ether_addr_copy ( orig_node - > orig , addr ) ;
2011-11-23 14:35:44 +04:00
batadv_dat_init_orig_node_addr ( orig_node ) ;
2011-07-07 03:40:57 +04:00
atomic_set ( & orig_node - > last_ttvn , 0 ) ;
2011-05-05 10:42:45 +04:00
orig_node - > tt_buff = NULL ;
2011-04-27 16:27:44 +04:00
orig_node - > tt_buff_len = 0 ;
2014-10-30 08:23:40 +03:00
orig_node - > last_seen = jiffies ;
2012-06-04 00:19:17 +04:00
reset_time = jiffies - 1 - msecs_to_jiffies ( BATADV_RESET_PROTECTION_MS ) ;
orig_node - > bcast_seqno_reset = reset_time ;
2015-06-16 18:10:26 +03:00
2014-02-15 20:47:51 +04:00
# ifdef CONFIG_BATMAN_ADV_MCAST
2019-06-11 23:58:40 +03:00
orig_node - > mcast_flags = BATADV_MCAST_WANT_NO_RTR4 ;
orig_node - > mcast_flags | = BATADV_MCAST_WANT_NO_RTR6 ;
2015-06-16 18:10:26 +03:00
INIT_HLIST_NODE ( & orig_node - > mcast_want_all_unsnoopables_node ) ;
INIT_HLIST_NODE ( & orig_node - > mcast_want_all_ipv4_node ) ;
INIT_HLIST_NODE ( & orig_node - > mcast_want_all_ipv6_node ) ;
spin_lock_init ( & orig_node - > mcast_handler_lock ) ;
2014-02-15 20:47:51 +04:00
# endif
2010-12-13 14:19:28 +03:00
2013-07-31 00:16:25 +04:00
/* create a vlan object for the "untagged" LAN */
vlan = batadv_orig_node_vlan_new ( orig_node , BATADV_NO_FLAGS ) ;
if ( ! vlan )
goto free_orig_node ;
/* batadv_orig_node_vlan_new() increases the refcounter.
* Immediately release vlan since it is not needed anymore in this
* context
*/
2016-01-17 13:01:24 +03:00
batadv_orig_node_vlan_put ( vlan ) ;
2013-07-31 00:16:25 +04:00
2013-05-23 18:53:02 +04:00
for ( i = 0 ; i < BATADV_FRAG_BUFFER_COUNT ; i + + ) {
2016-07-27 13:31:07 +03:00
INIT_HLIST_HEAD ( & orig_node - > fragments [ i ] . fragment_list ) ;
2013-05-23 18:53:02 +04:00
spin_lock_init ( & orig_node - > fragments [ i ] . lock ) ;
orig_node - > fragments [ i ] . size = 0 ;
}
2010-12-13 14:19:28 +03:00
return orig_node ;
free_orig_node :
kfree ( orig_node ) ;
return NULL ;
}
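/* Usage sketch (illustrative only, not part of the original file): the node
 * returned by batadv_orig_node_new() carries an extra reference for the
 * caller, so batadv_orig_node_put() must be called once the caller is done
 * with it. The helper name below is hypothetical.
 */
static void batadv_example_orig_node_usage(struct batadv_priv *bat_priv,
					   const u8 *addr)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_orig_node_new(bat_priv, addr);
	if (!orig_node)
		return;

	/* ... initialise algorithm specific fields and/or insert the node
	 * into bat_priv->orig_hash here ...
	 */

	/* drop the reference taken for the return value */
	batadv_orig_node_put(orig_node);
}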
2014-03-26 18:46:24 +04:00
/**
2017-12-02 21:51:47 +03:00
* batadv_purge_neigh_ifinfo ( ) - purge obsolete ifinfo entries from neighbor
2014-03-26 18:46:24 +04:00
* @ bat_priv : the bat priv with all the soft interface information
* @ neigh : neighbor node whose ifinfo entries are to be checked
*/
static void
batadv_purge_neigh_ifinfo ( struct batadv_priv * bat_priv ,
struct batadv_neigh_node * neigh )
{
struct batadv_neigh_ifinfo * neigh_ifinfo ;
struct batadv_hard_iface * if_outgoing ;
struct hlist_node * node_tmp ;
spin_lock_bh ( & neigh - > ifinfo_lock ) ;
/* for all ifinfo objects of this neighbor */
hlist_for_each_entry_safe ( neigh_ifinfo , node_tmp ,
& neigh - > ifinfo_list , list ) {
if_outgoing = neigh_ifinfo - > if_outgoing ;
/* always keep the default interface */
if ( if_outgoing = = BATADV_IF_DEFAULT )
continue ;
/* don't purge if the interface is not (going) down */
2017-08-23 22:52:13 +03:00
if ( if_outgoing - > if_status ! = BATADV_IF_INACTIVE & &
if_outgoing - > if_status ! = BATADV_IF_NOT_IN_USE & &
if_outgoing - > if_status ! = BATADV_IF_TO_BE_REMOVED )
2014-03-26 18:46:24 +04:00
continue ;
batadv_dbg ( BATADV_DBG_BATMAN , bat_priv ,
" neighbor/ifinfo purge: neighbor %pM, iface: %s \n " ,
neigh - > addr , if_outgoing - > net_dev - > name ) ;
hlist_del_rcu ( & neigh_ifinfo - > list ) ;
2016-01-17 13:01:12 +03:00
batadv_neigh_ifinfo_put ( neigh_ifinfo ) ;
2014-03-26 18:46:24 +04:00
}
spin_unlock_bh ( & neigh - > ifinfo_lock ) ;
}
2013-11-13 22:14:47 +04:00
/**
2017-12-02 21:51:47 +03:00
* batadv_purge_orig_ifinfo ( ) - purge obsolete ifinfo entries from originator
2013-11-13 22:14:47 +04:00
* @ bat_priv : the bat priv with all the soft interface information
* @ orig_node : orig node which is to be checked
*
2015-09-15 20:00:48 +03:00
* Return : true if any ifinfo entry was purged , false otherwise .
2013-11-13 22:14:47 +04:00
*/
static bool
batadv_purge_orig_ifinfo ( struct batadv_priv * bat_priv ,
struct batadv_orig_node * orig_node )
{
struct batadv_orig_ifinfo * orig_ifinfo ;
struct batadv_hard_iface * if_outgoing ;
struct hlist_node * node_tmp ;
bool ifinfo_purged = false ;
spin_lock_bh ( & orig_node - > neigh_list_lock ) ;
/* for all ifinfo objects for this originator */
hlist_for_each_entry_safe ( orig_ifinfo , node_tmp ,
& orig_node - > ifinfo_list , list ) {
if_outgoing = orig_ifinfo - > if_outgoing ;
/* always keep the default interface */
if ( if_outgoing = = BATADV_IF_DEFAULT )
continue ;
/* don't purge if the interface is not (going) down */
2017-08-23 22:52:13 +03:00
if ( if_outgoing - > if_status ! = BATADV_IF_INACTIVE & &
if_outgoing - > if_status ! = BATADV_IF_NOT_IN_USE & &
if_outgoing - > if_status ! = BATADV_IF_TO_BE_REMOVED )
2013-11-13 22:14:47 +04:00
continue ;
batadv_dbg ( BATADV_DBG_BATMAN , bat_priv ,
" router/ifinfo purge: originator %pM, iface: %s \n " ,
orig_node - > orig , if_outgoing - > net_dev - > name ) ;
ifinfo_purged = true ;
hlist_del_rcu ( & orig_ifinfo - > list ) ;
2016-01-17 13:01:13 +03:00
batadv_orig_ifinfo_put ( orig_ifinfo ) ;
2013-11-13 22:14:50 +04:00
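/* if this ifinfo is also cached as last_bonding_candidate, drop the
 * additional reference held by that cache and clear the stale pointer
 */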
if ( orig_node - > last_bonding_candidate = = orig_ifinfo ) {
orig_node - > last_bonding_candidate = NULL ;
2016-01-17 13:01:13 +03:00
batadv_orig_ifinfo_put ( orig_ifinfo ) ;
2013-11-13 22:14:50 +04:00
}
2013-11-13 22:14:47 +04:00
}
spin_unlock_bh ( & orig_node - > neigh_list_lock ) ;
return ifinfo_purged ;
}
2013-11-13 22:14:46 +04:00
/**
2017-12-02 21:51:47 +03:00
* batadv_purge_orig_neighbors ( ) - purges neighbors from originator
2013-11-13 22:14:46 +04:00
* @ bat_priv : the bat priv with all the soft interface information
* @ orig_node : orig node which is to be checked
*
2015-09-15 20:00:48 +03:00
* Return : true if any neighbor was purged , false otherwise
2013-11-13 22:14:46 +04:00
*/
2012-06-06 00:31:31 +04:00
static bool
batadv_purge_orig_neighbors ( struct batadv_priv * bat_priv ,
2013-11-13 22:14:46 +04:00
struct batadv_orig_node * orig_node )
2010-12-13 14:19:28 +03:00
{
2013-02-28 05:06:00 +04:00
struct hlist_node * node_tmp ;
2012-06-06 00:31:31 +04:00
struct batadv_neigh_node * neigh_node ;
2010-12-13 14:19:28 +03:00
bool neigh_purged = false ;
2012-03-01 11:35:20 +04:00
unsigned long last_seen ;
2012-06-06 00:31:31 +04:00
struct batadv_hard_iface * if_incoming ;
2010-12-13 14:19:28 +03:00
2010-12-13 00:57:12 +03:00
spin_lock_bh ( & orig_node - > neigh_list_lock ) ;
2010-12-13 14:19:28 +03:00
/* for all neighbors towards this originator ... */
2013-02-28 05:06:00 +04:00
hlist_for_each_entry_safe ( neigh_node , node_tmp ,
2010-12-13 00:57:11 +03:00
& orig_node - > neigh_list , list ) {
2012-05-12 15:48:58 +04:00
last_seen = neigh_node - > last_seen ;
if_incoming = neigh_node - > if_incoming ;
2017-08-23 22:52:13 +03:00
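/* purge the neighbor if it timed out or if its incoming interface is
 * inactive, unused or about to be removed; the nested status check
 * below only selects the matching debug message
 */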
if ( batadv_has_timed_out ( last_seen , BATADV_PURGE_TIMEOUT ) | |
if_incoming - > if_status = = BATADV_IF_INACTIVE | |
if_incoming - > if_status = = BATADV_IF_NOT_IN_USE | |
if_incoming - > if_status = = BATADV_IF_TO_BE_REMOVED ) {
if ( if_incoming - > if_status = = BATADV_IF_INACTIVE | |
if_incoming - > if_status = = BATADV_IF_NOT_IN_USE | |
if_incoming - > if_status = = BATADV_IF_TO_BE_REMOVED )
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_BATMAN , bat_priv ,
2012-05-12 15:48:58 +04:00
" neighbor purge: originator %pM, neighbor: %pM, iface: %s \n " ,
orig_node - > orig , neigh_node - > addr ,
if_incoming - > net_dev - > name ) ;
2010-12-13 14:19:28 +03:00
else
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_BATMAN , bat_priv ,
2012-05-12 15:48:58 +04:00
" neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u \n " ,
orig_node - > orig , neigh_node - > addr ,
jiffies_to_msecs ( last_seen ) ) ;
2010-12-13 14:19:28 +03:00
neigh_purged = true ;
2010-12-13 00:57:11 +03:00
2010-12-13 00:57:12 +03:00
hlist_del_rcu ( & neigh_node - > list ) ;
2016-01-17 13:01:11 +03:00
batadv_neigh_node_put ( neigh_node ) ;
2014-03-26 18:46:24 +04:00
} else {
/* only necessary when the neighbor itself is kept but one of
* its interfaces has been removed
*/
batadv_purge_neigh_ifinfo ( bat_priv , neigh_node ) ;
2010-12-13 14:19:28 +03:00
}
}
2010-12-13 00:57:12 +03:00
spin_unlock_bh ( & orig_node - > neigh_list_lock ) ;
2010-12-13 14:19:28 +03:00
return neigh_purged ;
}
2013-11-13 22:14:46 +04:00
/**
2017-12-02 21:51:47 +03:00
* batadv_find_best_neighbor ( ) - finds the best neighbor after purging
2013-11-13 22:14:46 +04:00
* @ bat_priv : the bat priv with all the soft interface information
* @ orig_node : orig node which is to be checked
* @ if_outgoing : the interface for which the metric should be compared
*
2015-09-15 20:00:48 +03:00
* Return : the current best neighbor with increased refcount , or NULL if no suitable neighbor was found .
2013-11-13 22:14:46 +04:00
*/
static struct batadv_neigh_node *
batadv_find_best_neighbor ( struct batadv_priv * bat_priv ,
struct batadv_orig_node * orig_node ,
struct batadv_hard_iface * if_outgoing )
{
struct batadv_neigh_node * best = NULL , * neigh ;
2016-05-25 18:27:31 +03:00
struct batadv_algo_ops * bao = bat_priv - > algo_ops ;
2013-11-13 22:14:46 +04:00
rcu_read_lock ( ) ;
hlist_for_each_entry_rcu ( neigh , & orig_node - > neigh_list , list ) {
2016-05-25 18:27:31 +03:00
if ( best & & ( bao - > neigh . cmp ( neigh , if_outgoing , best ,
if_outgoing ) < = 0 ) )
2013-11-13 22:14:46 +04:00
continue ;
2016-01-16 12:29:53 +03:00
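/* skip neighbors that are already being freed; a successful
 * kref_get_unless_zero() converts the RCU-protected access into a
 * proper reference held via "best"
 */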
if ( ! kref_get_unless_zero ( & neigh - > refcount ) )
2013-11-13 22:14:46 +04:00
continue ;
2021-08-08 20:11:08 +03:00
batadv_neigh_node_put ( best ) ;
2013-11-13 22:14:46 +04:00
best = neigh ;
}
rcu_read_unlock ( ) ;
return best ;
}
2013-11-13 22:14:47 +04:00
/**
2017-12-02 21:51:47 +03:00
* batadv_purge_orig_node ( ) - purges obsolete information from an orig_node
2013-11-13 22:14:47 +04:00
* @ bat_priv : the bat priv with all the soft interface information
* @ orig_node : orig node which is to be checked
*
* This function checks if the orig_node or substructures of it have become
* obsolete , and purges this information if that ' s the case .
*
2015-09-15 20:00:48 +03:00
* Return : true if the orig_node is to be removed , false otherwise .
2013-11-13 22:14:47 +04:00
*/
2012-06-06 00:31:31 +04:00
static bool batadv_purge_orig_node ( struct batadv_priv * bat_priv ,
struct batadv_orig_node * orig_node )
2010-12-13 14:19:28 +03:00
{
2012-06-06 00:31:31 +04:00
struct batadv_neigh_node * best_neigh_node ;
2013-11-13 22:14:47 +04:00
struct batadv_hard_iface * hard_iface ;
2014-03-26 18:46:23 +04:00
bool changed_ifinfo , changed_neigh ;
2010-12-13 14:19:28 +03:00
2012-06-04 00:19:17 +04:00
if ( batadv_has_timed_out ( orig_node - > last_seen ,
2 * BATADV_PURGE_TIMEOUT ) ) {
2012-06-04 00:19:22 +04:00
batadv_dbg ( BATADV_DBG_BATMAN , bat_priv ,
2012-05-12 15:48:58 +04:00
" Originator timeout: originator %pM, last_seen %u \n " ,
orig_node - > orig ,
jiffies_to_msecs ( orig_node - > last_seen ) ) ;
2010-12-13 14:19:28 +03:00
return true ;
}
2014-03-26 18:46:23 +04:00
changed_ifinfo = batadv_purge_orig_ifinfo ( bat_priv , orig_node ) ;
changed_neigh = batadv_purge_orig_neighbors ( bat_priv , orig_node ) ;
2013-11-13 22:14:47 +04:00
2014-03-26 18:46:23 +04:00
if ( ! changed_ifinfo & & ! changed_neigh )
2013-11-13 22:14:46 +04:00
return false ;
2013-11-13 22:14:47 +04:00
/* first for the default interface (BATADV_IF_DEFAULT) ... */
2013-11-13 22:14:46 +04:00
best_neigh_node = batadv_find_best_neighbor ( bat_priv , orig_node ,
BATADV_IF_DEFAULT ) ;
2013-11-13 22:14:47 +04:00
batadv_update_route ( bat_priv , orig_node , BATADV_IF_DEFAULT ,
best_neigh_node ) ;
2021-08-08 20:11:08 +03:00
batadv_neigh_node_put ( best_neigh_node ) ;
2010-12-13 14:19:28 +03:00
2013-11-13 22:14:47 +04:00
/* ... then for all other interfaces. */
rcu_read_lock ( ) ;
list_for_each_entry_rcu ( hard_iface , & batadv_hardif_list , list ) {
if ( hard_iface - > if_status ! = BATADV_IF_ACTIVE )
continue ;
if ( hard_iface - > soft_iface ! = bat_priv - > soft_iface )
continue ;
2016-03-05 18:09:16 +03:00
if ( ! kref_get_unless_zero ( & hard_iface - > refcount ) )
continue ;
2013-11-13 22:14:47 +04:00
best_neigh_node = batadv_find_best_neighbor ( bat_priv ,
orig_node ,
hard_iface ) ;
batadv_update_route ( bat_priv , orig_node , hard_iface ,
best_neigh_node ) ;
2021-08-08 20:11:08 +03:00
batadv_neigh_node_put ( best_neigh_node ) ;
2016-03-05 18:09:16 +03:00
batadv_hardif_put ( hard_iface ) ;
2013-11-13 22:14:47 +04:00
}
rcu_read_unlock ( ) ;
2010-12-13 14:19:28 +03:00
return false ;
}
2018-07-07 22:46:11 +03:00
/**
* batadv_purge_orig_ref ( ) - Purge all outdated originators
* @ bat_priv : the bat priv with all the soft interface information
*/
void batadv_purge_orig_ref ( struct batadv_priv * bat_priv )
2010-12-13 14:19:28 +03:00
{
2012-06-06 00:31:28 +04:00
struct batadv_hashtable * hash = bat_priv - > orig_hash ;
2013-02-28 05:06:00 +04:00
struct hlist_node * node_tmp ;
2010-12-13 14:19:28 +03:00
struct hlist_head * head ;
2011-01-19 23:01:40 +03:00
spinlock_t * list_lock ; /* spinlock to protect write access */
2012-06-06 00:31:31 +04:00
struct batadv_orig_node * orig_node ;
2015-05-26 19:34:26 +03:00
u32 i ;
2010-12-13 14:19:28 +03:00
if ( ! hash )
return ;
/* for all origins... */
for ( i = 0 ; i < hash - > size ; i + + ) {
head = & hash - > table [ i ] ;
2011-01-19 23:01:40 +03:00
list_lock = & hash - > list_locks [ i ] ;
2010-12-13 14:19:28 +03:00
2011-01-19 23:01:40 +03:00
spin_lock_bh ( list_lock ) ;
2013-02-28 05:06:00 +04:00
hlist_for_each_entry_safe ( orig_node , node_tmp ,
2011-02-18 15:28:09 +03:00
head , hash_entry ) {
2012-05-12 20:34:00 +04:00
if ( batadv_purge_orig_node ( bat_priv , orig_node ) ) {
2013-04-23 17:39:58 +04:00
batadv_gw_node_delete ( bat_priv , orig_node ) ;
2013-02-28 05:06:00 +04:00
hlist_del_rcu ( & orig_node - > hash_entry ) ;
2014-12-14 01:32:15 +03:00
batadv_tt_global_del_orig ( orig_node - > bat_priv ,
orig_node , - 1 ,
" originator timed out " ) ;
2016-01-17 13:01:09 +03:00
batadv_orig_node_put ( orig_node ) ;
2011-01-19 23:01:40 +03:00
continue ;
2010-12-13 14:19:28 +03:00
}
2013-05-23 18:53:02 +04:00
batadv_frag_purge_orig ( orig_node ,
batadv_frag_check_entry ) ;
2010-12-13 14:19:28 +03:00
}
2011-01-19 23:01:40 +03:00
spin_unlock_bh ( list_lock ) ;
2010-12-13 14:19:28 +03:00
}
2012-05-12 04:09:29 +04:00
batadv_gw_election ( bat_priv ) ;
2010-12-13 14:19:28 +03:00
}
2012-05-12 20:34:00 +04:00
static void batadv_purge_orig ( struct work_struct * work )
2010-12-13 14:19:28 +03:00
{
2012-06-06 00:31:31 +04:00
struct delayed_work * delayed_work ;
struct batadv_priv * bat_priv ;
2010-12-13 14:19:28 +03:00
2015-12-28 18:43:37 +03:00
delayed_work = to_delayed_work ( work ) ;
2012-06-06 00:31:31 +04:00
bat_priv = container_of ( delayed_work , struct batadv_priv , orig_work ) ;
2018-07-07 22:46:11 +03:00
batadv_purge_orig_ref ( bat_priv ) ;
2012-12-25 16:14:37 +04:00
queue_delayed_work ( batadv_event_workqueue ,
& bat_priv - > orig_work ,
msecs_to_jiffies ( BATADV_ORIG_WORK_PERIOD ) ) ;
2010-12-13 14:19:28 +03:00
}
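/* Sketch (assumption, mirroring the rearm above): how the periodic purge is
 * typically started when the originator layer is initialised. The function
 * name is hypothetical; the real setup lives outside this excerpt.
 */
static void batadv_example_start_purge_work(struct batadv_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);

	/* first run after one work period; each run re-queues itself */
	queue_delayed_work(batadv_event_workqueue, &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}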
2016-07-03 14:31:39 +03:00
/**
2017-12-02 21:51:47 +03:00
* batadv_orig_dump ( ) - Dump to netlink the originator information for a specific
2016-07-03 14:31:39 +03:00
* outgoing interface
* @ msg : message to dump into
* @ cb : parameters for the dump
*
* Return : 0 or error value
*/
int batadv_orig_dump ( struct sk_buff * msg , struct netlink_callback * cb )
{
struct net * net = sock_net ( cb - > skb - > sk ) ;
struct net_device * soft_iface ;
struct net_device * hard_iface = NULL ;
struct batadv_hard_iface * hardif = BATADV_IF_DEFAULT ;
struct batadv_priv * bat_priv ;
struct batadv_hard_iface * primary_if = NULL ;
int ret ;
int ifindex , hard_ifindex ;
ifindex = batadv_netlink_get_ifindex ( cb - > nlh , BATADV_ATTR_MESH_IFINDEX ) ;
if ( ! ifindex )
return - EINVAL ;
soft_iface = dev_get_by_index ( net , ifindex ) ;
if ( ! soft_iface | | ! batadv_softif_is_valid ( soft_iface ) ) {
ret = - ENODEV ;
goto out ;
}
bat_priv = netdev_priv ( soft_iface ) ;
primary_if = batadv_primary_if_get_selected ( bat_priv ) ;
if ( ! primary_if | | primary_if - > if_status ! = BATADV_IF_ACTIVE ) {
ret = - ENOENT ;
goto out ;
}
hard_ifindex = batadv_netlink_get_ifindex ( cb - > nlh ,
BATADV_ATTR_HARD_IFINDEX ) ;
if ( hard_ifindex ) {
hard_iface = dev_get_by_index ( net , hard_ifindex ) ;
if ( hard_iface )
hardif = batadv_hardif_get_by_netdev ( hard_iface ) ;
if ( ! hardif ) {
ret = - ENODEV ;
goto out ;
}
if ( hardif - > soft_iface ! = soft_iface ) {
ret = - ENOENT ;
goto out ;
}
}
if ( ! bat_priv - > algo_ops - > orig . dump ) {
ret = - EOPNOTSUPP ;
goto out ;
}
bat_priv - > algo_ops - > orig . dump ( msg , cb , bat_priv , hardif ) ;
ret = msg - > len ;
out :
2021-08-08 20:11:08 +03:00
batadv_hardif_put ( hardif ) ;
2021-08-05 14:55:27 +03:00
dev_put ( hard_iface ) ;
2021-08-08 20:11:08 +03:00
batadv_hardif_put ( primary_if ) ;
2021-08-05 14:55:27 +03:00
dev_put ( soft_iface ) ;
2016-07-03 14:31:39 +03:00
return ret ;
}
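/* Sketch (assumption): how a dump callback like batadv_orig_dump() is
 * typically hooked into a generic netlink ops table (the real table lives in
 * netlink.c, outside this excerpt). Requires <net/genetlink.h>.
 */
static const struct genl_ops batadv_example_netlink_ops[] = {
	{
		.cmd = BATADV_CMD_GET_ORIGINATORS,
		.dumpit = batadv_orig_dump,
	},
};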