/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
#include "main.h"
#include "bat_sysfs.h"
#include "bat_debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "vis.h"
#include "hash.h"
#include "bat_algo.h"

/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked */
struct list_head hardif_list;
static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *);
char bat_routing_algo[20] = "BATMAN_IV";
static struct hlist_head bat_algo_list;

unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *bat_event_workqueue;

static void recv_handler_init(void);

static int __init batman_init(void)
{
	INIT_LIST_HEAD(&hardif_list);
	INIT_HLIST_HEAD(&bat_algo_list);

	recv_handler_init();

	batadv_iv_init();

	/* the name should not be longer than 10 chars - see
	 * http://lwn.net/Articles/23634/ */
	bat_event_workqueue = create_singlethread_workqueue("bat_events");
	if (!bat_event_workqueue)
		return -ENOMEM;

	batadv_socket_init();
	batadv_debugfs_init();

	register_netdevice_notifier(&batadv_hard_if_notifier);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		SOURCE_VERSION, COMPAT_VERSION);

	return 0;
}

static void __exit batman_exit(void)
{
	batadv_debugfs_destroy();
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	flush_workqueue(bat_event_workqueue);
	destroy_workqueue(bat_event_workqueue);
	bat_event_workqueue = NULL;

	rcu_barrier();
}
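
/* initialize the locks, lists and sub-components attached to a soft
 * interface; on error all partially initialized state is torn down again */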
int mesh_init(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt_changes_list_lock);
	spin_lock_init(&bat_priv->tt_req_list_lock);
	spin_lock_init(&bat_priv->tt_roam_list_lock);
	spin_lock_init(&bat_priv->tt_buff_lock);
	spin_lock_init(&bat_priv->gw_list_lock);
	spin_lock_init(&bat_priv->vis_hash_lock);
	spin_lock_init(&bat_priv->vis_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw_list);
	INIT_LIST_HEAD(&bat_priv->tt_changes_list);
	INIT_LIST_HEAD(&bat_priv->tt_req_list);
	INIT_LIST_HEAD(&bat_priv->tt_roam_list);

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	batadv_tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX);

	ret = vis_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	atomic_set(&bat_priv->gw_reselect, 0);
	atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);

	return 0;

err:
	mesh_free(soft_iface);
	return ret;
}
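
/* deactivate the mesh and free all resources attached to a soft interface */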
void mesh_free(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING);

	batadv_purge_outstanding_packets(bat_priv, NULL);

	vis_quit(bat_priv);

	batadv_gw_node_purge(bat_priv);
	batadv_originator_free(bat_priv);

	batadv_tt_free(bat_priv);

	batadv_bla_free(bat_priv);

	free_percpu(bat_priv->bat_counters);

	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
}
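
/* module reference count helpers */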
void inc_module_count(void)
{
	try_module_get(THIS_MODULE);
}

void dec_module_count(void)
{
	module_put(THIS_MODULE);
}
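
/* returns 1 if the given address belongs to one of our active hard
 * interfaces, 0 otherwise */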
int is_my_mac(const uint8_t *addr)
{
	const struct hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->if_status != IF_ACTIVE)
			continue;

		if (compare_eth(hard_iface->net_dev->dev_addr, addr)) {
			rcu_read_unlock();
			return 1;
		}
	}
	rcu_read_unlock();
	return 0;
}
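
/* fallback handler for packet types without a registered receive handler */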
static int recv_unhandled_packet(struct sk_buff *skb,
				 struct hard_iface *recv_if)
{
	return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
		    struct packet_type *ptype, struct net_device *orig_dev)
{
	struct bat_priv *bat_priv;
	struct batman_ogm_packet *batman_ogm_packet;
	struct hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != IF_ACTIVE)
		goto err_free;

	batman_ogm_packet = (struct batman_ogm_packet *)skb->data;

	if (batman_ogm_packet->header.version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_ogm_packet->header.version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batman_ogm_packet->header.packet_type;
	ret = (*recv_packet_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}
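
/* fill the packet type dispatch table: every slot starts out as
 * recv_unhandled_packet, then the built-in handlers are installed */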
static void recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++)
		recv_packet_handler[i] = recv_unhandled_packet;

	/* batman icmp packet */
	recv_packet_handler[BAT_ICMP] = batadv_recv_icmp_packet;
	/* unicast packet */
	recv_packet_handler[BAT_UNICAST] = batadv_recv_unicast_packet;
	/* fragmented unicast packet */
	recv_packet_handler[BAT_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
	/* broadcast packet */
	recv_packet_handler[BAT_BCAST] = batadv_recv_bcast_packet;
	/* vis packet */
	recv_packet_handler[BAT_VIS] = batadv_recv_vis_packet;
	/* Translation table query (request or response) */
	recv_packet_handler[BAT_TT_QUERY] = batadv_recv_tt_query;
	/* Roaming advertisement */
	recv_packet_handler[BAT_ROAM_ADV] = batadv_recv_roam_adv;
}
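
/* register a receive handler for a packet type; returns -EBUSY if the
 * slot is already taken by another handler */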
int recv_handler_register(uint8_t packet_type,
			  int (*recv_handler)(struct sk_buff *,
					      struct hard_iface *))
{
	if (recv_packet_handler[packet_type] != &recv_unhandled_packet)
		return -EBUSY;

	recv_packet_handler[packet_type] = recv_handler;
	return 0;
}

void recv_handler_unregister(uint8_t packet_type)
{
	recv_packet_handler[packet_type] = recv_unhandled_packet;
}
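
/* look up a registered routing algorithm by name */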
static struct bat_algo_ops *bat_algo_get(char *name)
{
	struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
	struct hlist_node *node;

	hlist_for_each_entry(bat_algo_ops_tmp, node, &bat_algo_list, list) {
		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
			continue;

		bat_algo_ops = bat_algo_ops_tmp;
		break;
	}

	return bat_algo_ops;
}
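
/* add a routing algorithm to the list of available algorithms; rejects
 * duplicate names and ops structures with missing callbacks */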
int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
{
	struct bat_algo_ops *bat_algo_ops_tmp;
	int ret;

	bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name);
	if (bat_algo_ops_tmp) {
		pr_info("Trying to register already registered routing algorithm: %s\n",
			bat_algo_ops->name);
		ret = -EEXIST;
		goto out;
	}

	/* all algorithms must implement all ops (for now) */
	if (!bat_algo_ops->bat_iface_enable ||
	    !bat_algo_ops->bat_iface_disable ||
	    !bat_algo_ops->bat_iface_update_mac ||
	    !bat_algo_ops->bat_primary_iface_set ||
	    !bat_algo_ops->bat_ogm_schedule ||
	    !bat_algo_ops->bat_ogm_emit) {
		pr_info("Routing algo '%s' does not implement required ops\n",
			bat_algo_ops->name);
		ret = -EINVAL;
		goto out;
	}

	INIT_HLIST_NODE(&bat_algo_ops->list);
	hlist_add_head(&bat_algo_ops->list, &bat_algo_list);
	ret = 0;

out:
	return ret;
}
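
/* select the routing algorithm to be used by the given mesh interface */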
int bat_algo_select(struct bat_priv *bat_priv, char *name)
{
	struct bat_algo_ops *bat_algo_ops;
	int ret = -EINVAL;

	bat_algo_ops = bat_algo_get(name);
	if (!bat_algo_ops)
		goto out;

	bat_priv->bat_algo_ops = bat_algo_ops;
	ret = 0;

out:
	return ret;
}
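
/* print the names of all registered routing algorithms to a seq_file */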
int bat_algo_seq_print_text(struct seq_file *seq, void *offset)
{
	struct bat_algo_ops *bat_algo_ops;
	struct hlist_node *node;

	seq_printf(seq, "Available routing algorithms:\n");

	hlist_for_each_entry(bat_algo_ops, node, &bat_algo_list, list) {
		seq_printf(seq, "%s\n", bat_algo_ops->name);
	}

	return 0;
}
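
/* setter for the "routing_algo" module parameter: strip a trailing
 * newline and only accept names of registered algorithms */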
static int param_set_ra(const char *val, const struct kernel_param *kp)
{
	struct bat_algo_ops *bat_algo_ops;
	char *algo_name = (char *)val;
	size_t name_len = strlen(algo_name);

	if (algo_name[name_len - 1] == '\n')
		algo_name[name_len - 1] = '\0';

	bat_algo_ops = bat_algo_get(algo_name);
	if (!bat_algo_ops) {
		pr_err("Routing algorithm '%s' is not supported\n", algo_name);
		return -EINVAL;
	}

	return param_set_copystring(algo_name, kp);
}

static const struct kernel_param_ops param_ops_ra = {
	.set = param_set_ra,
	.get = param_get_string,
};

static struct kparam_string __param_string_ra = {
	.maxlen = sizeof(bat_routing_algo),
	.string = bat_routing_algo,
};

module_param_cb(routing_algo, &param_ops_ra, &__param_string_ra, 0644);

module_init(batman_init);
module_exit(batman_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE);
MODULE_VERSION(SOURCE_VERSION);