/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "sysfs.h"
#include "debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "vis.h"
#include "hash.h"
#include "bat_algo.h"

/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
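/* Illustrative sketch only, not code used by this file: the writer side of
 * that locking rule lives elsewhere (e.g. hard-interface.c) and, as a
 * hypothetical pattern, would look like
 *
 *	rtnl_lock();
 *	list_add_tail_rcu(&hard_iface->list, &batadv_hardif_list);
 *	rtnl_unlock();
 *
 * while readers such as batadv_is_my_mac() below only wrap the
 * list_for_each_entry_rcu() walk in rcu_read_lock()/rcu_read_unlock().
 */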
static int (*batadv_rx_handler[256])(struct sk_buff *,
				     struct batadv_hard_iface *);
char batadv_routing_algo[20] = "BATMAN_IV";
static struct hlist_head batadv_algo_list;

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);

static int __init batadv_init(void)
{
	INIT_LIST_HEAD(&batadv_hardif_list);
	INIT_HLIST_HEAD(&batadv_algo_list);

	batadv_recv_handler_init();

	batadv_iv_init();

	batadv_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!batadv_event_workqueue)
		return -ENOMEM;

	batadv_socket_init();
	batadv_debugfs_init();

	register_netdevice_notifier(&batadv_hard_if_notifier);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

	return 0;
}

static void __exit batadv_exit(void)
{
	batadv_debugfs_destroy();
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	rcu_barrier();
}

int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
	spin_lock_init(&bat_priv->vis.hash_lock);
	spin_lock_init(&bat_priv->vis.list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.list);
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_LIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
			    BATADV_NULL_IFINDEX);

	ret = batadv_vis_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	atomic_set(&bat_priv->gw.reselect, 0);
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	batadv_mesh_free(soft_iface);
	return ret;
}

void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_vis_quit(bat_priv);

	batadv_gw_node_purge(bat_priv);
	batadv_originator_free(bat_priv);

	batadv_tt_free(bat_priv);

	batadv_bla_free(bat_priv);

	free_percpu(bat_priv->bat_counters);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}

int batadv_is_my_mac(const uint8_t *addr)
{
	const struct batadv_hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
			rcu_read_unlock();
			return 1;
		}
	}

	rcu_read_unlock();
	return 0;
}

/**
 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
 *  function that requires the primary interface
 * @seq: debugfs table seq_file struct
 *
 * Returns primary interface if found or NULL otherwise.
 */
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (!primary_if) {
		seq_printf(seq,
			   "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
			   net_dev->name);
		goto out;
	}

	if (primary_if->if_status == BATADV_IF_ACTIVE)
		goto out;

	seq_printf(seq,
		   "BATMAN mesh %s disabled - primary interface not active\n",
		   net_dev->name);
	batadv_hardif_free_ref(primary_if);
	primary_if = NULL;

out:
	return primary_if;
}

static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->header.version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batadv_ogm_packet->header.packet_type;
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}

static void batadv_recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_packet;

	/* batman icmp packet */
	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
	/* unicast with 4 addresses packet */
	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
	/* unicast packet */
	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
	/* fragmented unicast packet */
	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
	/* broadcast packet */
	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
	/* vis packet */
	batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
	/* Translation table query (request or response) */
	batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
	/* Roaming advertisement */
	batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
}

int
batadv_recv_handler_register(uint8_t packet_type,
			     int (*recv_handler)(struct sk_buff *,
						 struct batadv_hard_iface *))
{
	if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
		return -EBUSY;

	batadv_rx_handler[packet_type] = recv_handler;
	return 0;
}
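
/* Usage sketch only (batadv_recv_my_packet is hypothetical, not part of this
 * file): a caller hands its receive callback to one packet type and should
 * check the return value, since each type can only be claimed once:
 *
 *	static int batadv_recv_my_packet(struct sk_buff *skb,
 *					 struct batadv_hard_iface *recv_if)
 *	{
 *		return NET_RX_DROP;
 *	}
 *
 *	ret = batadv_recv_handler_register(BATADV_UNICAST_4ADDR,
 *					   batadv_recv_my_packet);
 *
 * Here ret would be -EBUSY, because batadv_recv_handler_init() above already
 * assigned BATADV_UNICAST_4ADDR to batadv_recv_unicast_packet.
 */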

void batadv_recv_handler_unregister(uint8_t packet_type)
{
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}

static struct batadv_algo_ops *batadv_algo_get(char *name)
{
	struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
	struct hlist_node *node;

	hlist_for_each_entry(bat_algo_ops_tmp, node, &batadv_algo_list, list) {
		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
			continue;

		bat_algo_ops = bat_algo_ops_tmp;
		break;
	}

	return bat_algo_ops;
}

int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
	struct batadv_algo_ops *bat_algo_ops_tmp;
	int ret;

	bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
	if (bat_algo_ops_tmp) {
		pr_info("Trying to register already registered routing algorithm: %s\n",
			bat_algo_ops->name);
		ret = -EEXIST;
		goto out;
	}

	/* all algorithms must implement all ops (for now) */
	if (!bat_algo_ops->bat_iface_enable ||
	    !bat_algo_ops->bat_iface_disable ||
	    !bat_algo_ops->bat_iface_update_mac ||
	    !bat_algo_ops->bat_primary_iface_set ||
	    !bat_algo_ops->bat_ogm_schedule ||
	    !bat_algo_ops->bat_ogm_emit) {
		pr_info("Routing algo '%s' does not implement required ops\n",
			bat_algo_ops->name);
		ret = -EINVAL;
		goto out;
	}

	INIT_HLIST_NODE(&bat_algo_ops->list);
	hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
	ret = 0;

out:
	return ret;
}
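
/* Usage sketch only (MY_ALGO and the batadv_my_* callbacks are hypothetical,
 * not part of this file): a routing algorithm module fills in every mandatory
 * op before registering, roughly the way bat_iv_ogm.c registers "BATMAN_IV":
 *
 *	static struct batadv_algo_ops batadv_my_algo_ops = {
 *		.name			= "MY_ALGO",
 *		.bat_iface_enable	= batadv_my_iface_enable,
 *		.bat_iface_disable	= batadv_my_iface_disable,
 *		.bat_iface_update_mac	= batadv_my_iface_update_mac,
 *		.bat_primary_iface_set	= batadv_my_primary_iface_set,
 *		.bat_ogm_schedule	= batadv_my_ogm_schedule,
 *		.bat_ogm_emit		= batadv_my_ogm_emit,
 *	};
 *
 *	ret = batadv_algo_register(&batadv_my_algo_ops);
 *
 * A missing op yields -EINVAL, a duplicate name -EEXIST.
 */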

int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
{
	struct batadv_algo_ops *bat_algo_ops;
	int ret = -EINVAL;

	bat_algo_ops = batadv_algo_get(name);
	if (!bat_algo_ops)
		goto out;

	bat_priv->bat_algo_ops = bat_algo_ops;
	ret = 0;

out:
	return ret;
}

int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
	struct batadv_algo_ops *bat_algo_ops;
	struct hlist_node *node;

	seq_printf(seq, "Available routing algorithms:\n");

	hlist_for_each_entry(bat_algo_ops, node, &batadv_algo_list, list) {
		seq_printf(seq, "%s\n", bat_algo_ops->name);
	}

	return 0;
}

static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
	struct batadv_algo_ops *bat_algo_ops;
	char *algo_name = (char *)val;
	size_t name_len = strlen(algo_name);

	if (algo_name[name_len - 1] == '\n')
		algo_name[name_len - 1] = '\0';

	bat_algo_ops = batadv_algo_get(algo_name);
	if (!bat_algo_ops) {
		pr_err("Routing algorithm '%s' is not supported\n", algo_name);
		return -EINVAL;
	}

	return param_set_copystring(algo_name, kp);
}

static const struct kernel_param_ops batadv_param_ops_ra = {
	.set = batadv_param_set_ra,
	.get = param_get_string,
};

static struct kparam_string batadv_param_string_ra = {
	.maxlen = sizeof(batadv_routing_algo),
	.string = batadv_routing_algo,
};

module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
		0644);
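
/* Usage note (illustrative, assuming the standard module parameter paths):
 * with the 0644 permissions above, the routing algorithm can be chosen at
 * load time, e.g. "modprobe batman-adv routing_algo=BATMAN_IV", or afterwards
 * via /sys/module/batman_adv/parameters/routing_algo; both paths go through
 * batadv_param_set_ra(), which rejects names that were never registered.
 */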
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);