/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#ifndef _NET_BATMAN_ADV_TYPES_H_
#define _NET_BATMAN_ADV_TYPES_H_

#include "packet.h"
#include "bitarray.h"
#include <linux/kernel.h>

#define BATADV_HEADER_LEN \
	(ETH_HLEN + max(sizeof(struct unicast_packet), \
			sizeof(struct bcast_packet)))
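
/* Illustrative sketch (not part of the original header): BATADV_HEADER_LEN is
 * the worst-case batman-adv encapsulation overhead per frame, i.e. the outer
 * ethernet header plus the larger of the two packet headers.  The payload
 * that fits into an underlying hard interface would typically be derived as
 * below; the helper name is hypothetical and only given for illustration.
 */
static inline int batadv_example_payload_len(int hard_iface_mtu)
{
	return hard_iface_mtu - (int)BATADV_HEADER_LEN;
}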

struct hard_iface {
	struct list_head list;
	int16_t if_num;
	char if_status;
	struct net_device *net_dev;
	atomic_t seqno;
	atomic_t frag_seqno;
	unsigned char *packet_buff;
	int packet_len;
	struct kobject *hardif_obj;
	atomic_t refcount;
	struct packet_type batman_adv_ptype;
	struct net_device *soft_iface;
	struct rcu_head rcu;
};
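
/* Illustrative sketch (assumption, mirroring the usual kernel refcount/RCU
 * pattern rather than code from this header): dropping a hard_iface reference
 * frees the object only after an RCU grace period.  The function names are
 * hypothetical and kfree() would additionally require <linux/slab.h>.
 */
static void batadv_example_hardif_free_rcu(struct rcu_head *rcu)
{
	struct hard_iface *hard_iface;

	hard_iface = container_of(rcu, struct hard_iface, rcu);
	kfree(hard_iface->packet_buff);
	kfree(hard_iface);
}

static inline void
batadv_example_hardif_free_ref(struct hard_iface *hard_iface)
{
	if (atomic_dec_and_test(&hard_iface->refcount))
		call_rcu(&hard_iface->rcu, batadv_example_hardif_free_rcu);
}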

/* orig_node - structure for orig_list maintaining nodes of mesh
 * @primary_addr: host's primary interface address
 * @last_seen: when last packet from this node was received
 * @bcast_seqno_reset: time when the broadcast seqno window was reset
 * @batman_seqno_reset: time when the batman seqno window was reset
 * @gw_flags: flags related to gateway class
 * @flags: for now only VIS_SERVER flag
 * @last_real_seqno: last and best known sequence number
 * @last_ttl: ttl of last received packet
 * @last_bcast_seqno: last broadcast sequence number received by this host
 *
 * @candidates: how many candidates are available
 * @selected: next bonding candidate
 */
struct orig_node {
	uint8_t orig[ETH_ALEN];
	uint8_t primary_addr[ETH_ALEN];
	struct neigh_node __rcu *router; /* rcu protected pointer */
	unsigned long *bcast_own;
	uint8_t *bcast_own_sum;
	unsigned long last_seen;
	unsigned long bcast_seqno_reset;
	unsigned long batman_seqno_reset;
	uint8_t gw_flags;
	uint8_t flags;
	atomic_t last_ttvn; /* last seen translation table version number */
	uint16_t tt_crc;
	unsigned char *tt_buff;
	int16_t tt_buff_len;
	spinlock_t tt_buff_lock; /* protects tt_buff */
	atomic_t tt_size;
	bool tt_initialised;
	/* The tt_poss_change flag is used to detect an ongoing roaming phase.
	 * If true, then I sent a Roaming_adv to this orig_node and I have to
	 * inspect every packet directed to it to check whether it is still
	 * the true destination or not. This flag will be reset to false as
	 * soon as I receive a new TTVN from this orig_node
	 */
	bool tt_poss_change;
	uint32_t last_real_seqno;
	uint8_t last_ttl;
	DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
	uint32_t last_bcast_seqno;
	struct hlist_head neigh_list;
	struct list_head frag_list;
	spinlock_t neigh_list_lock; /* protects neigh_list and router */
	atomic_t refcount;
	struct rcu_head rcu;
	struct hlist_node hash_entry;
	struct bat_priv *bat_priv;
	unsigned long last_frag_packet;
	/* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
	 * neigh_node->real_bits, neigh_node->real_packet_count
	 */
	spinlock_t ogm_cnt_lock;
	/* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */
	spinlock_t bcast_seqno_lock;
	spinlock_t tt_list_lock; /* protects tt_list */
	atomic_t bond_candidates;
	struct list_head bond_list;
};
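
/* Illustrative sketch (assumption, heavily simplified): how bcast_bits,
 * last_bcast_seqno and bcast_seqno_lock relate to each other.  The real
 * protection-window handling lives in bitarray.c; the helper name used here
 * is hypothetical.
 */
static inline bool batadv_example_bcast_seen(struct orig_node *orig_node,
					     uint32_t seqno)
{
	int32_t diff;
	bool seen = false;

	spin_lock_bh(&orig_node->bcast_seqno_lock);

	/* bit N tracks the seqno N steps behind the newest one */
	diff = orig_node->last_bcast_seqno - seqno;
	if (diff >= 0 && diff < BATADV_TQ_LOCAL_WINDOW_SIZE)
		seen = test_and_set_bit(diff, orig_node->bcast_bits);

	spin_unlock_bh(&orig_node->bcast_seqno_lock);

	return seen;
}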

struct gw_node {
	struct hlist_node list;
	struct orig_node *orig_node;
	unsigned long deleted;
	atomic_t refcount;
	struct rcu_head rcu;
};

/* neigh_node
 * @last_seen: when last packet via this neighbor was received
 */
struct neigh_node {
	struct hlist_node list;
	uint8_t addr[ETH_ALEN];
	uint8_t real_packet_count;
	uint8_t tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
	uint8_t tq_index;
	uint8_t tq_avg;
	uint8_t last_ttl;
	struct list_head bonding_list;
	unsigned long last_seen;
	DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
	atomic_t refcount;
	struct rcu_head rcu;
	struct orig_node *orig_node;
	struct hard_iface *if_incoming;
	spinlock_t lq_update_lock; /* protects: tq_recv, tq_index */
};
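
/* Illustrative sketch (assumption): tq_recv is a ring buffer of recently
 * received transmit-quality values, tq_index points at the slot to overwrite
 * next, and tq_avg caches the average over the non-empty slots.  The helper
 * names are hypothetical; the actual ring handling lives in the routing
 * algorithm code, not in this header.
 */
static inline void batadv_example_tq_record(struct neigh_node *neigh_node,
					    uint8_t tq_value)
{
	spin_lock_bh(&neigh_node->lq_update_lock);
	neigh_node->tq_recv[neigh_node->tq_index] = tq_value;
	neigh_node->tq_index = (neigh_node->tq_index + 1) %
			       BATADV_TQ_GLOBAL_WINDOW_SIZE;
	spin_unlock_bh(&neigh_node->lq_update_lock);
}

static inline uint8_t batadv_example_tq_avg(const uint8_t tq_recv[])
{
	uint16_t sum = 0, count = 0;
	int i;

	for (i = 0; i < BATADV_TQ_GLOBAL_WINDOW_SIZE; i++) {
		if (tq_recv[i] == 0)
			continue;
		sum += tq_recv[i];
		count++;
	}

	return count ? sum / count : 0;
}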

#ifdef CONFIG_BATMAN_ADV_BLA
struct bcast_duplist_entry {
	uint8_t orig[ETH_ALEN];
	uint16_t crc;
	unsigned long entrytime;
};
#endif

enum bat_counters {
	BAT_CNT_FORWARD,
	BAT_CNT_FORWARD_BYTES,
	BAT_CNT_MGMT_TX,
	BAT_CNT_MGMT_TX_BYTES,
	BAT_CNT_MGMT_RX,
	BAT_CNT_MGMT_RX_BYTES,
	BAT_CNT_TT_REQUEST_TX,
	BAT_CNT_TT_REQUEST_RX,
	BAT_CNT_TT_RESPONSE_TX,
	BAT_CNT_TT_RESPONSE_RX,
	BAT_CNT_TT_ROAM_ADV_TX,
	BAT_CNT_TT_ROAM_ADV_RX,
	BAT_CNT_NUM,
};
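
/* Illustrative sketch: the per-cpu counters indexed by enum bat_counters
 * (the bat_counters array declared in struct bat_priv below) are typically
 * bumped lock-free on the local cpu.  The helper name is an assumption for
 * illustration only.
 */
static inline void batadv_example_add_counter(uint64_t __percpu *counters,
					      size_t idx, size_t count)
{
	this_cpu_add(counters[idx], count);
}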

struct bat_priv {
	atomic_t mesh_state;
	struct net_device_stats stats;
	uint64_t __percpu *bat_counters; /* Per cpu counters */
	atomic_t aggregated_ogms; /* boolean */
	atomic_t bonding; /* boolean */
	atomic_t fragmentation; /* boolean */
	atomic_t ap_isolation; /* boolean */
	atomic_t bridge_loop_avoidance; /* boolean */
	atomic_t vis_mode; /* VIS_TYPE_* */
	atomic_t gw_mode; /* GW_MODE_* */
	atomic_t gw_sel_class; /* uint */
	atomic_t gw_bandwidth; /* gw bandwidth */
	atomic_t orig_interval; /* uint */
	atomic_t hop_penalty; /* uint */
	atomic_t log_level; /* uint */
	atomic_t bcast_seqno;
	atomic_t bcast_queue_left;
	atomic_t batman_queue_left;
	atomic_t ttvn; /* translation table version number */
	atomic_t tt_ogm_append_cnt;
	atomic_t tt_local_changes; /* changes registered in an OGM interval */
	atomic_t bla_num_requests; /* number of bla requests in flight */
	/* The tt_poss_change flag is used to detect an ongoing roaming phase.
	 * If true, then I received a Roaming_adv and I have to inspect every
	 * packet directed to me to check whether I am still the true
	 * destination or not. This flag will be reset to false as soon as I
	 * increase my TTVN
	 */
	bool tt_poss_change;
	char num_ifaces;
	struct debug_log *debug_log;
	struct kobject *mesh_obj;
	struct dentry *debug_dir;
	struct hlist_head forw_bat_list;
	struct hlist_head forw_bcast_list;
	struct hlist_head gw_list;
	struct list_head tt_changes_list; /* tracks changes in an OGM int */
	struct list_head vis_send_list;
	struct hashtable_t *orig_hash;
	struct hashtable_t *tt_local_hash;
	struct hashtable_t *tt_global_hash;
#ifdef CONFIG_BATMAN_ADV_BLA
	struct hashtable_t *claim_hash;
	struct hashtable_t *backbone_hash;
#endif
	struct list_head tt_req_list; /* list of pending tt_requests */
	struct list_head tt_roam_list;
	struct hashtable_t *vis_hash;
#ifdef CONFIG_BATMAN_ADV_BLA
	struct bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
	int bcast_duplist_curr;
	struct bla_claim_dst claim_dest;
#endif
	spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
	spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
	spinlock_t tt_changes_list_lock; /* protects tt_changes */
	spinlock_t tt_req_list_lock; /* protects tt_req_list */
	spinlock_t tt_roam_list_lock; /* protects tt_roam_list */
	spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
	spinlock_t vis_hash_lock; /* protects vis_hash */
	spinlock_t vis_list_lock; /* protects vis_info::recv_list */
	atomic_t num_local_tt;
	/* Checksum of the local table, recomputed before sending a new OGM */
	uint16_t tt_crc;
	unsigned char *tt_buff;
	int16_t tt_buff_len;
	spinlock_t tt_buff_lock; /* protects tt_buff */
	struct delayed_work tt_work;
	struct delayed_work orig_work;
	struct delayed_work vis_work;
	struct delayed_work bla_work;
	struct gw_node __rcu *curr_gw; /* rcu protected pointer */
	atomic_t gw_reselect;
	struct hard_iface __rcu *primary_if; /* rcu protected pointer */
	struct vis_info *my_vis_info;
	struct bat_algo_ops *bat_algo_ops;
};
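
/* Illustrative sketch (assumption): reading the rcu-protected curr_gw pointer
 * and taking a reference on it, but only if its refcount has not already
 * dropped to zero.  The function name is hypothetical.
 */
static inline struct gw_node *
batadv_example_get_curr_gw(struct bat_priv *bat_priv)
{
	struct gw_node *curr_gw;

	rcu_read_lock();
	curr_gw = rcu_dereference(bat_priv->curr_gw);
	if (curr_gw && !atomic_inc_not_zero(&curr_gw->refcount))
		curr_gw = NULL;
	rcu_read_unlock();

	return curr_gw;
}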

struct socket_client {
	struct list_head queue_list;
	unsigned int queue_len;
	unsigned char index;
	spinlock_t lock; /* protects queue_list, queue_len, index */
	wait_queue_head_t queue_wait;
	struct bat_priv *bat_priv;
};

struct socket_packet {
	struct list_head list;
	size_t icmp_len;
	struct icmp_packet_rr icmp_packet;
};

struct tt_common_entry {
	uint8_t addr[ETH_ALEN];
	struct hlist_node hash_entry;
	uint16_t flags;
	atomic_t refcount;
	struct rcu_head rcu;
};

struct tt_local_entry {
	struct tt_common_entry common;
	unsigned long last_seen;
};
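
/* Illustrative sketch (assumption): a local translation-table client is
 * purged once its last_seen timestamp is older than a timeout.  The helper
 * name and the timeout parameter are hypothetical; time_after() comes from
 * <linux/jiffies.h>.
 */
static inline bool
batadv_example_tt_local_expired(const struct tt_local_entry *tt_local_entry,
				unsigned long timeout_jiffies)
{
	return time_after(jiffies,
			  tt_local_entry->last_seen + timeout_jiffies);
}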

struct tt_global_entry {
	struct tt_common_entry common;
	struct hlist_head orig_list;
	spinlock_t list_lock; /* protects the list */
	unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
};

struct tt_orig_list_entry {
	struct orig_node *orig_node;
	uint8_t ttvn;
	struct rcu_head rcu;
	struct hlist_node list;
};

#ifdef CONFIG_BATMAN_ADV_BLA
struct backbone_gw {
	uint8_t orig[ETH_ALEN];
	short vid; /* used VLAN ID */
	struct hlist_node hash_entry;
	struct bat_priv *bat_priv;
	unsigned long lasttime; /* last time we heard of this backbone gw */
	atomic_t request_sent;
	atomic_t refcount;
	struct rcu_head rcu;
	uint16_t crc; /* crc checksum over all claims */
};

struct claim {
	uint8_t addr[ETH_ALEN];
	short vid;
	struct backbone_gw *backbone_gw;
	unsigned long lasttime; /* last time we heard of claim (locals only) */
	struct rcu_head rcu;
	atomic_t refcount;
	struct hlist_node hash_entry;
};
#endif

struct tt_change_node {
	struct list_head list;
	struct tt_change change;
};

struct tt_req_node {
	uint8_t addr[ETH_ALEN];
	unsigned long issued_at;
	struct list_head list;
};

struct tt_roam_node {
	uint8_t addr[ETH_ALEN];
	atomic_t counter;
	unsigned long first_time;
	struct list_head list;
};

/* forw_packet - structure for forw_list maintaining packets to be
 * sent/forwarded
 */
struct forw_packet {
	struct hlist_node list;
	unsigned long send_time;
	uint8_t own;
	struct sk_buff *skb;
	uint16_t packet_len;
	uint32_t direct_link_flags;
	uint8_t num_packets;
	struct delayed_work delayed_work;
	struct hard_iface *if_incoming;
};

/* While scanning for vis-entries of a particular vis-originator
 * this list collects its interfaces to create a subgraph/cluster
 * out of them later
 */
struct if_list_entry {
	uint8_t addr[ETH_ALEN];
	bool primary;
	struct hlist_node list;
};

struct debug_log {
	char log_buff[BATADV_LOG_BUF_LEN];
	unsigned long log_start;
	unsigned long log_end;
	spinlock_t lock; /* protects log_buff, log_start and log_end */
	wait_queue_head_t queue_wait;
};
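
/* Illustrative sketch (assumption): log_buff is used as a ring buffer,
 * log_end is the write position and log_start trails it by at most
 * BATADV_LOG_BUF_LEN characters.  The helper name is hypothetical; the real
 * logic lives in the debugfs code and BATADV_LOG_BUF_LEN is assumed to be a
 * power of two.
 */
static inline void batadv_example_emit_log_char(struct debug_log *debug_log,
						char c)
{
	debug_log->log_buff[debug_log->log_end & (BATADV_LOG_BUF_LEN - 1)] = c;
	debug_log->log_end++;

	if (debug_log->log_end - debug_log->log_start > BATADV_LOG_BUF_LEN)
		debug_log->log_start = debug_log->log_end - BATADV_LOG_BUF_LEN;
}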

struct frag_packet_list_entry {
	struct list_head list;
	uint16_t seqno;
	struct sk_buff *skb;
};

struct vis_info {
	unsigned long first_seen;
	/* list of server-neighbors we received a vis-packet
	 * from. we should not reply to them.
	 */
	struct list_head recv_list;
	struct list_head send_list;
	struct kref refcount;
	struct hlist_node hash_entry;
	struct bat_priv *bat_priv;
	/* this packet might be part of the vis send queue. */
	struct sk_buff *skb_packet;
	/* vis_info may follow here */
} __packed;

struct vis_info_entry {
	uint8_t src[ETH_ALEN];
	uint8_t dest[ETH_ALEN];
	uint8_t quality; /* quality = 0 client */
} __packed;

struct recvlist_node {
	struct list_head list;
	uint8_t mac[ETH_ALEN];
};

struct bat_algo_ops {
	struct hlist_node list;
	char *name;
	/* init routing info when hard-interface is enabled */
	int (*bat_iface_enable)(struct hard_iface *hard_iface);
	/* de-init routing info when hard-interface is disabled */
	void (*bat_iface_disable)(struct hard_iface *hard_iface);
	/* (re-)init mac addresses of the protocol information
	 * belonging to this hard-interface
	 */
	void (*bat_iface_update_mac)(struct hard_iface *hard_iface);
	/* called when primary interface is selected / changed */
	void (*bat_primary_iface_set)(struct hard_iface *hard_iface);
	/* prepare a new outgoing OGM for the send queue */
	void (*bat_ogm_schedule)(struct hard_iface *hard_iface);
	/* send scheduled OGM */
	void (*bat_ogm_emit)(struct forw_packet *forw_packet);
};
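
/* Illustrative sketch (assumption): a routing algorithm fills a bat_algo_ops
 * instance with its callbacks and hands it to the registration code at module
 * init.  Only two callbacks are stubbed out here; the identifiers are
 * hypothetical and a real algorithm implements the remaining hooks as well.
 */
static int batadv_example_iface_enable(struct hard_iface *hard_iface)
{
	/* allocate and prepare the per-interface OGM buffer here */
	return 0;
}

static void batadv_example_ogm_emit(struct forw_packet *forw_packet)
{
	/* hand the scheduled OGM in forw_packet->skb to the wire */
}

static struct bat_algo_ops batadv_example_algo_ops = {
	.name = "EXAMPLE",
	.bat_iface_enable = batadv_example_iface_enable,
	.bat_ogm_emit = batadv_example_ogm_emit,
};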

#endif /* _NET_BATMAN_ADV_TYPES_H_ */