// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2013-2020  B.A.T.M.A.N. contributors:
 *
 * Martin Hundebøll <martin@hundeboll.net>
 */
# include "fragmentation.h"
2015-04-17 20:40:28 +03:00
# include "main.h"
# include <linux/atomic.h>
# include <linux/byteorder/generic.h>
2016-07-17 22:04:02 +03:00
# include <linux/errno.h>
2015-04-17 20:40:28 +03:00
# include <linux/etherdevice.h>
2017-11-19 19:12:02 +03:00
# include <linux/gfp.h>
2015-04-17 20:40:28 +03:00
# include <linux/if_ether.h>
# include <linux/jiffies.h>
# include <linux/kernel.h>
2015-06-21 15:45:15 +03:00
# include <linux/lockdep.h>
2015-04-17 20:40:28 +03:00
# include <linux/netdevice.h>
# include <linux/skbuff.h>
# include <linux/slab.h>
# include <linux/spinlock.h>
# include <linux/string.h>
2017-12-21 12:17:41 +03:00
# include <uapi/linux/batadv_packet.h>
2015-04-17 20:40:28 +03:00
# include "hard-interface.h"
2013-05-23 18:53:02 +04:00
# include "originator.h"
# include "routing.h"
2015-04-17 20:40:28 +03:00
# include "send.h"
2013-05-23 18:53:02 +04:00
# include "soft-interface.h"
/**
2017-12-02 21:51:47 +03:00
* batadv_frag_clear_chain ( ) - delete entries in the fragment buffer chain
2013-05-23 18:53:02 +04:00
* @ head : head of chain with entries .
2016-07-17 22:04:00 +03:00
* @ dropped : whether the chain is cleared because all fragments are dropped
2013-05-23 18:53:02 +04:00
*
* Free fragments in the passed hlist . Should be called with appropriate lock .
*/
2016-07-17 22:04:00 +03:00
static void batadv_frag_clear_chain ( struct hlist_head * head , bool dropped )
2013-05-23 18:53:02 +04:00
{
struct batadv_frag_list_entry * entry ;
struct hlist_node * node ;
hlist_for_each_entry_safe ( entry , node , head , list ) {
hlist_del ( & entry - > list ) ;
2016-07-17 22:04:00 +03:00
if ( dropped )
kfree_skb ( entry - > skb ) ;
else
consume_skb ( entry - > skb ) ;
2013-05-23 18:53:02 +04:00
kfree ( entry ) ;
}
}
/**
 * batadv_frag_purge_orig() - free fragments associated to an orig
 * @orig_node: originator to free fragments from
 * @check_cb: optional function to tell if an entry should be purged
 */
void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
			    bool (*check_cb)(struct batadv_frag_table_entry *))
{
	struct batadv_frag_table_entry *chain;
	u8 bucket;

	for (bucket = 0; bucket < BATADV_FRAG_BUFFER_COUNT; bucket++) {
		chain = &orig_node->fragments[bucket];

		spin_lock_bh(&chain->lock);

		/* purge unconditionally when no callback was given */
		if (!check_cb || check_cb(chain)) {
			batadv_frag_clear_chain(&chain->fragment_list, true);
			chain->size = 0;
		}

		spin_unlock_bh(&chain->lock);
	}
}
/**
 * batadv_frag_size_limit() - maximum possible size of packet to be fragmented
 *
 * Return: the maximum size of payload that can be fragmented.
 */
static int batadv_frag_size_limit(void)
{
	/* per-fragment payload times the number of fragments we may buffer */
	int payload_per_frag = BATADV_FRAG_MAX_FRAG_SIZE;

	payload_per_frag -= sizeof(struct batadv_frag_packet);

	return payload_per_frag * BATADV_FRAG_MAX_FRAGMENTS;
}
/**
2017-12-02 21:51:47 +03:00
* batadv_frag_init_chain ( ) - check and prepare fragment chain for new fragment
2013-05-23 18:53:02 +04:00
* @ chain : chain in fragments table to init
* @ seqno : sequence number of the received fragment
*
* Make chain ready for a fragment with sequence number " seqno " . Delete existing
* entries if they have an " old " sequence number .
*
* Caller must hold chain - > lock .
*
2015-09-15 20:00:48 +03:00
* Return : true if chain is empty and caller can just insert the new fragment
2013-05-23 18:53:02 +04:00
* without searching for the right position .
*/
static bool batadv_frag_init_chain ( struct batadv_frag_table_entry * chain ,
2015-05-26 19:34:26 +03:00
u16 seqno )
2013-05-23 18:53:02 +04:00
{
2015-06-21 15:45:15 +03:00
lockdep_assert_held ( & chain - > lock ) ;
2013-05-23 18:53:02 +04:00
if ( chain - > seqno = = seqno )
return false ;
2016-07-27 13:31:07 +03:00
if ( ! hlist_empty ( & chain - > fragment_list ) )
2016-07-17 22:04:00 +03:00
batadv_frag_clear_chain ( & chain - > fragment_list , true ) ;
2013-05-23 18:53:02 +04:00
chain - > size = 0 ;
chain - > seqno = seqno ;
return true ;
}
/**
 * batadv_frag_insert_packet() - insert a fragment into a fragment chain
 * @orig_node: originator that the fragment was received from
 * @skb: skb to insert
 * @chain_out: list head to attach complete chains of fragments to
 *
 * Insert a new fragment into the reverse ordered chain in the right table
 * entry. The hash table entry is cleared if "old" fragments exist in it.
 *
 * On success the skb is owned by the chain (or by @chain_out); on failure it
 * is freed here.
 *
 * Return: true if skb is buffered, false on error. If the chain has all the
 * fragments needed to merge the packet, the chain is moved to the passed head
 * to avoid locking the chain in the table.
 */
static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
				      struct sk_buff *skb,
				      struct hlist_head *chain_out)
{
	struct batadv_frag_table_entry *chain;
	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
	struct batadv_frag_list_entry *frag_entry_last = NULL;
	struct batadv_frag_packet *frag_packet;
	u8 bucket;
	u16 seqno, hdr_size = sizeof(struct batadv_frag_packet);
	bool ret = false;

	/* Linearize packet to avoid linearizing 16 packets in a row when doing
	 * the later merge. Non-linear merge should be added to remove this
	 * linearization.
	 */
	if (skb_linearize(skb) < 0)
		goto err;

	frag_packet = (struct batadv_frag_packet *)skb->data;
	seqno = ntohs(frag_packet->seqno);
	bucket = seqno % BATADV_FRAG_BUFFER_COUNT;

	frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
	if (!frag_entry_new)
		goto err;

	frag_entry_new->skb = skb;
	frag_entry_new->no = frag_packet->no;

	/* Select entry in the "chain table" and delete any prior fragments
	 * with another sequence number. batadv_frag_init_chain() returns true,
	 * if the list is empty at return.
	 */
	chain = &orig_node->fragments[bucket];
	spin_lock_bh(&chain->lock);
	if (batadv_frag_init_chain(chain, seqno)) {
		/* empty chain: this fragment becomes the first entry */
		hlist_add_head(&frag_entry_new->list, &chain->fragment_list);
		chain->size = skb->len - hdr_size;
		chain->timestamp = jiffies;
		chain->total_size = ntohs(frag_packet->total_size);
		ret = true;
		goto out;
	}

	/* Find the position for the new fragment. */
	hlist_for_each_entry(frag_entry_curr, &chain->fragment_list, list) {
		/* Drop packet if fragment already exists. */
		if (frag_entry_curr->no == frag_entry_new->no)
			goto err_unlock;

		/* Order fragments from highest to lowest. */
		if (frag_entry_curr->no < frag_entry_new->no) {
			hlist_add_before(&frag_entry_new->list,
					 &frag_entry_curr->list);
			chain->size += skb->len - hdr_size;
			chain->timestamp = jiffies;
			ret = true;
			goto out;
		}

		/* store current entry because it could be the last in list */
		frag_entry_last = frag_entry_curr;
	}

	/* Reached the end of the list, so insert after 'frag_entry_last'. */
	if (likely(frag_entry_last)) {
		hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list);
		chain->size += skb->len - hdr_size;
		chain->timestamp = jiffies;
		ret = true;
	}

out:
	if (chain->size > batadv_frag_size_limit() ||
	    chain->total_size != ntohs(frag_packet->total_size) ||
	    chain->total_size > batadv_frag_size_limit()) {
		/* Clear chain if total size of either the list or the packet
		 * exceeds the maximum size of one merged packet. Don't allow
		 * packets to have different total_size.
		 */
		batadv_frag_clear_chain(&chain->fragment_list, true);
		chain->size = 0;
	} else if (ntohs(frag_packet->total_size) == chain->size) {
		/* All fragments received. Hand over chain to caller. */
		hlist_move_list(&chain->fragment_list, chain_out);
		chain->size = 0;
	}

err_unlock:
	spin_unlock_bh(&chain->lock);

err:
	if (!ret) {
		/* fragment was not buffered: release both the list entry and
		 * the skb so the caller never has to clean up after us
		 */
		kfree(frag_entry_new);
		kfree_skb(skb);
	}

	return ret;
}
/**
2017-12-02 21:51:47 +03:00
* batadv_frag_merge_packets ( ) - merge a chain of fragments
2013-05-23 18:53:02 +04:00
* @ chain : head of chain with fragments
*
* Expand the first skb in the chain and copy the content of the remaining
* skb ' s into the expanded one . After doing so , clear the chain .
*
2015-09-15 20:00:48 +03:00
* Return : the merged skb or NULL on error .
2013-05-23 18:53:02 +04:00
*/
static struct sk_buff *
2014-12-01 12:37:28 +03:00
batadv_frag_merge_packets ( struct hlist_head * chain )
2013-05-23 18:53:02 +04:00
{
struct batadv_frag_packet * packet ;
struct batadv_frag_list_entry * entry ;
2016-07-25 01:42:44 +03:00
struct sk_buff * skb_out ;
2013-05-23 18:53:02 +04:00
int size , hdr_size = sizeof ( struct batadv_frag_packet ) ;
2016-07-17 22:04:00 +03:00
bool dropped = false ;
2013-05-23 18:53:02 +04:00
/* Remove first entry, as this is the destination for the rest of the
* fragments .
*/
entry = hlist_entry ( chain - > first , struct batadv_frag_list_entry , list ) ;
hlist_del ( & entry - > list ) ;
skb_out = entry - > skb ;
kfree ( entry ) ;
2014-12-01 12:37:28 +03:00
packet = ( struct batadv_frag_packet * ) skb_out - > data ;
2018-11-08 01:09:12 +03:00
size = ntohs ( packet - > total_size ) + hdr_size ;
2014-12-01 12:37:28 +03:00
2013-05-23 18:53:02 +04:00
/* Make room for the rest of the fragments. */
2014-12-20 15:48:55 +03:00
if ( pskb_expand_head ( skb_out , 0 , size - skb_out - > len , GFP_ATOMIC ) < 0 ) {
2013-05-23 18:53:02 +04:00
kfree_skb ( skb_out ) ;
skb_out = NULL ;
2016-07-17 22:04:00 +03:00
dropped = true ;
2013-05-23 18:53:02 +04:00
goto free ;
}
/* Move the existing MAC header to just before the payload. (Override
* the fragment header . )
*/
2018-01-23 12:59:50 +03:00
skb_pull ( skb_out , hdr_size ) ;
skb_out - > ip_summed = CHECKSUM_NONE ;
2013-05-23 18:53:02 +04:00
memmove ( skb_out - > data - ETH_HLEN , skb_mac_header ( skb_out ) , ETH_HLEN ) ;
skb_set_mac_header ( skb_out , - ETH_HLEN ) ;
skb_reset_network_header ( skb_out ) ;
skb_reset_transport_header ( skb_out ) ;
/* Copy the payload of the each fragment into the last skb */
hlist_for_each_entry ( entry , chain , list ) {
size = entry - > skb - > len - hdr_size ;
networking: introduce and use skb_put_data()
A common pattern with skb_put() is to just want to memcpy()
some data into the new space, introduce skb_put_data() for
this.
An spatch similar to the one for skb_put_zero() converts many
of the places using it:
@@
identifier p, p2;
expression len, skb, data;
type t, t2;
@@
(
-p = skb_put(skb, len);
+p = skb_put_data(skb, data, len);
|
-p = (t)skb_put(skb, len);
+p = skb_put_data(skb, data, len);
)
(
p2 = (t2)p;
-memcpy(p2, data, len);
|
-memcpy(p, data, len);
)
@@
type t, t2;
identifier p, p2;
expression skb, data;
@@
t *p;
...
(
-p = skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
|
-p = (t *)skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
)
(
p2 = (t2)p;
-memcpy(p2, data, sizeof(*p));
|
-memcpy(p, data, sizeof(*p));
)
@@
expression skb, len, data;
@@
-memcpy(skb_put(skb, len), data, len);
+skb_put_data(skb, data, len);
(again, manually post-processed to retain some comments)
Reviewed-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 15:29:20 +03:00
skb_put_data ( skb_out , entry - > skb - > data + hdr_size , size ) ;
2013-05-23 18:53:02 +04:00
}
free :
/* Locking is not needed, because 'chain' is not part of any orig. */
2016-07-17 22:04:00 +03:00
batadv_frag_clear_chain ( chain , dropped ) ;
2013-05-23 18:53:02 +04:00
return skb_out ;
}
/**
 * batadv_frag_skb_buffer() - buffer fragment for later merge
 * @skb: skb to buffer
 * @orig_node_src: originator that the skb is received from
 *
 * Add fragment to buffer and merge fragments if possible.
 *
 * There are three possible outcomes: 1) Packet is merged: Return true and
 * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
 * to NULL; 3) Error: Return false and free skb.
 *
 * Return: true when the packet is merged or buffered, false when skb is not
 * used.
 */
bool batadv_frag_skb_buffer(struct sk_buff **skb,
			    struct batadv_orig_node *orig_node_src)
{
	struct sk_buff *skb_out = NULL;
	struct hlist_head head = HLIST_HEAD_INIT;
	bool ret = false;

	/* Add packet to buffer and table entry if merge is possible. */
	if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
		goto out_err;

	/* Leave if more fragments are needed to merge. */
	if (hlist_empty(&head))
		goto out;

	skb_out = batadv_frag_merge_packets(&head);
	if (!skb_out)
		goto out_err;

out:
	ret = true;
out_err:
	/* hand the merged packet (or NULL) back to the caller in all cases */
	*skb = skb_out;

	return ret;
}
/**
2017-12-02 21:51:47 +03:00
* batadv_frag_skb_fwd ( ) - forward fragments that would exceed MTU when merged
2013-05-23 18:53:02 +04:00
* @ skb : skb to forward
* @ recv_if : interface that the skb is received on
* @ orig_node_src : originator that the skb is received from
*
* Look up the next - hop of the fragments payload and check if the merged packet
* will exceed the MTU towards the next - hop . If so , the fragment is forwarded
* without merging it .
*
2015-09-15 20:00:48 +03:00
* Return : true if the fragment is consumed / forwarded , false otherwise .
2013-05-23 18:53:02 +04:00
*/
bool batadv_frag_skb_fwd ( struct sk_buff * skb ,
struct batadv_hard_iface * recv_if ,
struct batadv_orig_node * orig_node_src )
{
struct batadv_priv * bat_priv = netdev_priv ( recv_if - > soft_iface ) ;
2016-07-25 01:42:44 +03:00
struct batadv_orig_node * orig_node_dst ;
2013-05-23 18:53:02 +04:00
struct batadv_neigh_node * neigh_node = NULL ;
struct batadv_frag_packet * packet ;
2015-05-26 19:34:26 +03:00
u16 total_size ;
2013-05-23 18:53:02 +04:00
bool ret = false ;
packet = ( struct batadv_frag_packet * ) skb - > data ;
orig_node_dst = batadv_orig_hash_find ( bat_priv , packet - > dest ) ;
if ( ! orig_node_dst )
goto out ;
neigh_node = batadv_find_router ( bat_priv , orig_node_dst , recv_if ) ;
if ( ! neigh_node )
goto out ;
/* Forward the fragment, if the merged packet would be too big to
* be assembled .
*/
total_size = ntohs ( packet - > total_size ) ;
if ( total_size > neigh_node - > if_incoming - > net_dev - > mtu ) {
batadv_inc_counter ( bat_priv , BATADV_CNT_FRAG_FWD ) ;
batadv_add_counter ( bat_priv , BATADV_CNT_FRAG_FWD_BYTES ,
skb - > len + ETH_HLEN ) ;
2013-12-02 23:38:31 +04:00
packet - > ttl - - ;
2016-01-16 11:40:15 +03:00
batadv_send_unicast_skb ( skb , neigh_node ) ;
2013-05-23 18:53:02 +04:00
ret = true ;
}
out :
if ( orig_node_dst )
2016-01-17 13:01:09 +03:00
batadv_orig_node_put ( orig_node_dst ) ;
2013-05-23 18:53:02 +04:00
if ( neigh_node )
2016-01-17 13:01:11 +03:00
batadv_neigh_node_put ( neigh_node ) ;
2013-05-23 18:53:02 +04:00
return ret ;
}
2013-05-23 18:53:03 +04:00
/**
2017-12-02 21:51:47 +03:00
* batadv_frag_create ( ) - create a fragment from skb
2013-05-23 18:53:03 +04:00
* @ skb : skb to create fragment from
* @ frag_head : header to use in new fragment
2017-02-22 19:25:42 +03:00
* @ fragment_size : size of new fragment
2013-05-23 18:53:03 +04:00
*
* Split the passed skb into two fragments : A new one with size matching the
* passed mtu and the old one with the rest . The new skb contains data from the
* tail of the old skb .
*
2015-09-15 20:00:48 +03:00
* Return : the new fragment , NULL on error .
2013-05-23 18:53:03 +04:00
*/
static struct sk_buff * batadv_frag_create ( struct sk_buff * skb ,
struct batadv_frag_packet * frag_head ,
2017-02-22 19:25:42 +03:00
unsigned int fragment_size )
2013-05-23 18:53:03 +04:00
{
struct sk_buff * skb_fragment ;
2016-03-10 00:22:51 +03:00
unsigned int header_size = sizeof ( * frag_head ) ;
2017-02-22 19:25:42 +03:00
unsigned int mtu = fragment_size + header_size ;
2013-05-23 18:53:03 +04:00
skb_fragment = netdev_alloc_skb ( NULL , mtu + ETH_HLEN ) ;
if ( ! skb_fragment )
goto err ;
2016-05-09 21:03:35 +03:00
skb_fragment - > priority = skb - > priority ;
2013-05-23 18:53:03 +04:00
/* Eat the last mtu-bytes of the skb */
skb_reserve ( skb_fragment , header_size + ETH_HLEN ) ;
skb_split ( skb , skb_fragment , skb - > len - fragment_size ) ;
/* Add the header */
skb_push ( skb_fragment , header_size ) ;
memcpy ( skb_fragment - > data , frag_head , header_size ) ;
err :
return skb_fragment ;
}
/**
 * batadv_frag_send_packet() - create up to 16 fragments from the passed skb
 * @skb: skb to create fragments from; consumed on success and freed on error
 * @orig_node: final destination of the created fragments
 * @neigh_node: next-hop of the created fragments
 *
 * Return: the netdev tx status or a negative errno code on a failure
 */
int batadv_frag_send_packet(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_neigh_node *neigh_node)
{
	struct batadv_priv *bat_priv;
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_frag_packet frag_header;
	struct sk_buff *skb_fragment;
	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
	unsigned int header_size = sizeof(frag_header);
	unsigned int max_fragment_size, num_fragments;
	int ret;

	/* To avoid merge and refragmentation at next-hops we never send
	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
	 */
	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
	max_fragment_size = mtu - header_size;

	/* refuse zero-length payloads and MTUs too small for any payload;
	 * note: skb is NOT freed on this path (caller retains ownership)
	 */
	if (skb->len == 0 || max_fragment_size == 0)
		return -EINVAL;

	/* distribute the payload evenly over the required fragment count
	 * (ceiling divisions) instead of filling all but the last fragment
	 */
	num_fragments = (skb->len - 1) / max_fragment_size + 1;
	max_fragment_size = (skb->len - 1) / num_fragments + 1;

	/* Don't even try to fragment, if we need more than 16 fragments */
	if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
		ret = -EAGAIN;
		goto free_skb;
	}

	bat_priv = orig_node->bat_priv;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = -EINVAL;
		goto free_skb;
	}

	/* Create one header to be copied to all fragments */
	frag_header.packet_type = BATADV_UNICAST_FRAG;
	frag_header.version = BATADV_COMPAT_VERSION;
	frag_header.ttl = BATADV_TTL;
	frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
	frag_header.reserved = 0;
	frag_header.no = 0;
	frag_header.total_size = htons(skb->len);

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority. This is used
	 * to allow 802.1d priority to be passed directly in from VLAN
	 * tags, etc.
	 */
	if (skb->priority >= 256 && skb->priority <= 263)
		frag_header.priority = skb->priority - 256;
	else
		frag_header.priority = 0;

	ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
	ether_addr_copy(frag_header.dest, orig_node->orig);

	/* Eat and send fragments from the tail of skb */
	while (skb->len > max_fragment_size) {
		/* The initial check in this function should cover this case */
		if (unlikely(frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)) {
			ret = -EINVAL;
			goto put_primary_if;
		}

		skb_fragment = batadv_frag_create(skb, &frag_header,
						  max_fragment_size);
		if (!skb_fragment) {
			ret = -ENOMEM;
			goto put_primary_if;
		}

		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
				   skb_fragment->len + ETH_HLEN);
		/* batadv_send_unicast_skb() consumes skb_fragment */
		ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
		if (ret != NET_XMIT_SUCCESS) {
			ret = NET_XMIT_DROP;
			goto put_primary_if;
		}

		frag_header.no++;
	}

	/* Make room for the fragment header. */
	if (batadv_skb_head_push(skb, header_size) < 0 ||
	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
		ret = -ENOMEM;
		goto put_primary_if;
	}

	memcpy(skb->data, &frag_header, header_size);

	/* Send the last fragment */
	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
			   skb->len + ETH_HLEN);
	ret = batadv_send_unicast_skb(skb, neigh_node);
	/* skb was consumed */
	skb = NULL;

put_primary_if:
	batadv_hardif_put(primary_if);
free_skb:
	/* no-op on the success path where skb was set to NULL above */
	kfree_skb(skb);

	return ret;
}