#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"
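
/*
 * vlan_do_receive - deliver a VLAN-tagged frame to the matching VLAN device
 *
 * Looks up the VLAN device stacked on skb->dev for the VID carried in
 * skb->vlan_tci.  On a match the skb is retargeted to that device, its
 * priority is mapped, per-cpu statistics are updated and true is returned.
 * If no matching VLAN device exists the function returns false; only when
 * @last_handler is set (and the VID is non-zero) is the frame marked
 * PACKET_OTHERHOST.
 */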
bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev) {
		/* A VID of 0 means the frame is only 802.1p priority-tagged.
		 * When no VLAN device with ID 0 is configured such frames are
		 * treated as untagged, so they must not be rejected here.
		 * Only the last call to vlan_do_receive() should change
		 * pkt_type to PACKET_OTHERHOST.
		 */
		if (vlan_id && last_handler)
			skb->pkt_type = PACKET_OTHERHOST;
		return false;
	}

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly.
		 */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag() expects skb->data to point at the mac
		 * header, so move skb->data there before the call and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
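
/*
 * Illustrative caller sketch (not code from this file, shown only as an
 * assumption about the core receive path): a tagged frame whose tag was
 * extracted by hardware is expected to be handed over roughly like this:
 *
 *	if (vlan_tx_tag_present(skb)) {
 *		if (vlan_do_receive(&skb, last_handler))
 *			goto another_round;	- re-run RX for the VLAN dev
 *		else if (unlikely(!skb))
 *			goto out;		- the skb has been freed
 *	}
 */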
/* Must be invoked with rcu_read_lock or with RTNL. */
struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
					u16 vlan_id)
{
	struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);

	if (grp) {
		return vlan_group_get_device(grp, vlan_id);
	} else {
		/*
		 * Bonding slaves do not have grp assigned to themselves.
		 * Grp is assigned to bonding master instead.
		 */
		if (netif_is_bond_slave(real_dev))
			return __vlan_find_dev_deep(real_dev->master, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep);
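
/*
 * Illustrative use of __vlan_find_dev_deep() (hypothetical caller, a sketch
 * only): resolve the VLAN device stacked on a real device - possibly via
 * its bonding master - while holding the RCU read lock:
 *
 *	rcu_read_lock();
 *	vlan = __vlan_find_dev_deep(real_dev, vid);
 *	if (vlan)
 *		netdev_dbg(vlan, "vid %u is configured\n", vid);
 *	rcu_read_unlock();
 */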

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_info(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);
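/*
 * vlan_reorder_header - make a tagged frame look untagged in place
 *
 * Slides the Ethernet destination and source addresses forward over the
 * 802.1Q header so that the data layout matches an untagged frame, then
 * adjusts the mac header offset.  Returns NULL if the header copy-on-write
 * fails, in which case the skb is left unmodified.
 */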
static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0)
		return NULL;
	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	skb_reset_mac_len(skb);
	return skb;
}
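
/*
 * vlan_untag - strip an in-band 802.1Q header from a frame
 *
 * Used for frames whose tag was not removed by the hardware.  The tag is
 * saved into skb->vlan_tci, the encapsulated protocol is restored and the
 * VLAN header is removed from the packet data.  Returns the (possibly
 * reallocated) skb on success, or NULL if the frame was malformed or
 * memory allocation failed.
 */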
struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}