// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 */

#include "ipvlan.h"
static u32 ipvlan_jhash_secret __read_mostly;

void ipvlan_init_secret(void)
{
	net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}
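
/* Bump the slave's per-CPU RX counters. On success the packet/byte
 * (and, for @mcast, multicast) counts are updated inside the u64_stats
 * seqcount section; on failure only rx_errs is incremented, which needs
 * no synchronisation beyond this_cpu_inc().
 */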
void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
		     unsigned int len, bool success, bool mcast)
{
	if (likely(success)) {
		struct ipvl_pcpu_stats *pcptr;

		pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
		u64_stats_update_begin(&pcptr->syncp);
		pcptr->rx_pkts++;
		pcptr->rx_bytes += len;
		if (mcast)
			pcptr->rx_mcast++;
		u64_stats_update_end(&pcptr->syncp);
	} else {
		this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
	}
}
EXPORT_SYMBOL_GPL(ipvlan_count_rx);

#if IS_ENABLED(CONFIG_IPV6)
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	const struct in6_addr *ip6_addr = iaddr;

	return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}
#else
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	return 0;
}
#endif

static u8 ipvlan_get_v4_hash(const void *iaddr)
{
	const struct in_addr *ip4_addr = iaddr;

	return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}
static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr)
{
	if (!is_v6 && addr->atype == IPVL_IPV4) {
		struct in_addr *i4addr = (struct in_addr *)iaddr;

		return addr->ip4addr.s_addr == i4addr->s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (is_v6 && addr->atype == IPVL_IPV6) {
		struct in6_addr *i6addr = (struct in6_addr *)iaddr;

		return ipv6_addr_equal(&addr->ip6addr, i6addr);
#endif
	}

	return false;
}
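
/* Walk the port-wide hash bucket for @iaddr under RCU and return the
 * first matching address, or NULL. Meant to run inside an RCU
 * read-side section (note the hlist_for_each_entry_rcu() iterator).
 */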
static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
					       const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;
	u8 hash;

	hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
	       ipvlan_get_v4_hash(iaddr);
	hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode)
		if (addr_equal(is_v6, addr, iaddr))
			return addr;

	return NULL;
}

void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
	struct ipvl_port *port = ipvlan->port;
	u8 hash;

	hash = (addr->atype == IPVL_IPV6) ?
	       ipvlan_get_v6_hash(&addr->ip6addr) :
	       ipvlan_get_v4_hash(&addr->ip4addr);
	if (hlist_unhashed(&addr->hlnode))
		hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}

void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
	hlist_del_init_rcu(&addr->hlnode);
}

struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
				   const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) {
		if (addr_equal(is_v6, addr, iaddr)) {
			ret = addr;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
	struct ipvl_dev *ipvlan;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
		if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
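
/* Locate the L3 header in @skb and classify it. Returns a pointer to
 * the ARP/IPv4/IPv6/ICMPv6 header with *type set to the matching
 * IPVL_* constant, or NULL for malformed or unhandled packets. Every
 * pskb_may_pull() may reallocate the header, so ip6h/icmph are
 * re-fetched after each pull.
 */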
void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
	void *lyr3h = NULL;

	switch (skb->protocol) {
	case htons(ETH_P_ARP): {
		struct arphdr *arph;

		if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
			return NULL;

		arph = arp_hdr(skb);
		*type = IPVL_ARP;
		lyr3h = arph;
		break;
	}
	case htons(ETH_P_IP): {
		u32 pktlen;
		struct iphdr *ip4h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
			return NULL;

		ip4h = ip_hdr(skb);
		pktlen = ntohs(ip4h->tot_len);
		if (ip4h->ihl < 5 || ip4h->version != 4)
			return NULL;
		if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
			return NULL;

		*type = IPVL_IPV4;
		lyr3h = ip4h;
		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *ip6h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
			return NULL;

		ip6h = ipv6_hdr(skb);
		if (ip6h->version != 6)
			return NULL;

		*type = IPVL_IPV6;
		lyr3h = ip6h;
		/* Only Neighbour Solicitation pkts need different treatment */
		if (ipv6_addr_any(&ip6h->saddr) &&
		    ip6h->nexthdr == NEXTHDR_ICMP) {
			struct icmp6hdr *icmph;

			if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
				return NULL;

			ip6h = ipv6_hdr(skb);
			icmph = (struct icmp6hdr *)(ip6h + 1);

			if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
				/* Need to access the ipv6 address in body */
				if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) +
						sizeof(*icmph) + sizeof(struct in6_addr))))
					return NULL;

				ip6h = ipv6_hdr(skb);
				icmph = (struct icmp6hdr *)(ip6h + 1);
			}

			*type = IPVL_ICMPV6;
			lyr3h = icmph;
		}
		break;
	}
#endif
	default:
		return NULL;
	}

	return lyr3h;
}

unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
	u32 hash = jhash_1word(__get_unaligned_cpu32(addr + 2),
			       ipvlan_jhash_secret);

	return hash & IPVLAN_MAC_FILTER_MASK;
}
void ipvlan_process_multicast(struct work_struct *work)
{
	struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
	struct ethhdr *ethh;
	struct ipvl_dev *ipvlan;
	struct sk_buff *skb, *nskb;
	struct sk_buff_head list;
	unsigned int len;
	unsigned int mac_hash;
	int ret;
	u8 pkt_type;
	bool tx_pkt;

	__skb_queue_head_init(&list);

	spin_lock_bh(&port->backlog.lock);
	skb_queue_splice_tail_init(&port->backlog, &list);
	spin_unlock_bh(&port->backlog.lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		struct net_device *dev = skb->dev;
		bool consumed = false;

		ethh = eth_hdr(skb);
		tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
		mac_hash = ipvlan_mac_hash(ethh->h_dest);

		if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
			pkt_type = PACKET_BROADCAST;
		else
			pkt_type = PACKET_MULTICAST;

		rcu_read_lock();
		list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
			if (tx_pkt && (ipvlan->dev == skb->dev))
				continue;
			if (!test_bit(mac_hash, ipvlan->mac_filters))
				continue;
			if (!(ipvlan->dev->flags & IFF_UP))
				continue;
			ret = NET_RX_DROP;
			len = skb->len + ETH_HLEN;
			nskb = skb_clone(skb, GFP_ATOMIC);
			local_bh_disable();
			if (nskb) {
				consumed = true;
				nskb->pkt_type = pkt_type;
				nskb->dev = ipvlan->dev;
				if (tx_pkt)
					ret = dev_forward_skb(ipvlan->dev, nskb);
				else
					ret = netif_rx(nskb);
			}
			ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
			local_bh_enable();
		}
		rcu_read_unlock();

		if (tx_pkt) {
			/* If the packet originated here, send it out. */
			skb->dev = port->dev;
			skb->pkt_type = pkt_type;
			dev_queue_xmit(skb);
		} else {
			if (consumed)
				consume_skb(skb);
			else
				kfree_skb(skb);
		}
		if (dev)
			dev_put(dev);
	}
}

static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
	bool xnet = true;

	if (dev)
		xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

	skb_scrub_packet(skb, xnet);
	if (dev)
		skb->dev = dev;
}

static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
			    bool local)
{
	struct ipvl_dev *ipvlan = addr->master;
	struct net_device *dev = ipvlan->dev;
	unsigned int len;
	rx_handler_result_t ret = RX_HANDLER_CONSUMED;
	bool success = false;
	struct sk_buff *skb = *pskb;

	len = skb->len + ETH_HLEN;
	/* Only packets exchanged between two local slaves need to have
	 * device-up check as well as skb-share check.
	 */
	if (local) {
		if (unlikely(!(dev->flags & IFF_UP))) {
			kfree_skb(skb);
			goto out;
		}

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			goto out;

		*pskb = skb;
	}

	if (local) {
		skb->pkt_type = PACKET_HOST;
		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
			success = true;
	} else {
		skb->dev = dev;
		ret = RX_HANDLER_ANOTHER;
		success = true;
	}
out:
	ipvlan_count_rx(ipvlan, len, success, false);
	return ret;
}
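
/* Given an L3 header of type @addr_type, pick the source or destination
 * address (per @use_dest) and look it up in the port's address hash.
 * ARP digs the sender/target IP out of the payload, and ICMPv6
 * neighbour solicitations use the target address from the message body
 * so that DAD probes find the owning slave.
 */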
struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
				     int addr_type, bool use_dest)
{
	struct ipvl_addr *addr = NULL;

	switch (addr_type) {
#if IS_ENABLED(CONFIG_IPV6)
	case IPVL_IPV6: {
		struct ipv6hdr *ip6h;
		struct in6_addr *i6addr;

		ip6h = (struct ipv6hdr *)lyr3h;
		i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		break;
	}
	case IPVL_ICMPV6: {
		struct nd_msg *ndmh;
		struct in6_addr *i6addr;

		/* Make sure that the NeighborSolicitation ICMPv6 packets
		 * are handled to avoid DAD issue.
		 */
		ndmh = (struct nd_msg *)lyr3h;
		if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
			i6addr = &ndmh->target;
			addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		}
		break;
	}
#endif
	case IPVL_IPV4: {
		struct iphdr *ip4h;
		__be32 *i4addr;

		ip4h = (struct iphdr *)lyr3h;
		i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i4addr, false);
		break;
	}
	case IPVL_ARP: {
		struct arphdr *arph;
		unsigned char *arp_ptr;
		__be32 dip;

		arph = (struct arphdr *)lyr3h;
		arp_ptr = (unsigned char *)(arph + 1);
		if (use_dest)
			arp_ptr += (2 * port->dev->addr_len) + 4;
		else
			arp_ptr += port->dev->addr_len;

		memcpy(&dip, arp_ptr, 4);
		addr = ipvlan_ht_addr_lookup(port, &dip, false);
		break;
	}
	}

	return addr;
}
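
/* IPv4 leg of the L3/L3S TX path: re-route the packet in the namespace
 * of the device the skb was scrubbed into and hand it to
 * ip_local_out(). Routes other than unicast/local, and routing
 * failures, drop the packet and count a TX error.
 */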
static int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
	const struct iphdr *ip4h = ip_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int err, ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		.flowi4_oif = dev->ifindex,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC,
		.flowi4_mark = skb->mark,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}
	skb_dst_set(skb, &rt->dst);
	err = ip_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}

#if IS_ENABLED(CONFIG_IPV6)
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct dst_entry *dst;
	int err, ret = NET_XMIT_DROP;
	struct flowi6 fl6 = {
		.flowi6_oif = dev->ifindex,
		.daddr = ip6h->daddr,
		.saddr = ip6h->saddr,
		.flowi6_flags = FLOWI_FLAG_ANYSRC,
		.flowlabel = ip6_flowinfo(ip6h),
		.flowi6_mark = skb->mark,
		.flowi6_proto = ip6h->nexthdr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		ret = dst->error;
		dst_release(dst);
		goto err;
	}
	skb_dst_set(skb, dst);
	err = ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}
#else
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	return NET_XMIT_DROP;
}
#endif

static int ipvlan_process_outbound(struct sk_buff *skb)
{
	struct ethhdr *ethh = eth_hdr(skb);
	int ret = NET_XMIT_DROP;

	/* In this mode we don't care about multicast and broadcast traffic */
	if (is_multicast_ether_addr(ethh->h_dest)) {
		pr_debug_ratelimited("Dropped {multi|broad}cast of type=[%x]\n",
				     ntohs(skb->protocol));
		kfree_skb(skb);
		goto out;
	}

	/* The ipvlan is a pseudo-L2 device, so the packets that we receive
	 * will have an L2 header, which needs to be discarded before the
	 * packet is processed further in the net-ns of the main device.
	 */
	if (skb_mac_header_was_set(skb)) {
		skb_pull(skb, sizeof(*ethh));
		skb->mac_header = (typeof(skb->mac_header))~0U;
		skb_reset_network_header(skb);
	}

	if (skb->protocol == htons(ETH_P_IPV6))
		ret = ipvlan_process_v6_outbound(skb);
	else if (skb->protocol == htons(ETH_P_IP))
		ret = ipvlan_process_v4_outbound(skb);
	else {
		pr_warn_ratelimited("Dropped outbound packet type=%x\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
	}
out:
	return ret;
}

static void ipvlan_multicast_enqueue(struct ipvl_port *port,
				     struct sk_buff *skb, bool tx_pkt)
{
	if (skb->protocol == htons(ETH_P_PAUSE)) {
		kfree_skb(skb);
		return;
	}

	/* Record whether the deferred packet came from the TX or the RX
	 * path; deciding by looking at the packet's MAC addresses would
	 * lead to erroneous conclusions. (This would be the case for a
	 * loopback mode on the master device or a hair-pin mode of the
	 * switch.)
	 */
	IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;

	spin_lock(&port->backlog.lock);
	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
		if (skb->dev)
			dev_hold(skb->dev);
		__skb_queue_tail(&port->backlog, skb);
		spin_unlock(&port->backlog.lock);
		schedule_work(&port->wq);
	} else {
		spin_unlock(&port->backlog.lock);
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
	}
}
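
/* L3/L3S TX: when the destination belongs to another slave on the same
 * port (and the port is neither VEPA nor private), the packet is
 * delivered locally through ipvlan_rcv_frame(); otherwise it is
 * scrubbed into the master's namespace and routed out via
 * ipvlan_process_outbound().
 */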
static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	void *lyr3h;
	struct ipvl_addr *addr;
	int addr_type;

	lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	if (!ipvlan_is_vepa(ipvlan->port)) {
		addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
		if (addr) {
			if (ipvlan_is_private(ipvlan->port)) {
				consume_skb(skb);
				return NET_XMIT_DROP;
			}
			return ipvlan_rcv_frame(addr, &skb, true);
		}
	}
out:
	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return ipvlan_process_outbound(skb);
}

static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (!ipvlan_is_vepa(ipvlan->port) &&
	    ether_addr_equal(eth->h_dest, eth->h_source)) {
		lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
		if (lyr3h) {
			addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
			if (addr) {
				if (ipvlan_is_private(ipvlan->port)) {
					consume_skb(skb);
					return NET_XMIT_DROP;
				}
				return ipvlan_rcv_frame(addr, &skb, true);
			}
		}
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;

		/* Packet definitely does not belong to any of the
		 * virtual devices, but the dest is local. So forward
		 * the skb for the main-dev. At the RX side we just return
		 * RX_PASS for it to be processed further on the stack.
		 */
		return dev_forward_skb(ipvlan->phy_dev, skb);

	} else if (is_multicast_ether_addr(eth->h_dest)) {
		ipvlan_skb_crossing_ns(skb, NULL);
		ipvlan_multicast_enqueue(ipvlan->port, skb, true);
		return NET_XMIT_SUCCESS;
	}

	skb->dev = ipvlan->phy_dev;
	return dev_queue_xmit(skb);
}
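
/* TX dispatch for a slave: picks the L2 or L3 transmit handler based on
 * the port mode (L3S shares the L3 path). Frames seen while the port
 * is gone, or too short to carry an Ethernet header, are dropped.
 */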
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);

	if (!port)
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
		goto out;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_xmit_mode_l2(skb, dev);
	case IPVLAN_MODE_L3:
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
#endif
		return ipvlan_xmit_mode_l3(skb, dev);
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n",
		  port->mode);
out:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
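
/* A frame is "external" unless it carries the master's MAC as its
 * source and its source IP maps to one of the slaves, i.e. unless it
 * looks like a looped-back copy of something a local slave sent.
 */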
static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
		lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
		if (!lyr3h)
			return true;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
		if (addr)
			return false;
	}

	return true;
}

static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	void *lyr3h;
	int addr_type;
	struct ipvl_addr *addr;
	struct sk_buff *skb = *pskb;
	rx_handler_result_t ret = RX_HANDLER_PASS;

	lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
	if (addr)
		ret = ipvlan_rcv_frame(addr, pskb, false);
out:
	return ret;
}

static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	struct sk_buff *skb = *pskb;
	struct ethhdr *eth = eth_hdr(skb);
	rx_handler_result_t ret = RX_HANDLER_PASS;

	if (is_multicast_ether_addr(eth->h_dest)) {
		if (ipvlan_external_frame(skb, port)) {
			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

			/* External frames are queued for device local
			 * distribution, but a copy is given to master
			 * straight away to avoid sending duplicates later
			 * when work-queue processes this frame. This is
			 * achieved by returning RX_HANDLER_PASS.
			 */
			if (nskb) {
				ipvlan_skb_crossing_ns(nskb, NULL);
				ipvlan_multicast_enqueue(port, nskb, false);
			}
		}
	} else {
		/* Perform like l3 mode for non-multicast packet */
		ret = ipvlan_handle_mode_l3(pskb, port);
	}

	return ret;
}
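
/* rx_handler installed on the master device: demultiplex the frame to
 * the L2 or L3 handler according to the port mode. L3S frames are
 * deliberately passed up unmodified here; in that mode slave delivery
 * is decided later, in the separate L3S receive path.
 */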
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

	if (!port)
		return RX_HANDLER_PASS;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_handle_mode_l2(pskb, port);
	case IPVLAN_MODE_L3:
		return ipvlan_handle_mode_l3(pskb, port);
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
		return RX_HANDLER_PASS;
#endif
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
		  port->mode);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}