// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Pseudo-driver for the loopback interface.
 *
 * Version:	@(#)loopback.c	1.0.4b	08/16/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@scyld.com>
 *
 *		Alan Cox	:	Fixed oddments for NET3.014
 *		Alan Cox	:	Rejig for NET3.029 snap #3
 *		Alan Cox	:	Fixed NET3.029 bugs and sped up
 *		Larry McVoy	:	Tiny tweak to double performance
 *		Alan Cox	:	Backed out LMV's tweak - the linux mm
 *					can't take it...
 *		Michael Griffith:	Don't bother computing the checksums
 *					on packets received on the loopback
 *					interface.
 *		Alexey Kuznetsov:	Potential hang under some extreme
 *					cases removed.
 */
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/in.h>

#include <linux/uaccess.h>
#include <linux/io.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <linux/if_ether.h>	/* For the statistics structure. */
#include <linux/if_arp.h>	/* For ARPHRD_ETHER */
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/percpu.h>
#include <linux/net_tstamp.h>
#include <net/net_namespace.h>
#include <linux/u64_stats_sync.h>

/* blackhole_netdev - a device used for dsts that are marked expired!
 * This is a global device (instead of per-net-ns) since it does not
 * need to be per-ns and gets initialized at boot time.
 */
struct net_device *blackhole_netdev;
EXPORT_SYMBOL(blackhole_netdev);

/* The higher levels take care of making this non-reentrant (it's
 * called with bh's disabled).
 */
static netdev_tx_t loopback_xmit(struct sk_buff *skb,
				 struct net_device *dev)
2005-04-17 02:20:36 +04:00
{
2010-02-16 18:21:08 +03:00
struct pcpu_lstats * lb_stats ;
2009-04-18 02:03:10 +04:00
int len ;
2005-04-17 02:20:36 +04:00
2017-03-11 23:06:54 +03:00
skb_tx_timestamp ( skb ) ;
2018-10-20 05:11:26 +03:00
/* do not fool net_timestamp_check() with various clock bases */
skb - > tstamp = 0 ;
2005-04-17 02:20:36 +04:00
skb_orphan ( skb ) ;
2013-01-25 11:44:41 +04:00
/* Before queueing this packet to netif_rx(),
* make sure dst is refcounted .
*/
skb_dst_force ( skb ) ;
2009-04-18 02:03:10 +04:00
skb - > protocol = eth_type_trans ( skb , dev ) ;
2005-04-17 02:20:36 +04:00
2007-09-28 04:09:39 +04:00
/* it's OK to use per_cpu_ptr() because BHs are off */
2010-09-24 03:51:51 +04:00
lb_stats = this_cpu_ptr ( dev - > lstats ) ;
2005-04-17 02:20:36 +04:00
2009-04-18 02:03:10 +04:00
len = skb - > len ;
if ( likely ( netif_rx ( skb ) = = NET_RX_SUCCESS ) ) {
2010-06-22 16:44:11 +04:00
u64_stats_update_begin ( & lb_stats - > syncp ) ;
2009-04-18 02:03:10 +04:00
lb_stats - > bytes + = len ;
lb_stats - > packets + + ;
2010-06-22 16:44:11 +04:00
u64_stats_update_end ( & lb_stats - > syncp ) ;
2010-10-01 01:06:55 +04:00
}
2005-04-17 02:20:36 +04:00
2009-06-23 10:03:08 +04:00
return NETDEV_TX_OK ;
2005-04-17 02:20:36 +04:00
}
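
/* Fold the per-CPU counters into the rtnl_link_stats64. Every packet
 * transmitted on the loopback device is also received on it, so the
 * same totals are reported for both the rx and tx directions.
 */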
static void loopback_get_stats64(struct net_device *dev,
				 struct rtnl_link_stats64 *stats)
{
	u64 bytes = 0;
	u64 packets = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_lstats *lb_stats;
		u64 tbytes, tpackets;
		unsigned int start;

		lb_stats = per_cpu_ptr(dev->lstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&lb_stats->syncp);
			tbytes = lb_stats->bytes;
			tpackets = lb_stats->packets;
		} while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start));
		bytes += tbytes;
		packets += tpackets;
	}
	stats->rx_packets = packets;
	stats->tx_packets = packets;
	stats->rx_bytes = bytes;
	stats->tx_bytes = bytes;
}

static u32 always_on(struct net_device *dev)
{
	return 1;
}

static const struct ethtool_ops loopback_ethtool_ops = {
	.get_link	= always_on,
	.get_ts_info	= ethtool_op_get_ts_info,
};
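
/* Allocate the per-CPU byte and packet counters when the device is set
 * up; they are released again from loopback_dev_free().
 */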
static int loopback_dev_init(struct net_device *dev)
{
	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;

	return 0;
}

static void loopback_dev_free(struct net_device *dev)
{
	dev_net(dev)->loopback_dev = NULL;
	free_percpu(dev->lstats);
}

static const struct net_device_ops loopback_ops = {
	.ndo_init		= loopback_dev_init,
	.ndo_start_xmit		= loopback_xmit,
	.ndo_get_stats64	= loopback_get_stats64,
	.ndo_set_mac_address	= eth_mac_addr,
};
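
/* Common setup shared by the loopback and blackhole devices; the two
 * callers differ only in the MTU, the ethtool/header/netdev ops and the
 * private destructor they pass in.
 */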
static void gen_lo_setup(struct net_device *dev,
			 unsigned int mtu,
			 const struct ethtool_ops *eth_ops,
			 const struct header_ops *hdr_ops,
			 const struct net_device_ops *dev_ops,
			 void (*dev_destructor)(struct net_device *dev))
{
	dev->mtu		= mtu;
	dev->hard_header_len	= ETH_HLEN;	/* 14 */
	dev->min_header_len	= ETH_HLEN;	/* 14 */
	dev->addr_len		= ETH_ALEN;	/* 6 */
	dev->type		= ARPHRD_LOOPBACK;	/* 0x0001 */
	dev->flags		= IFF_LOOPBACK;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
	netif_keep_dst(dev);
	dev->hw_features	= NETIF_F_GSO_SOFTWARE;
	dev->features		= NETIF_F_SG | NETIF_F_FRAGLIST
		| NETIF_F_GSO_SOFTWARE
		| NETIF_F_HW_CSUM
		| NETIF_F_RXCSUM
		| NETIF_F_SCTP_CRC
		| NETIF_F_HIGHDMA
		| NETIF_F_LLTX
		| NETIF_F_NETNS_LOCAL
		| NETIF_F_VLAN_CHALLENGED
		| NETIF_F_LOOPBACK;
	dev->ethtool_ops	= eth_ops;
	dev->header_ops		= hdr_ops;
	dev->netdev_ops		= dev_ops;
	dev->needs_free_netdev	= true;
	dev->priv_destructor	= dev_destructor;
}

/* The loopback device is special. There is only one instance
 * per network namespace.
 */
static void loopback_setup(struct net_device *dev)
{
	gen_lo_setup(dev, (64 * 1024), &loopback_ethtool_ops, &eth_header_ops,
		     &loopback_ops, loopback_dev_free);
}

/* Setup and register the loopback device. */
static __net_init int loopback_net_init(struct net *net)
{
	struct net_device *dev;
	int err;

	err = -ENOMEM;
	dev = alloc_netdev(0, "lo", NET_NAME_UNKNOWN, loopback_setup);
	if (!dev)
		goto out;

	dev_net_set(dev, net);
	err = register_netdev(dev);
	if (err)
		goto out_free_netdev;

	BUG_ON(dev->ifindex != LOOPBACK_IFINDEX);
	net->loopback_dev = dev;
	return 0;

out_free_netdev:
	free_netdev(dev);
out:
	if (net_eq(net, &init_net))
		panic("loopback: Failed to register netdevice: %d\n", err);
	return err;
}

/* Registered in net/core/dev.c */
struct pernet_operations __net_initdata loopback_net_ops = {
	.init = loopback_net_init,
};

/* blackhole netdevice */
static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb,
					 struct net_device *dev)
{
	kfree_skb(skb);
	net_warn_ratelimited("%s(): Dropping skb.\n", __func__);
	return NETDEV_TX_OK;
}

static const struct net_device_ops blackhole_netdev_ops = {
	.ndo_start_xmit = blackhole_netdev_xmit,
};

/* This is a dst-dummy device used specifically for invalidated
 * DSTs and unlike loopback, this is not per-ns.
 */
static void blackhole_netdev_setup(struct net_device *dev)
{
	gen_lo_setup(dev, ETH_MIN_MTU, NULL, NULL, &blackhole_netdev_ops, NULL);
}

/* Setup and register the blackhole_netdev. */
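/* Note that the device is never passed to register_netdev(): it is brought
 * into service by hand (qdisc init, activation, IFF_UP | IFF_RUNNING), since
 * it only ever backs dsts that have already been invalidated.
 */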
static int __init blackhole_netdev_init(void)
{
	blackhole_netdev = alloc_netdev(0, "blackhole_dev", NET_NAME_UNKNOWN,
					blackhole_netdev_setup);
	if (!blackhole_netdev)
		return -ENOMEM;

	dev_init_scheduler(blackhole_netdev);
	dev_activate(blackhole_netdev);

	blackhole_netdev->flags |= IFF_UP | IFF_RUNNING;
	dev_net_set(blackhole_netdev, &init_net);

	return 0;
}

device_initcall(blackhole_netdev_init);