2012-11-17 06:27:13 +04:00
/*
* This file is provided under a dual BSD / GPLv2 license . When using or
* redistributing this file , you may do so under either license .
*
* GPL LICENSE SUMMARY
*
* Copyright ( c ) 2012 Intel Corporation . All rights reserved .
2015-04-09 17:33:20 +03:00
* Copyright ( C ) 2015 EMC Corporation . All Rights Reserved .
2012-11-17 06:27:13 +04:00
*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation .
*
* BSD LICENSE
*
* Copyright ( c ) 2012 Intel Corporation . All rights reserved .
2015-04-09 17:33:20 +03:00
* Copyright ( C ) 2015 EMC Corporation . All Rights Reserved .
2012-11-17 06:27:13 +04:00
*
* Redistribution and use in source and binary forms , with or without
* modification , are permitted provided that the following conditions
* are met :
*
* * Redistributions of source code must retain the above copyright
* notice , this list of conditions and the following disclaimer .
* * Redistributions in binary form must reproduce the above copy
* notice , this list of conditions and the following disclaimer in
* the documentation and / or other materials provided with the
* distribution .
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission .
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT
* LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL ,
* SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT
* LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE ,
* DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT
* ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE .
*
2015-04-09 17:33:20 +03:00
* PCIe NTB Network Linux driver
2012-11-17 06:27:13 +04:00
*
* Contact Information :
* Jon Mason < jon . mason @ intel . com >
*/
# include <linux/etherdevice.h>
# include <linux/ethtool.h>
# include <linux/module.h>
# include <linux/pci.h>
2015-04-09 17:33:20 +03:00
# include <linux/ntb.h>
2015-05-07 13:45:21 +03:00
# include <linux/ntb_transport.h>
2012-11-17 06:27:13 +04:00
2013-01-19 13:02:35 +04:00
# define NTB_NETDEV_VER "0.7"
2012-11-17 06:27:13 +04:00
/* Module identity exported to modinfo; dual-licensed per the file header. */
MODULE_DESCRIPTION(KBUILD_MODNAME);
MODULE_VERSION(NTB_NETDEV_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
2015-07-13 15:07:17 +03:00
/* Time in usecs for tx resource reaper */
static unsigned int tx_time = 1 ;
/* Number of descriptors to free before resuming tx */
static unsigned int tx_start = 10 ;
/* Number of descriptors still available before stop upper layer tx */
static unsigned int tx_stop = 5 ;
2012-11-17 06:27:13 +04:00
/* Per-device driver state, linked on the module-global dev_list. */
struct ntb_netdev {
	struct list_head list;		/* membership in dev_list */
	struct pci_dev *pdev;		/* underlying NTB PCI device */
	struct net_device *ndev;	/* the registered network device */
	struct ntb_transport_qp *qp;	/* NTB transport queue pair */
	struct timer_list tx_timer;	/* tx resource reaper (see tx_time) */
};
# define NTB_TX_TIMEOUT_MS 1000
# define NTB_RXQ_SIZE 100
static LIST_HEAD ( dev_list ) ;
2015-04-09 17:33:20 +03:00
static void ntb_netdev_event_handler ( void * data , int link_is_up )
2012-11-17 06:27:13 +04:00
{
struct net_device * ndev = data ;
struct ntb_netdev * dev = netdev_priv ( ndev ) ;
2015-04-09 17:33:20 +03:00
netdev_dbg ( ndev , " Event %x, Link %x \n " , link_is_up ,
2012-11-17 06:27:13 +04:00
ntb_transport_link_query ( dev - > qp ) ) ;
2015-04-09 17:33:20 +03:00
if ( link_is_up ) {
if ( ntb_transport_link_query ( dev - > qp ) )
netif_carrier_on ( ndev ) ;
} else {
2012-11-17 06:27:13 +04:00
netif_carrier_off ( ndev ) ;
2013-07-30 03:31:18 +04:00
}
2012-11-17 06:27:13 +04:00
}
static void ntb_netdev_rx_handler ( struct ntb_transport_qp * qp , void * qp_data ,
void * data , int len )
{
struct net_device * ndev = qp_data ;
struct sk_buff * skb ;
int rc ;
skb = data ;
if ( ! skb )
return ;
netdev_dbg ( ndev , " %s: %d byte payload received \n " , __func__ , len ) ;
2015-07-13 15:07:08 +03:00
if ( len < 0 ) {
ndev - > stats . rx_errors + + ;
ndev - > stats . rx_length_errors + + ;
goto enqueue_again ;
}
2012-11-17 06:27:13 +04:00
skb_put ( skb , len ) ;
skb - > protocol = eth_type_trans ( skb , ndev ) ;
skb - > ip_summed = CHECKSUM_NONE ;
if ( netif_rx ( skb ) = = NET_RX_DROP ) {
ndev - > stats . rx_errors + + ;
ndev - > stats . rx_dropped + + ;
} else {
ndev - > stats . rx_packets + + ;
ndev - > stats . rx_bytes + = len ;
}
skb = netdev_alloc_skb ( ndev , ndev - > mtu + ETH_HLEN ) ;
if ( ! skb ) {
ndev - > stats . rx_errors + + ;
ndev - > stats . rx_frame_errors + + ;
return ;
}
2015-07-13 15:07:08 +03:00
enqueue_again :
2012-11-17 06:27:13 +04:00
rc = ntb_transport_rx_enqueue ( qp , skb , skb - > data , ndev - > mtu + ETH_HLEN ) ;
if ( rc ) {
2013-01-19 13:02:31 +04:00
dev_kfree_skb ( skb ) ;
2012-11-17 06:27:13 +04:00
ndev - > stats . rx_errors + + ;
ndev - > stats . rx_fifo_errors + + ;
}
}
2015-07-13 15:07:17 +03:00
static int __ntb_netdev_maybe_stop_tx ( struct net_device * netdev ,
struct ntb_transport_qp * qp , int size )
{
struct ntb_netdev * dev = netdev_priv ( netdev ) ;
netif_stop_queue ( netdev ) ;
/* Make sure to see the latest value of ntb_transport_tx_free_entry()
* since the queue was last started .
*/
smp_mb ( ) ;
if ( likely ( ntb_transport_tx_free_entry ( qp ) < size ) ) {
mod_timer ( & dev - > tx_timer , jiffies + usecs_to_jiffies ( tx_time ) ) ;
return - EBUSY ;
}
netif_start_queue ( netdev ) ;
return 0 ;
}
static int ntb_netdev_maybe_stop_tx ( struct net_device * ndev ,
struct ntb_transport_qp * qp , int size )
{
if ( netif_queue_stopped ( ndev ) | |
( ntb_transport_tx_free_entry ( qp ) > = size ) )
return 0 ;
return __ntb_netdev_maybe_stop_tx ( ndev , qp , size ) ;
}
2012-11-17 06:27:13 +04:00
static void ntb_netdev_tx_handler ( struct ntb_transport_qp * qp , void * qp_data ,
void * data , int len )
{
struct net_device * ndev = qp_data ;
struct sk_buff * skb ;
2015-07-13 15:07:17 +03:00
struct ntb_netdev * dev = netdev_priv ( ndev ) ;
2012-11-17 06:27:13 +04:00
skb = data ;
if ( ! skb | | ! ndev )
return ;
if ( len > 0 ) {
ndev - > stats . tx_packets + + ;
ndev - > stats . tx_bytes + = skb - > len ;
} else {
ndev - > stats . tx_errors + + ;
ndev - > stats . tx_aborted_errors + + ;
}
dev_kfree_skb ( skb ) ;
2015-07-13 15:07:17 +03:00
if ( ntb_transport_tx_free_entry ( dev - > qp ) > = tx_start ) {
/* Make sure anybody stopping the queue after this sees the new
* value of ntb_transport_tx_free_entry ( )
*/
smp_mb ( ) ;
if ( netif_queue_stopped ( ndev ) )
netif_wake_queue ( ndev ) ;
}
2012-11-17 06:27:13 +04:00
}
static netdev_tx_t ntb_netdev_start_xmit ( struct sk_buff * skb ,
struct net_device * ndev )
{
struct ntb_netdev * dev = netdev_priv ( ndev ) ;
int rc ;
2015-07-13 15:07:17 +03:00
ntb_netdev_maybe_stop_tx ( ndev , dev - > qp , tx_stop ) ;
2012-11-17 06:27:13 +04:00
rc = ntb_transport_tx_enqueue ( dev - > qp , skb , skb - > data , skb - > len ) ;
if ( rc )
goto err ;
2015-07-13 15:07:17 +03:00
/* check for next submit */
ntb_netdev_maybe_stop_tx ( ndev , dev - > qp , tx_stop ) ;
2012-11-17 06:27:13 +04:00
return NETDEV_TX_OK ;
err :
ndev - > stats . tx_dropped + + ;
ndev - > stats . tx_errors + + ;
return NETDEV_TX_BUSY ;
}
2015-07-13 15:07:17 +03:00
static void ntb_netdev_tx_timer ( unsigned long data )
{
struct net_device * ndev = ( struct net_device * ) data ;
struct ntb_netdev * dev = netdev_priv ( ndev ) ;
if ( ntb_transport_tx_free_entry ( dev - > qp ) < tx_stop ) {
mod_timer ( & dev - > tx_timer , jiffies + msecs_to_jiffies ( tx_time ) ) ;
} else {
/* Make sure anybody stopping the queue after this sees the new
* value of ntb_transport_tx_free_entry ( )
*/
smp_mb ( ) ;
if ( netif_queue_stopped ( ndev ) )
netif_wake_queue ( ndev ) ;
}
}
2012-11-17 06:27:13 +04:00
static int ntb_netdev_open ( struct net_device * ndev )
{
struct ntb_netdev * dev = netdev_priv ( ndev ) ;
struct sk_buff * skb ;
int rc , i , len ;
/* Add some empty rx bufs */
for ( i = 0 ; i < NTB_RXQ_SIZE ; i + + ) {
skb = netdev_alloc_skb ( ndev , ndev - > mtu + ETH_HLEN ) ;
if ( ! skb ) {
rc = - ENOMEM ;
goto err ;
}
rc = ntb_transport_rx_enqueue ( dev - > qp , skb , skb - > data ,
ndev - > mtu + ETH_HLEN ) ;
2015-07-13 15:07:10 +03:00
if ( rc ) {
2013-11-23 03:50:57 +04:00
dev_kfree_skb ( skb ) ;
2012-11-17 06:27:13 +04:00
goto err ;
2013-11-23 03:50:57 +04:00
}
2012-11-17 06:27:13 +04:00
}
2015-07-13 15:07:17 +03:00
setup_timer ( & dev - > tx_timer , ntb_netdev_tx_timer , ( unsigned long ) ndev ) ;
2012-11-17 06:27:13 +04:00
netif_carrier_off ( ndev ) ;
ntb_transport_link_up ( dev - > qp ) ;
2015-07-13 15:07:17 +03:00
netif_start_queue ( ndev ) ;
2012-11-17 06:27:13 +04:00
return 0 ;
err :
while ( ( skb = ntb_transport_rx_remove ( dev - > qp , & len ) ) )
dev_kfree_skb ( skb ) ;
return rc ;
}
static int ntb_netdev_close ( struct net_device * ndev )
{
struct ntb_netdev * dev = netdev_priv ( ndev ) ;
struct sk_buff * skb ;
int len ;
ntb_transport_link_down ( dev - > qp ) ;
while ( ( skb = ntb_transport_rx_remove ( dev - > qp , & len ) ) )
dev_kfree_skb ( skb ) ;
2015-07-13 15:07:17 +03:00
del_timer_sync ( & dev - > tx_timer ) ;
2012-11-17 06:27:13 +04:00
return 0 ;
}
/* ndo_change_mtu: validate the new MTU against the transport's maximum
 * payload, then (if the device is running and the MTU grows) cycle the
 * link and repost the rx ring with larger buffers.
 *
 * If reposting fails the device is left with the link down and no rx
 * buffers, i.e. inoperable until reopened — see the netdev_err below.
 */
static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int len, rc;

	if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN)
		return -EINVAL;

	/* Not running: no rx buffers posted, just record the new MTU. */
	if (!netif_running(ndev)) {
		ndev->mtu = new_mtu;
		return 0;
	}

	/* Bring down the link and dispose of posted rx entries */
	ntb_transport_link_down(dev->qp);

	/* Shrinking the MTU keeps the existing (larger) buffers; only a
	 * grown MTU requires reallocating the ring.
	 */
	if (ndev->mtu < new_mtu) {
		int i;

		/* i counts the entries drained, so the second loop reposts
		 * exactly as many buffers as were removed.
		 */
		for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
			dev_kfree_skb(skb);

		for (; i; i--) {
			skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
			if (!skb) {
				rc = -ENOMEM;
				goto err;
			}

			rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
						      new_mtu + ETH_HLEN);
			if (rc) {
				dev_kfree_skb(skb);
				goto err;
			}
		}
	}

	ndev->mtu = new_mtu;

	ntb_transport_link_up(dev->qp);

	return 0;

err:
	ntb_transport_link_down(dev->qp);

	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);

	netdev_err(ndev, "Error changing MTU, device inoperable\n");
	return rc;
}
/* net_device operations; MAC address changes use the generic eth helper. */
static const struct net_device_ops ntb_netdev_ops = {
	.ndo_open = ntb_netdev_open,
	.ndo_stop = ntb_netdev_close,
	.ndo_start_xmit = ntb_netdev_start_xmit,
	.ndo_change_mtu = ntb_netdev_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
};
static void ntb_get_drvinfo ( struct net_device * ndev ,
struct ethtool_drvinfo * info )
{
struct ntb_netdev * dev = netdev_priv ( ndev ) ;
strlcpy ( info - > driver , KBUILD_MODNAME , sizeof ( info - > driver ) ) ;
strlcpy ( info - > version , NTB_NETDEV_VER , sizeof ( info - > version ) ) ;
strlcpy ( info - > bus_info , pci_name ( dev - > pdev ) , sizeof ( info - > bus_info ) ) ;
}
2017-03-10 01:10:13 +03:00
static int ntb_get_link_ksettings ( struct net_device * dev ,
struct ethtool_link_ksettings * cmd )
2012-11-17 06:27:13 +04:00
{
2017-03-10 01:10:13 +03:00
ethtool_link_ksettings_zero_link_mode ( cmd , supported ) ;
ethtool_link_ksettings_add_link_mode ( cmd , supported , Backplane ) ;
ethtool_link_ksettings_zero_link_mode ( cmd , advertising ) ;
ethtool_link_ksettings_add_link_mode ( cmd , advertising , Backplane ) ;
cmd - > base . speed = SPEED_UNKNOWN ;
cmd - > base . duplex = DUPLEX_FULL ;
cmd - > base . port = PORT_OTHER ;
cmd - > base . phy_address = 0 ;
cmd - > base . autoneg = AUTONEG_ENABLE ;
2012-11-17 06:27:13 +04:00
return 0 ;
}
/* ethtool hooks: driver info plus the synthetic Backplane link settings. */
static const struct ethtool_ops ntb_ethtool_ops = {
	.get_drvinfo = ntb_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = ntb_get_link_ksettings,
};
/* Callbacks handed to ntb_transport_create_queue() at probe time. */
static const struct ntb_queue_handlers ntb_netdev_handlers = {
	.tx_handler = ntb_netdev_tx_handler,
	.rx_handler = ntb_netdev_rx_handler,
	.event_handler = ntb_netdev_event_handler,
};
2015-04-09 17:33:20 +03:00
static int ntb_netdev_probe ( struct device * client_dev )
2012-11-17 06:27:13 +04:00
{
2015-04-09 17:33:20 +03:00
struct ntb_dev * ntb ;
2012-11-17 06:27:13 +04:00
struct net_device * ndev ;
2015-04-09 17:33:20 +03:00
struct pci_dev * pdev ;
2012-11-17 06:27:13 +04:00
struct ntb_netdev * dev ;
int rc ;
2015-04-09 17:33:20 +03:00
ntb = dev_ntb ( client_dev - > parent ) ;
pdev = ntb - > pdev ;
if ( ! pdev )
return - ENODEV ;
ndev = alloc_etherdev ( sizeof ( * dev ) ) ;
2012-11-17 06:27:13 +04:00
if ( ! ndev )
return - ENOMEM ;
dev = netdev_priv ( ndev ) ;
dev - > ndev = ndev ;
dev - > pdev = pdev ;
ndev - > features = NETIF_F_HIGHDMA ;
ndev - > priv_flags | = IFF_LIVE_ADDR_CHANGE ;
ndev - > hw_features = ndev - > features ;
ndev - > watchdog_timeo = msecs_to_jiffies ( NTB_TX_TIMEOUT_MS ) ;
random_ether_addr ( ndev - > perm_addr ) ;
memcpy ( ndev - > dev_addr , ndev - > perm_addr , ndev - > addr_len ) ;
ndev - > netdev_ops = & ntb_netdev_ops ;
2014-05-11 04:12:32 +04:00
ndev - > ethtool_ops = & ntb_ethtool_ops ;
2012-11-17 06:27:13 +04:00
net: use core MTU range checking in core net infra
geneve:
- Merge __geneve_change_mtu back into geneve_change_mtu, set max_mtu
- This one isn't quite as straight-forward as others, could use some
closer inspection and testing
macvlan:
- set min/max_mtu
tun:
- set min/max_mtu, remove tun_net_change_mtu
vxlan:
- Merge __vxlan_change_mtu back into vxlan_change_mtu
- Set max_mtu to IP_MAX_MTU and retain dynamic MTU range checks in
change_mtu function
- This one is also not as straight-forward and could use closer inspection
and testing from vxlan folks
bridge:
- set max_mtu of IP_MAX_MTU and retain dynamic MTU range checks in
change_mtu function
openvswitch:
- set min/max_mtu, remove internal_dev_change_mtu
- note: max_mtu wasn't checked previously, it's been set to 65535, which
is the largest possible size supported
sch_teql:
- set min/max_mtu (note: max_mtu previously unchecked, used max of 65535)
macsec:
- min_mtu = 0, max_mtu = 65535
macvlan:
- min_mtu = 0, max_mtu = 65535
ntb_netdev:
- min_mtu = 0, max_mtu = 65535
veth:
- min_mtu = 68, max_mtu = 65535
8021q:
- min_mtu = 0, max_mtu = 65535
CC: netdev@vger.kernel.org
CC: Nicolas Dichtel <nicolas.dichtel@6wind.com>
CC: Hannes Frederic Sowa <hannes@stressinduktion.org>
CC: Tom Herbert <tom@herbertland.com>
CC: Daniel Borkmann <daniel@iogearbox.net>
CC: Alexander Duyck <alexander.h.duyck@intel.com>
CC: Paolo Abeni <pabeni@redhat.com>
CC: Jiri Benc <jbenc@redhat.com>
CC: WANG Cong <xiyou.wangcong@gmail.com>
CC: Roopa Prabhu <roopa@cumulusnetworks.com>
CC: Pravin B Shelar <pshelar@ovn.org>
CC: Sabrina Dubroca <sd@queasysnail.net>
CC: Patrick McHardy <kaber@trash.net>
CC: Stephen Hemminger <stephen@networkplumber.org>
CC: Pravin Shelar <pshelar@nicira.com>
CC: Maxim Krasnyansky <maxk@qti.qualcomm.com>
Signed-off-by: Jarod Wilson <jarod@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-10-20 20:55:20 +03:00
ndev - > min_mtu = 0 ;
ndev - > max_mtu = ETH_MAX_MTU ;
2015-04-09 17:33:20 +03:00
dev - > qp = ntb_transport_create_queue ( ndev , client_dev ,
& ntb_netdev_handlers ) ;
2012-11-17 06:27:13 +04:00
if ( ! dev - > qp ) {
rc = - EIO ;
goto err ;
}
ndev - > mtu = ntb_transport_max_size ( dev - > qp ) - ETH_HLEN ;
rc = register_netdev ( ndev ) ;
if ( rc )
goto err1 ;
list_add ( & dev - > list , & dev_list ) ;
2013-01-19 13:02:34 +04:00
dev_info ( & pdev - > dev , " %s created \n " , ndev - > name ) ;
2012-11-17 06:27:13 +04:00
return 0 ;
err1 :
ntb_transport_free_queue ( dev - > qp ) ;
err :
free_netdev ( ndev ) ;
return rc ;
}
2015-04-09 17:33:20 +03:00
static void ntb_netdev_remove ( struct device * client_dev )
2012-11-17 06:27:13 +04:00
{
2015-04-09 17:33:20 +03:00
struct ntb_dev * ntb ;
2012-11-17 06:27:13 +04:00
struct net_device * ndev ;
2015-04-09 17:33:20 +03:00
struct pci_dev * pdev ;
2012-11-17 06:27:13 +04:00
struct ntb_netdev * dev ;
2013-11-26 22:21:50 +04:00
bool found = false ;
2012-11-17 06:27:13 +04:00
2015-04-09 17:33:20 +03:00
ntb = dev_ntb ( client_dev - > parent ) ;
pdev = ntb - > pdev ;
2012-11-17 06:27:13 +04:00
list_for_each_entry ( dev , & dev_list , list ) {
2013-11-23 03:44:13 +04:00
if ( dev - > pdev = = pdev ) {
found = true ;
2012-11-17 06:27:13 +04:00
break ;
2013-11-23 03:44:13 +04:00
}
2012-11-17 06:27:13 +04:00
}
2013-11-23 03:44:13 +04:00
if ( ! found )
2012-11-17 06:27:13 +04:00
return ;
2013-04-19 00:36:43 +04:00
list_del ( & dev - > list ) ;
2012-11-17 06:27:13 +04:00
ndev = dev - > ndev ;
unregister_netdev ( ndev ) ;
ntb_transport_free_queue ( dev - > qp ) ;
free_netdev ( ndev ) ;
}
2015-04-09 17:33:20 +03:00
/* Registration with the NTB transport bus. */
static struct ntb_transport_client ntb_netdev_client = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.probe = ntb_netdev_probe,
	.remove = ntb_netdev_remove,
};
/* Module init: register the client device, then the transport client.
 *
 * Fix: the original returned ntb_transport_register_client() directly, so
 * a failure there leaked the client-device registration made just above.
 * Unregister it on that error path.
 */
static int __init ntb_netdev_init_module(void)
{
	int rc;

	rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
	if (rc)
		return rc;

	rc = ntb_transport_register_client(&ntb_netdev_client);
	if (rc) {
		ntb_transport_unregister_client_dev(KBUILD_MODNAME);
		return rc;
	}

	return 0;
}
module_init(ntb_netdev_init_module);
/* Module exit: tear down in reverse order of init — the transport client
 * first, then the client device registration.
 */
static void __exit ntb_netdev_exit_module(void)
{
	ntb_transport_unregister_client(&ntb_netdev_client);
	ntb_transport_unregister_client_dev(KBUILD_MODNAME);
}
module_exit(ntb_netdev_exit_module);