/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
# include <linux/module.h>
# include <linux/kernel.h>
# include <linux/string.h>
# include <linux/errno.h>
# include <linux/types.h>
# include <linux/init.h>
2011-06-06 10:43:46 +00:00
# include <linux/interrupt.h>
2008-09-15 09:17:11 -07:00
# include <linux/workqueue.h>
# include <linux/pci.h>
# include <linux/netdevice.h>
# include <linux/etherdevice.h>
2011-08-16 06:29:00 +00:00
# include <linux/if.h>
2008-09-15 09:17:11 -07:00
# include <linux/if_ether.h>
# include <linux/if_vlan.h>
# include <linux/in.h>
# include <linux/ip.h>
# include <linux/ipv6.h>
# include <linux/tcp.h>
2010-06-24 10:52:26 +00:00
# include <linux/rtnetlink.h>
2011-05-22 16:47:17 -04:00
# include <linux/prefetch.h>
2008-10-13 18:41:01 -07:00
# include <net/ip6_checksum.h>
2014-05-20 03:14:05 +05:30
# include <linux/ktime.h>
2015-10-30 16:52:51 +05:30
# include <linux/numa.h>
2014-06-23 16:08:01 +05:30
# ifdef CONFIG_RFS_ACCEL
# include <linux/cpu_rmap.h>
# endif
2015-01-03 19:35:44 +05:30
# include <linux/crash_dump.h>
2017-02-03 17:28:21 -05:00
# include <net/busy_poll.h>
2017-02-08 16:43:08 -08:00
# include <net/vxlan.h>
2008-09-15 09:17:11 -07:00
# include "cq_enet_desc.h"
# include "vnic_dev.h"
# include "vnic_intr.h"
# include "vnic_stats.h"
2010-05-17 22:50:19 -07:00
# include "vnic_vic.h"
2008-09-15 09:17:11 -07:00
# include "enic_res.h"
# include "enic.h"
2011-02-04 16:17:05 +00:00
# include "enic_dev.h"
2011-03-29 20:36:07 +00:00
# include "enic_pp.h"
2014-06-23 16:08:02 +05:30
# include "enic_clsf.h"
2008-09-15 09:17:11 -07:00
# define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
2009-09-03 17:02:03 +00:00
# define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
# define MAX_TSO (1 << 16)
# define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
# define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
2010-05-17 22:50:19 -07:00
# define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
2012-01-18 04:23:55 +00:00
# define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
2008-09-15 09:17:11 -07:00
2014-09-03 03:17:19 +05:30
# define RX_COPYBREAK_DEFAULT 256
2008-09-15 09:17:11 -07:00
/* Supported devices */
2014-08-08 15:56:03 +02:00
static const struct pci_device_id enic_id_table [ ] = {
2009-09-03 17:02:03 +00:00
{ PCI_VDEVICE ( CISCO , PCI_DEVICE_ID_CISCO_VIC_ENET ) } ,
2010-05-17 22:50:19 -07:00
{ PCI_VDEVICE ( CISCO , PCI_DEVICE_ID_CISCO_VIC_ENET_DYN ) } ,
2012-01-18 04:23:55 +00:00
{ PCI_VDEVICE ( CISCO , PCI_DEVICE_ID_CISCO_VIC_ENET_VF ) } ,
2008-09-15 09:17:11 -07:00
{ 0 , } /* end of table */
} ;
MODULE_DESCRIPTION ( DRV_DESCRIPTION ) ;
MODULE_AUTHOR ( " Scott Feldman <scofeldm@cisco.com> " ) ;
MODULE_LICENSE ( " GPL " ) ;
MODULE_DEVICE_TABLE ( pci , enic_id_table ) ;
2014-05-20 03:14:05 +05:30
# define ENIC_LARGE_PKT_THRESHOLD 1000
# define ENIC_MAX_COALESCE_TIMERS 10
/* Interrupt moderation table, which will be used to decide the
* coalescing timer values
* { rx_rate in Mbps , mapping percentage of the range }
*/
2015-02-05 15:34:13 +00:00
static struct enic_intr_mod_table mod_table [ ENIC_MAX_COALESCE_TIMERS + 1 ] = {
2014-05-20 03:14:05 +05:30
{ 4000 , 0 } ,
{ 4400 , 10 } ,
{ 5060 , 20 } ,
{ 5230 , 30 } ,
{ 5540 , 40 } ,
{ 5820 , 50 } ,
{ 6120 , 60 } ,
{ 6435 , 70 } ,
{ 6745 , 80 } ,
{ 7000 , 90 } ,
{ 0xFFFFFFFF , 100 }
} ;
/* This table helps the driver to pick different ranges for rx coalescing
* timer depending on the link speed .
*/
2015-02-05 15:34:13 +00:00
static struct enic_intr_mod_range mod_range [ ENIC_MAX_LINK_SPEEDS ] = {
2014-05-20 03:14:05 +05:30
{ 0 , 0 } , /* 0 - 4 Gbps */
{ 0 , 3 } , /* 4 - 10 Gbps */
{ 3 , 6 } , /* 10 - 40 Gbps */
} ;
2015-10-30 16:52:51 +05:30
static void enic_init_affinity_hint ( struct enic * enic )
{
int numa_node = dev_to_node ( & enic - > pdev - > dev ) ;
int i ;
for ( i = 0 ; i < enic - > intr_count ; i + + ) {
if ( enic_is_err_intr ( enic , i ) | | enic_is_notify_intr ( enic , i ) | |
2019-03-07 16:52:24 +01:00
( cpumask_available ( enic - > msix [ i ] . affinity_mask ) & &
2015-10-30 16:52:51 +05:30
! cpumask_empty ( enic - > msix [ i ] . affinity_mask ) ) )
continue ;
if ( zalloc_cpumask_var ( & enic - > msix [ i ] . affinity_mask ,
GFP_KERNEL ) )
cpumask_set_cpu ( cpumask_local_spread ( i , numa_node ) ,
enic - > msix [ i ] . affinity_mask ) ;
}
}
static void enic_free_affinity_hint ( struct enic * enic )
{
int i ;
for ( i = 0 ; i < enic - > intr_count ; i + + ) {
if ( enic_is_err_intr ( enic , i ) | | enic_is_notify_intr ( enic , i ) )
continue ;
free_cpumask_var ( enic - > msix [ i ] . affinity_mask ) ;
}
}
static void enic_set_affinity_hint ( struct enic * enic )
{
int i ;
int err ;
for ( i = 0 ; i < enic - > intr_count ; i + + ) {
if ( enic_is_err_intr ( enic , i ) | |
enic_is_notify_intr ( enic , i ) | |
2019-03-07 16:52:24 +01:00
! cpumask_available ( enic - > msix [ i ] . affinity_mask ) | |
2015-10-30 16:52:51 +05:30
cpumask_empty ( enic - > msix [ i ] . affinity_mask ) )
continue ;
err = irq_set_affinity_hint ( enic - > msix_entry [ i ] . vector ,
enic - > msix [ i ] . affinity_mask ) ;
if ( err )
netdev_warn ( enic - > netdev , " irq_set_affinity_hint failed, err %d \n " ,
err ) ;
}
for ( i = 0 ; i < enic - > wq_count ; i + + ) {
int wq_intr = enic_msix_wq_intr ( enic , i ) ;
2019-03-07 16:52:24 +01:00
if ( cpumask_available ( enic - > msix [ wq_intr ] . affinity_mask ) & &
2015-10-30 16:52:51 +05:30
! cpumask_empty ( enic - > msix [ wq_intr ] . affinity_mask ) )
netif_set_xps_queue ( enic - > netdev ,
enic - > msix [ wq_intr ] . affinity_mask ,
i ) ;
}
}
static void enic_unset_affinity_hint ( struct enic * enic )
{
int i ;
for ( i = 0 ; i < enic - > intr_count ; i + + )
irq_set_affinity_hint ( enic - > msix_entry [ i ] . vector , NULL ) ;
}
2020-07-14 12:18:25 -07:00
static int enic_udp_tunnel_set_port ( struct net_device * netdev ,
unsigned int table , unsigned int entry ,
struct udp_tunnel_info * ti )
2017-02-08 16:43:08 -08:00
{
struct enic * enic = netdev_priv ( netdev ) ;
int err ;
spin_lock_bh ( & enic - > devcmd_lock ) ;
err = vnic_dev_overlay_offload_cfg ( enic - > vdev ,
OVERLAY_CFG_VXLAN_PORT_UPDATE ,
2020-07-14 12:18:25 -07:00
ntohs ( ti - > port ) ) ;
2017-02-08 16:43:08 -08:00
if ( err )
goto error ;
err = vnic_dev_overlay_offload_ctrl ( enic - > vdev , OVERLAY_FEATURE_VXLAN ,
enic - > vxlan . patch_level ) ;
if ( err )
goto error ;
2020-07-14 12:18:25 -07:00
enic - > vxlan . vxlan_udp_port_number = ntohs ( ti - > port ) ;
2017-02-08 16:43:08 -08:00
error :
spin_unlock_bh ( & enic - > devcmd_lock ) ;
2020-07-14 12:18:25 -07:00
return err ;
2017-02-08 16:43:08 -08:00
}
2020-07-14 12:18:25 -07:00
static int enic_udp_tunnel_unset_port ( struct net_device * netdev ,
unsigned int table , unsigned int entry ,
struct udp_tunnel_info * ti )
2017-02-08 16:43:08 -08:00
{
struct enic * enic = netdev_priv ( netdev ) ;
int err ;
spin_lock_bh ( & enic - > devcmd_lock ) ;
err = vnic_dev_overlay_offload_ctrl ( enic - > vdev , OVERLAY_FEATURE_VXLAN ,
OVERLAY_OFFLOAD_DISABLE ) ;
2020-07-14 12:18:25 -07:00
if ( err )
2017-02-08 16:43:08 -08:00
goto unlock ;
enic - > vxlan . vxlan_udp_port_number = 0 ;
unlock :
spin_unlock_bh ( & enic - > devcmd_lock ) ;
2020-07-14 12:18:25 -07:00
return err ;
2017-02-08 16:43:08 -08:00
}
2020-07-14 12:18:25 -07:00
static const struct udp_tunnel_nic_info enic_udp_tunnels = {
. set_port = enic_udp_tunnel_set_port ,
. unset_port = enic_udp_tunnel_unset_port ,
. tables = {
{ . n_entries = 1 , . tunnel_types = UDP_TUNNEL_TYPE_VXLAN , } ,
} ,
} , enic_udp_tunnels_v4 = {
. set_port = enic_udp_tunnel_set_port ,
. unset_port = enic_udp_tunnel_unset_port ,
. flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY ,
. tables = {
{ . n_entries = 1 , . tunnel_types = UDP_TUNNEL_TYPE_VXLAN , } ,
} ,
} ;
2017-02-08 16:43:09 -08:00
static netdev_features_t enic_features_check ( struct sk_buff * skb ,
struct net_device * dev ,
netdev_features_t features )
{
const struct ethhdr * eth = ( struct ethhdr * ) skb_inner_mac_header ( skb ) ;
struct enic * enic = netdev_priv ( dev ) ;
struct udphdr * udph ;
u16 port = 0 ;
2018-03-01 11:07:20 -08:00
u8 proto ;
2017-02-08 16:43:09 -08:00
if ( ! skb - > encapsulation )
return features ;
features = vxlan_features_check ( skb , features ) ;
2018-03-01 11:07:20 -08:00
switch ( vlan_get_protocol ( skb ) ) {
case htons ( ETH_P_IPV6 ) :
if ( ! ( enic - > vxlan . flags & ENIC_VXLAN_OUTER_IPV6 ) )
goto out ;
proto = ipv6_hdr ( skb ) - > nexthdr ;
break ;
case htons ( ETH_P_IP ) :
proto = ip_hdr ( skb ) - > protocol ;
break ;
default :
2017-02-08 16:43:09 -08:00
goto out ;
2018-03-01 11:07:20 -08:00
}
2017-02-08 16:43:09 -08:00
2018-03-01 11:07:20 -08:00
switch ( eth - > h_proto ) {
case ntohs ( ETH_P_IPV6 ) :
if ( ! ( enic - > vxlan . flags & ENIC_VXLAN_INNER_IPV6 ) )
goto out ;
2020-08-23 17:36:59 -05:00
fallthrough ;
2018-03-01 11:07:20 -08:00
case ntohs ( ETH_P_IP ) :
break ;
default :
2017-02-08 16:43:09 -08:00
goto out ;
2018-03-01 11:07:20 -08:00
}
2017-02-08 16:43:09 -08:00
if ( proto = = IPPROTO_UDP ) {
udph = udp_hdr ( skb ) ;
port = be16_to_cpu ( udph - > dest ) ;
}
/* HW supports offload of only one UDP port. Remove CSUM and GSO MASK
* for other UDP port tunnels
*/
if ( port ! = enic - > vxlan . vxlan_udp_port_number )
goto out ;
return features ;
out :
return features & ~ ( NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK ) ;
}
2011-09-22 03:44:43 +00:00
int enic_is_dynamic ( struct enic * enic )
2010-05-17 22:50:19 -07:00
{
return enic - > pdev - > device = = PCI_DEVICE_ID_CISCO_VIC_ENET_DYN ;
}
2011-09-22 03:44:33 +00:00
int enic_sriov_enabled ( struct enic * enic )
{
return ( enic - > priv_flags & ENIC_SRIOV_ENABLED ) ? 1 : 0 ;
}
2012-01-18 04:23:55 +00:00
static int enic_is_sriov_vf ( struct enic * enic )
{
return enic - > pdev - > device = = PCI_DEVICE_ID_CISCO_VIC_ENET_VF ;
}
2011-09-22 03:44:38 +00:00
int enic_is_valid_vf ( struct enic * enic , int vf )
{
# ifdef CONFIG_PCI_IOV
return vf > = 0 & & vf < enic - > num_vfs ;
# else
return 0 ;
# endif
}
2008-09-15 09:17:11 -07:00
static void enic_free_wq_buf ( struct vnic_wq * wq , struct vnic_wq_buf * buf )
{
struct enic * enic = vnic_dev_priv ( wq - > vdev ) ;
if ( buf - > sop )
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
dma_unmap_single ( & enic - > pdev - > dev , buf - > dma_addr , buf - > len ,
DMA_TO_DEVICE ) ;
2008-09-15 09:17:11 -07:00
else
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
dma_unmap_page ( & enic - > pdev - > dev , buf - > dma_addr , buf - > len ,
DMA_TO_DEVICE ) ;
2008-09-15 09:17:11 -07:00
if ( buf - > os_buf )
dev_kfree_skb_any ( buf - > os_buf ) ;
}
static void enic_wq_free_buf ( struct vnic_wq * wq ,
struct cq_desc * cq_desc , struct vnic_wq_buf * buf , void * opaque )
{
enic_free_wq_buf ( wq , buf ) ;
}
static int enic_wq_service ( struct vnic_dev * vdev , struct cq_desc * cq_desc ,
u8 type , u16 q_number , u16 completed_index , void * opaque )
{
struct enic * enic = vnic_dev_priv ( vdev ) ;
spin_lock ( & enic - > wq_lock [ q_number ] ) ;
vnic_wq_service ( & enic - > wq [ q_number ] , cq_desc ,
completed_index , enic_wq_free_buf ,
opaque ) ;
2013-09-04 11:17:14 +05:30
if ( netif_tx_queue_stopped ( netdev_get_tx_queue ( enic - > netdev , q_number ) ) & &
2009-09-03 17:02:03 +00:00
vnic_wq_desc_avail ( & enic - > wq [ q_number ] ) > =
( MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS ) )
2013-09-04 11:17:14 +05:30
netif_wake_subqueue ( enic - > netdev , q_number ) ;
2008-09-15 09:17:11 -07:00
spin_unlock ( & enic - > wq_lock [ q_number ] ) ;
return 0 ;
}
2015-10-01 14:18:46 +05:30
static bool enic_log_q_error ( struct enic * enic )
2008-09-15 09:17:11 -07:00
{
unsigned int i ;
u32 error_status ;
2015-10-01 14:18:46 +05:30
bool err = false ;
2008-09-15 09:17:11 -07:00
for ( i = 0 ; i < enic - > wq_count ; i + + ) {
error_status = vnic_wq_error_status ( & enic - > wq [ i ] ) ;
2015-10-01 14:18:46 +05:30
err | = error_status ;
2008-09-15 09:17:11 -07:00
if ( error_status )
2010-06-24 10:50:56 +00:00
netdev_err ( enic - > netdev , " WQ[%d] error_status %d \n " ,
i , error_status ) ;
2008-09-15 09:17:11 -07:00
}
for ( i = 0 ; i < enic - > rq_count ; i + + ) {
error_status = vnic_rq_error_status ( & enic - > rq [ i ] ) ;
2015-10-01 14:18:46 +05:30
err | = error_status ;
2008-09-15 09:17:11 -07:00
if ( error_status )
2010-06-24 10:50:56 +00:00
netdev_err ( enic - > netdev , " RQ[%d] error_status %d \n " ,
i , error_status ) ;
2008-09-15 09:17:11 -07:00
}
2015-10-01 14:18:46 +05:30
return err ;
2008-09-15 09:17:11 -07:00
}
2010-06-24 10:50:12 +00:00
static void enic_msglvl_check ( struct enic * enic )
2008-09-15 09:17:11 -07:00
{
2010-06-24 10:50:12 +00:00
u32 msg_enable = vnic_dev_msg_lvl ( enic - > vdev ) ;
2008-09-15 09:17:11 -07:00
2010-06-24 10:50:12 +00:00
if ( msg_enable ! = enic - > msg_enable ) {
2010-06-24 10:50:56 +00:00
netdev_info ( enic - > netdev , " msg lvl changed from 0x%x to 0x%x \n " ,
enic - > msg_enable , msg_enable ) ;
2010-06-24 10:50:12 +00:00
enic - > msg_enable = msg_enable ;
2008-09-15 09:17:11 -07:00
}
}
static void enic_mtu_check ( struct enic * enic )
{
u32 mtu = vnic_dev_mtu ( enic - > vdev ) ;
2010-06-24 10:50:56 +00:00
struct net_device * netdev = enic - > netdev ;
2008-09-15 09:17:11 -07:00
2009-09-03 17:02:40 +00:00
if ( mtu & & mtu ! = enic - > port_mtu ) {
2009-12-23 13:27:54 +00:00
enic - > port_mtu = mtu ;
2012-01-18 04:24:02 +00:00
if ( enic_is_dynamic ( enic ) | | enic_is_sriov_vf ( enic ) ) {
2011-06-03 14:35:17 +00:00
mtu = max_t ( int , ENIC_MIN_MTU ,
min_t ( int , ENIC_MAX_MTU , mtu ) ) ;
if ( mtu ! = netdev - > mtu )
schedule_work ( & enic - > change_mtu_work ) ;
} else {
if ( mtu < netdev - > mtu )
netdev_warn ( netdev ,
" interface MTU (%d) set higher "
" than switch port MTU (%d) \n " ,
netdev - > mtu , mtu ) ;
}
2008-09-15 09:17:11 -07:00
}
}
2010-06-24 10:50:12 +00:00
static void enic_link_check ( struct enic * enic )
2008-09-15 09:17:11 -07:00
{
2010-06-24 10:50:12 +00:00
int link_status = vnic_dev_link_status ( enic - > vdev ) ;
int carrier_ok = netif_carrier_ok ( enic - > netdev ) ;
2008-09-15 09:17:11 -07:00
2010-06-24 10:50:12 +00:00
if ( link_status & & ! carrier_ok ) {
2010-06-24 10:50:56 +00:00
netdev_info ( enic - > netdev , " Link UP \n " ) ;
2010-06-24 10:50:12 +00:00
netif_carrier_on ( enic - > netdev ) ;
} else if ( ! link_status & & carrier_ok ) {
2010-06-24 10:50:56 +00:00
netdev_info ( enic - > netdev , " Link DOWN \n " ) ;
2010-06-24 10:50:12 +00:00
netif_carrier_off ( enic - > netdev ) ;
2008-09-15 09:17:11 -07:00
}
}
static void enic_notify_check ( struct enic * enic )
{
enic_msglvl_check ( enic ) ;
enic_mtu_check ( enic ) ;
enic_link_check ( enic ) ;
}
# define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
static irqreturn_t enic_isr_legacy ( int irq , void * data )
{
struct net_device * netdev = data ;
struct enic * enic = netdev_priv ( netdev ) ;
2010-10-20 10:16:59 +00:00
unsigned int io_intr = enic_legacy_io_intr ( ) ;
unsigned int err_intr = enic_legacy_err_intr ( ) ;
unsigned int notify_intr = enic_legacy_notify_intr ( ) ;
2008-09-15 09:17:11 -07:00
u32 pba ;
2010-10-20 10:16:59 +00:00
vnic_intr_mask ( & enic - > intr [ io_intr ] ) ;
2008-09-15 09:17:11 -07:00
pba = vnic_intr_legacy_pba ( enic - > legacy_pba ) ;
if ( ! pba ) {
2010-10-20 10:16:59 +00:00
vnic_intr_unmask ( & enic - > intr [ io_intr ] ) ;
2008-09-15 09:17:11 -07:00
return IRQ_NONE ; /* not our interrupt */
}
2010-10-20 10:16:59 +00:00
if ( ENIC_TEST_INTR ( pba , notify_intr ) ) {
2008-09-15 09:17:11 -07:00
enic_notify_check ( enic ) ;
2015-02-25 15:26:55 +05:30
vnic_intr_return_all_credits ( & enic - > intr [ notify_intr ] ) ;
2009-02-09 23:23:50 -08:00
}
2008-09-15 09:17:11 -07:00
2010-10-20 10:16:59 +00:00
if ( ENIC_TEST_INTR ( pba , err_intr ) ) {
vnic_intr_return_all_credits ( & enic - > intr [ err_intr ] ) ;
2008-09-15 09:17:11 -07:00
enic_log_q_error ( enic ) ;
/* schedule recovery from WQ/RQ error */
schedule_work ( & enic - > reset ) ;
return IRQ_HANDLED ;
}
2014-11-23 01:22:51 +05:30
if ( ENIC_TEST_INTR ( pba , io_intr ) )
napi_schedule_irqoff ( & enic - > napi [ 0 ] ) ;
else
2010-10-20 10:16:59 +00:00
vnic_intr_unmask ( & enic - > intr [ io_intr ] ) ;
2008-09-15 09:17:11 -07:00
return IRQ_HANDLED ;
}
static irqreturn_t enic_isr_msi ( int irq , void * data )
{
struct enic * enic = data ;
/* With MSI, there is no sharing of interrupts, so this is
* our interrupt and there is no need to ack it . The device
* is not providing per - vector masking , so the OS will not
* write to PCI config space to mask / unmask the interrupt .
* We ' re using mask_on_assertion for MSI , so the device
* automatically masks the interrupt when the interrupt is
* generated . Later , when exiting polling , the interrupt
* will be unmasked ( see enic_poll ) .
*
* Also , the device uses the same PCIe Traffic Class ( TC )
* for Memory Write data and MSI , so there are no ordering
* issues ; the MSI will always arrive at the Root Complex
* _after_ corresponding Memory Writes ( i . e . descriptor
* writes ) .
*/
2014-11-23 01:22:51 +05:30
napi_schedule_irqoff ( & enic - > napi [ 0 ] ) ;
2008-09-15 09:17:11 -07:00
return IRQ_HANDLED ;
}
2014-06-23 16:08:05 +05:30
static irqreturn_t enic_isr_msix ( int irq , void * data )
2008-09-15 09:17:11 -07:00
{
2010-10-20 10:16:59 +00:00
struct napi_struct * napi = data ;
2008-09-15 09:17:11 -07:00
2014-11-23 01:22:51 +05:30
napi_schedule_irqoff ( napi ) ;
2008-09-15 09:17:11 -07:00
return IRQ_HANDLED ;
}
static irqreturn_t enic_isr_msix_err ( int irq , void * data )
{
struct enic * enic = data ;
2010-10-20 10:16:59 +00:00
unsigned int intr = enic_msix_err_intr ( enic ) ;
2008-09-15 09:17:11 -07:00
2010-10-20 10:16:59 +00:00
vnic_intr_return_all_credits ( & enic - > intr [ intr ] ) ;
2009-02-09 23:23:50 -08:00
2015-10-01 14:18:46 +05:30
if ( enic_log_q_error ( enic ) )
/* schedule recovery from WQ/RQ error */
schedule_work ( & enic - > reset ) ;
2008-09-15 09:17:11 -07:00
return IRQ_HANDLED ;
}
static irqreturn_t enic_isr_msix_notify ( int irq , void * data )
{
struct enic * enic = data ;
2010-10-20 10:16:59 +00:00
unsigned int intr = enic_msix_notify_intr ( enic ) ;
2008-09-15 09:17:11 -07:00
enic_notify_check ( enic ) ;
2015-02-25 15:26:55 +05:30
vnic_intr_return_all_credits ( & enic - > intr [ intr ] ) ;
2008-09-15 09:17:11 -07:00
return IRQ_HANDLED ;
}
2014-12-24 15:59:37 +05:30
static int enic_queue_wq_skb_cont ( struct enic * enic , struct vnic_wq * wq ,
struct sk_buff * skb , unsigned int len_left ,
int loopback )
2008-09-15 09:17:11 -07:00
{
2011-10-18 21:00:24 +00:00
const skb_frag_t * frag ;
2014-12-24 15:59:37 +05:30
dma_addr_t dma_addr ;
2008-09-15 09:17:11 -07:00
/* Queue additional data fragments */
for ( frag = skb_shinfo ( skb ) - > frags ; len_left ; frag + + ) {
2011-10-18 21:00:24 +00:00
len_left - = skb_frag_size ( frag ) ;
2014-12-24 15:59:37 +05:30
dma_addr = skb_frag_dma_map ( & enic - > pdev - > dev , frag , 0 ,
skb_frag_size ( frag ) ,
DMA_TO_DEVICE ) ;
if ( unlikely ( enic_dma_map_check ( enic , dma_addr ) ) )
return - ENOMEM ;
enic_queue_wq_desc_cont ( wq , skb , dma_addr , skb_frag_size ( frag ) ,
( len_left = = 0 ) , /* EOP? */
loopback ) ;
2008-09-15 09:17:11 -07:00
}
2014-12-24 15:59:37 +05:30
return 0 ;
2008-09-15 09:17:11 -07:00
}
2014-12-24 15:59:37 +05:30
static int enic_queue_wq_skb_vlan ( struct enic * enic , struct vnic_wq * wq ,
struct sk_buff * skb , int vlan_tag_insert ,
unsigned int vlan_tag , int loopback )
2008-09-15 09:17:11 -07:00
{
unsigned int head_len = skb_headlen ( skb ) ;
unsigned int len_left = skb - > len - head_len ;
int eop = ( len_left = = 0 ) ;
2014-12-24 15:59:37 +05:30
dma_addr_t dma_addr ;
int err = 0 ;
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
dma_addr = dma_map_single ( & enic - > pdev - > dev , skb - > data , head_len ,
DMA_TO_DEVICE ) ;
2014-12-24 15:59:37 +05:30
if ( unlikely ( enic_dma_map_check ( enic , dma_addr ) ) )
return - ENOMEM ;
2008-09-15 09:17:11 -07:00
2009-09-03 17:02:03 +00:00
/* Queue the main skb fragment. The fragments are no larger
* than max MTU ( 9000 ) + ETH_HDR_LEN ( 14 ) bytes , which is less
* than WQ_ENET_MAX_DESC_LEN length . So only one descriptor
* per fragment is queued .
*/
2014-12-24 15:59:37 +05:30
enic_queue_wq_desc ( wq , skb , dma_addr , head_len , vlan_tag_insert ,
vlan_tag , eop , loopback ) ;
2008-09-15 09:17:11 -07:00
if ( ! eop )
2014-12-24 15:59:37 +05:30
err = enic_queue_wq_skb_cont ( enic , wq , skb , len_left , loopback ) ;
return err ;
2008-09-15 09:17:11 -07:00
}
2014-12-24 15:59:37 +05:30
static int enic_queue_wq_skb_csum_l4 ( struct enic * enic , struct vnic_wq * wq ,
struct sk_buff * skb , int vlan_tag_insert ,
unsigned int vlan_tag , int loopback )
2008-09-15 09:17:11 -07:00
{
unsigned int head_len = skb_headlen ( skb ) ;
unsigned int len_left = skb - > len - head_len ;
2010-12-14 15:24:08 +00:00
unsigned int hdr_len = skb_checksum_start_offset ( skb ) ;
2008-09-15 09:17:11 -07:00
unsigned int csum_offset = hdr_len + skb - > csum_offset ;
int eop = ( len_left = = 0 ) ;
2014-12-24 15:59:37 +05:30
dma_addr_t dma_addr ;
int err = 0 ;
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
dma_addr = dma_map_single ( & enic - > pdev - > dev , skb - > data , head_len ,
DMA_TO_DEVICE ) ;
2014-12-24 15:59:37 +05:30
if ( unlikely ( enic_dma_map_check ( enic , dma_addr ) ) )
return - ENOMEM ;
2008-09-15 09:17:11 -07:00
2009-09-03 17:02:03 +00:00
/* Queue the main skb fragment. The fragments are no larger
* than max MTU ( 9000 ) + ETH_HDR_LEN ( 14 ) bytes , which is less
* than WQ_ENET_MAX_DESC_LEN length . So only one descriptor
* per fragment is queued .
*/
2014-12-24 15:59:37 +05:30
enic_queue_wq_desc_csum_l4 ( wq , skb , dma_addr , head_len , csum_offset ,
hdr_len , vlan_tag_insert , vlan_tag , eop ,
loopback ) ;
2008-09-15 09:17:11 -07:00
if ( ! eop )
2014-12-24 15:59:37 +05:30
err = enic_queue_wq_skb_cont ( enic , wq , skb , len_left , loopback ) ;
return err ;
2008-09-15 09:17:11 -07:00
}
2017-02-08 16:43:09 -08:00
static void enic_preload_tcp_csum_encap ( struct sk_buff * skb )
2008-09-15 09:17:11 -07:00
{
2018-03-01 11:07:19 -08:00
const struct ethhdr * eth = ( struct ethhdr * ) skb_inner_mac_header ( skb ) ;
switch ( eth - > h_proto ) {
case ntohs ( ETH_P_IP ) :
2017-02-08 16:43:09 -08:00
inner_ip_hdr ( skb ) - > check = 0 ;
inner_tcp_hdr ( skb ) - > check =
~ csum_tcpudp_magic ( inner_ip_hdr ( skb ) - > saddr ,
inner_ip_hdr ( skb ) - > daddr , 0 ,
IPPROTO_TCP , 0 ) ;
2018-03-01 11:07:19 -08:00
break ;
case ntohs ( ETH_P_IPV6 ) :
inner_tcp_hdr ( skb ) - > check =
~ csum_ipv6_magic ( & inner_ipv6_hdr ( skb ) - > saddr ,
& inner_ipv6_hdr ( skb ) - > daddr , 0 ,
IPPROTO_TCP , 0 ) ;
break ;
default :
WARN_ONCE ( 1 , " Non ipv4/ipv6 inner pkt for encap offload " ) ;
break ;
2017-02-08 16:43:09 -08:00
}
}
2008-09-15 09:17:11 -07:00
2017-02-08 16:43:09 -08:00
static void enic_preload_tcp_csum ( struct sk_buff * skb )
{
2008-09-15 09:17:11 -07:00
/* Preload TCP csum field with IP pseudo hdr calculated
* with IP length set to zero . HW will later add in length
* to each TCP segment resulting from the TSO .
*/
2009-02-01 00:45:17 -08:00
if ( skb - > protocol = = cpu_to_be16 ( ETH_P_IP ) ) {
2008-09-15 09:17:11 -07:00
ip_hdr ( skb ) - > check = 0 ;
tcp_hdr ( skb ) - > check = ~ csum_tcpudp_magic ( ip_hdr ( skb ) - > saddr ,
ip_hdr ( skb ) - > daddr , 0 , IPPROTO_TCP , 0 ) ;
2009-02-01 00:45:17 -08:00
} else if ( skb - > protocol = = cpu_to_be16 ( ETH_P_IPV6 ) ) {
2020-02-18 21:02:26 +01:00
tcp_v6_gso_csum_prep ( skb ) ;
2008-09-15 09:17:11 -07:00
}
2017-02-08 16:43:09 -08:00
}
static int enic_queue_wq_skb_tso ( struct enic * enic , struct vnic_wq * wq ,
struct sk_buff * skb , unsigned int mss ,
int vlan_tag_insert , unsigned int vlan_tag ,
int loopback )
{
unsigned int frag_len_left = skb_headlen ( skb ) ;
unsigned int len_left = skb - > len - frag_len_left ;
int eop = ( len_left = = 0 ) ;
unsigned int offset = 0 ;
unsigned int hdr_len ;
dma_addr_t dma_addr ;
unsigned int len ;
skb_frag_t * frag ;
if ( skb - > encapsulation ) {
hdr_len = skb_inner_transport_header ( skb ) - skb - > data ;
hdr_len + = inner_tcp_hdrlen ( skb ) ;
enic_preload_tcp_csum_encap ( skb ) ;
} else {
hdr_len = skb_transport_offset ( skb ) + tcp_hdrlen ( skb ) ;
enic_preload_tcp_csum ( skb ) ;
}
2008-09-15 09:17:11 -07:00
2009-09-03 17:02:03 +00:00
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
* for the main skb fragment
*/
while ( frag_len_left ) {
len = min ( frag_len_left , ( unsigned int ) WQ_ENET_MAX_DESC_LEN ) ;
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
dma_addr = dma_map_single ( & enic - > pdev - > dev ,
skb - > data + offset , len ,
DMA_TO_DEVICE ) ;
2014-12-24 15:59:37 +05:30
if ( unlikely ( enic_dma_map_check ( enic , dma_addr ) ) )
return - ENOMEM ;
enic_queue_wq_desc_tso ( wq , skb , dma_addr , len , mss , hdr_len ,
vlan_tag_insert , vlan_tag ,
eop & & ( len = = frag_len_left ) , loopback ) ;
2009-09-03 17:02:03 +00:00
frag_len_left - = len ;
offset + = len ;
}
2008-09-15 09:17:11 -07:00
2009-09-03 17:02:03 +00:00
if ( eop )
2014-12-24 15:59:37 +05:30
return 0 ;
2009-09-03 17:02:03 +00:00
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
* for additional data fragments
*/
for ( frag = skb_shinfo ( skb ) - > frags ; len_left ; frag + + ) {
2011-10-18 21:00:24 +00:00
len_left - = skb_frag_size ( frag ) ;
frag_len_left = skb_frag_size ( frag ) ;
2011-08-29 23:18:27 +00:00
offset = 0 ;
2009-09-03 17:02:03 +00:00
while ( frag_len_left ) {
len = min ( frag_len_left ,
( unsigned int ) WQ_ENET_MAX_DESC_LEN ) ;
2011-08-29 23:18:27 +00:00
dma_addr = skb_frag_dma_map ( & enic - > pdev - > dev , frag ,
offset , len ,
2011-10-06 11:10:48 +01:00
DMA_TO_DEVICE ) ;
2014-12-24 15:59:37 +05:30
if ( unlikely ( enic_dma_map_check ( enic , dma_addr ) ) )
return - ENOMEM ;
enic_queue_wq_desc_cont ( wq , skb , dma_addr , len ,
( len_left = = 0 ) & &
( len = = frag_len_left ) , /*EOP*/
loopback ) ;
2009-09-03 17:02:03 +00:00
frag_len_left - = len ;
offset + = len ;
}
}
2014-12-24 15:59:37 +05:30
return 0 ;
2008-09-15 09:17:11 -07:00
}
2017-02-08 16:43:09 -08:00
static inline int enic_queue_wq_skb_encap ( struct enic * enic , struct vnic_wq * wq ,
struct sk_buff * skb ,
int vlan_tag_insert ,
unsigned int vlan_tag , int loopback )
{
unsigned int head_len = skb_headlen ( skb ) ;
unsigned int len_left = skb - > len - head_len ;
/* Hardware will overwrite the checksum fields, calculating from
* scratch and ignoring the value placed by software .
* Offload mode = 00
* mss [ 2 ] , mss [ 1 ] , mss [ 0 ] bits are set
*/
unsigned int mss_or_csum = 7 ;
int eop = ( len_left = = 0 ) ;
dma_addr_t dma_addr ;
int err = 0 ;
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
dma_addr = dma_map_single ( & enic - > pdev - > dev , skb - > data , head_len ,
DMA_TO_DEVICE ) ;
2017-02-08 16:43:09 -08:00
if ( unlikely ( enic_dma_map_check ( enic , dma_addr ) ) )
return - ENOMEM ;
enic_queue_wq_desc_ex ( wq , skb , dma_addr , head_len , mss_or_csum , 0 ,
vlan_tag_insert , vlan_tag ,
WQ_ENET_OFFLOAD_MODE_CSUM , eop , 1 /* SOP */ , eop ,
loopback ) ;
if ( ! eop )
err = enic_queue_wq_skb_cont ( enic , wq , skb , len_left , loopback ) ;
return err ;
}
2021-05-02 04:58:18 -07:00
static inline int enic_queue_wq_skb ( struct enic * enic ,
2008-09-15 09:17:11 -07:00
struct vnic_wq * wq , struct sk_buff * skb )
{
unsigned int mss = skb_shinfo ( skb ) - > gso_size ;
unsigned int vlan_tag = 0 ;
int vlan_tag_insert = 0 ;
2010-06-24 10:51:59 +00:00
int loopback = 0 ;
2014-12-24 15:59:37 +05:30
int err ;
2008-09-15 09:17:11 -07:00
2015-01-13 17:13:44 +01:00
if ( skb_vlan_tag_present ( skb ) ) {
2008-09-15 09:17:11 -07:00
/* VLAN tag from trunking driver */
vlan_tag_insert = 1 ;
2015-01-13 17:13:44 +01:00
vlan_tag = skb_vlan_tag_get ( skb ) ;
2010-06-24 10:51:59 +00:00
} else if ( enic - > loop_enable ) {
vlan_tag = enic - > loop_tag ;
loopback = 1 ;
2008-09-15 09:17:11 -07:00
}
if ( mss )
2014-12-24 15:59:37 +05:30
err = enic_queue_wq_skb_tso ( enic , wq , skb , mss ,
vlan_tag_insert , vlan_tag ,
loopback ) ;
2017-02-08 16:43:09 -08:00
else if ( skb - > encapsulation )
err = enic_queue_wq_skb_encap ( enic , wq , skb , vlan_tag_insert ,
vlan_tag , loopback ) ;
2008-09-15 09:17:11 -07:00
else if ( skb - > ip_summed = = CHECKSUM_PARTIAL )
2014-12-24 15:59:37 +05:30
err = enic_queue_wq_skb_csum_l4 ( enic , wq , skb , vlan_tag_insert ,
vlan_tag , loopback ) ;
2008-09-15 09:17:11 -07:00
else
2014-12-24 15:59:37 +05:30
err = enic_queue_wq_skb_vlan ( enic , wq , skb , vlan_tag_insert ,
vlan_tag , loopback ) ;
if ( unlikely ( err ) ) {
struct vnic_wq_buf * buf ;
buf = wq - > to_use - > prev ;
/* while not EOP of previous pkt && queue not empty.
* For all non EOP bufs , os_buf is NULL .
*/
while ( ! buf - > os_buf & & ( buf - > next ! = wq - > to_clean ) ) {
enic_free_wq_buf ( wq , buf ) ;
wq - > ring . desc_avail + + ;
buf = buf - > prev ;
}
wq - > to_use = buf - > next ;
dev_kfree_skb ( skb ) ;
}
2021-05-02 04:58:18 -07:00
return err ;
2008-09-15 09:17:11 -07:00
}
2009-02-09 23:23:50 -08:00
/* netif_tx_lock held, process context with BHs disabled, or BH */
2009-08-31 19:50:58 +00:00
static netdev_tx_t enic_hard_start_xmit ( struct sk_buff * skb ,
2009-12-23 13:27:59 +00:00
struct net_device * netdev )
2008-09-15 09:17:11 -07:00
{
struct enic * enic = netdev_priv ( netdev ) ;
2013-09-04 11:17:14 +05:30
struct vnic_wq * wq ;
unsigned int txq_map ;
2014-11-19 12:59:32 +05:30
struct netdev_queue * txq ;
2008-09-15 09:17:11 -07:00
if ( skb - > len < = 0 ) {
2014-03-15 16:49:05 -07:00
dev_kfree_skb_any ( skb ) ;
2008-09-15 09:17:11 -07:00
return NETDEV_TX_OK ;
}
2013-09-04 11:17:14 +05:30
txq_map = skb_get_queue_mapping ( skb ) % enic - > wq_count ;
wq = & enic - > wq [ txq_map ] ;
2014-11-19 12:59:32 +05:30
txq = netdev_get_tx_queue ( netdev , txq_map ) ;
2013-09-04 11:17:14 +05:30
2008-09-15 09:17:11 -07:00
/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
* which is very likely . In the off chance it ' s going to take
* more than * ENIC_NON_TSO_MAX_DESC , linearize the skb .
*/
if ( skb_shinfo ( skb ) - > gso_size = = 0 & &
skb_shinfo ( skb ) - > nr_frags + 1 > ENIC_NON_TSO_MAX_DESC & &
skb_linearize ( skb ) ) {
2014-03-15 16:49:05 -07:00
dev_kfree_skb_any ( skb ) ;
2008-09-15 09:17:11 -07:00
return NETDEV_TX_OK ;
}
2014-11-23 01:22:52 +05:30
spin_lock ( & enic - > wq_lock [ txq_map ] ) ;
2008-09-15 09:17:11 -07:00
2009-09-03 17:02:03 +00:00
if ( vnic_wq_desc_avail ( wq ) <
skb_shinfo ( skb ) - > nr_frags + ENIC_DESC_MAX_SPLITS ) {
2014-11-19 12:59:32 +05:30
netif_tx_stop_queue ( txq ) ;
2008-09-15 09:17:11 -07:00
/* This is a hard error, log it */
2010-06-24 10:50:56 +00:00
netdev_err ( netdev , " BUG! Tx ring full when queue awake! \n " ) ;
2014-11-23 01:22:52 +05:30
spin_unlock ( & enic - > wq_lock [ txq_map ] ) ;
2008-09-15 09:17:11 -07:00
return NETDEV_TX_BUSY ;
}
2021-05-02 04:58:18 -07:00
if ( enic_queue_wq_skb ( enic , wq , skb ) )
goto error ;
2008-09-15 09:17:11 -07:00
2009-09-03 17:02:03 +00:00
if ( vnic_wq_desc_avail ( wq ) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS )
2014-11-19 12:59:32 +05:30
netif_tx_stop_queue ( txq ) ;
2017-12-01 10:21:40 -08:00
skb_tx_timestamp ( skb ) ;
2019-04-01 16:42:14 +02:00
if ( ! netdev_xmit_more ( ) | | netif_xmit_stopped ( txq ) )
2014-11-19 12:59:32 +05:30
vnic_wq_doorbell ( wq ) ;
2008-09-15 09:17:11 -07:00
2021-05-02 04:58:18 -07:00
error :
2014-11-23 01:22:52 +05:30
spin_unlock ( & enic - > wq_lock [ txq_map ] ) ;
2008-09-15 09:17:11 -07:00
return NETDEV_TX_OK ;
}
/* dev_base_lock rwlock held, nominally process context */
2017-01-06 19:12:52 -08:00
static void enic_get_stats ( struct net_device * netdev ,
struct rtnl_link_stats64 * net_stats )
2008-09-15 09:17:11 -07:00
{
struct enic * enic = netdev_priv ( netdev ) ;
struct vnic_stats * stats ;
2015-06-11 11:52:55 +05:30
int err ;
2008-09-15 09:17:11 -07:00
2015-06-11 11:52:55 +05:30
err = enic_dev_stats_dump ( enic , & stats ) ;
2021-09-25 20:46:28 +08:00
/* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
2015-06-11 11:52:55 +05:30
* For other failures , like devcmd failure , we return previously
* recorded stats .
*/
if ( err = = - ENOMEM )
2017-01-06 19:12:52 -08:00
return ;
2008-09-15 09:17:11 -07:00
2008-09-24 11:23:32 -07:00
net_stats - > tx_packets = stats - > tx . tx_frames_ok ;
net_stats - > tx_bytes = stats - > tx . tx_bytes_ok ;
net_stats - > tx_errors = stats - > tx . tx_errors ;
net_stats - > tx_dropped = stats - > tx . tx_drops ;
2008-09-15 09:17:11 -07:00
2008-09-24 11:23:32 -07:00
net_stats - > rx_packets = stats - > rx . rx_frames_ok ;
net_stats - > rx_bytes = stats - > rx . rx_bytes_ok ;
net_stats - > rx_errors = stats - > rx . rx_errors ;
net_stats - > multicast = stats - > rx . rx_multicast_frames_ok ;
2009-09-03 17:02:19 +00:00
net_stats - > rx_over_errors = enic - > rq_truncated_pkts ;
2009-02-09 23:24:08 -08:00
net_stats - > rx_crc_errors = enic - > rq_bad_fcs ;
2009-09-03 17:02:19 +00:00
net_stats - > rx_dropped = stats - > rx . rx_no_bufs + stats - > rx . rx_drop ;
2008-09-15 09:17:11 -07:00
}
2014-05-28 18:44:52 -07:00
static int enic_mc_sync ( struct net_device * netdev , const u8 * mc_addr )
{
struct enic * enic = netdev_priv ( netdev ) ;
if ( enic - > mc_count = = ENIC_MULTICAST_PERFECT_FILTERS ) {
unsigned int mc_count = netdev_mc_count ( netdev ) ;
netdev_warn ( netdev , " Registering only %d out of %d multicast addresses \n " ,
ENIC_MULTICAST_PERFECT_FILTERS , mc_count ) ;
return - ENOSPC ;
}
enic_dev_add_addr ( enic , mc_addr ) ;
enic - > mc_count + + ;
return 0 ;
}
static int enic_mc_unsync ( struct net_device * netdev , const u8 * mc_addr )
{
struct enic * enic = netdev_priv ( netdev ) ;
enic_dev_del_addr ( enic , mc_addr ) ;
enic - > mc_count - - ;
return 0 ;
}
static int enic_uc_sync ( struct net_device * netdev , const u8 * uc_addr )
{
struct enic * enic = netdev_priv ( netdev ) ;
if ( enic - > uc_count = = ENIC_UNICAST_PERFECT_FILTERS ) {
unsigned int uc_count = netdev_uc_count ( netdev ) ;
netdev_warn ( netdev , " Registering only %d out of %d unicast addresses \n " ,
ENIC_UNICAST_PERFECT_FILTERS , uc_count ) ;
return - ENOSPC ;
}
enic_dev_add_addr ( enic , uc_addr ) ;
enic - > uc_count + + ;
return 0 ;
}
static int enic_uc_unsync ( struct net_device * netdev , const u8 * uc_addr )
{
struct enic * enic = netdev_priv ( netdev ) ;
enic_dev_del_addr ( enic , uc_addr ) ;
enic - > uc_count - - ;
return 0 ;
}
2011-03-29 20:36:07 +00:00
void enic_reset_addr_lists ( struct enic * enic )
2008-09-15 09:17:11 -07:00
{
2014-05-28 18:44:52 -07:00
struct net_device * netdev = enic - > netdev ;
__dev_uc_unsync ( netdev , NULL ) ;
__dev_mc_unsync ( netdev , NULL ) ;
2008-09-15 09:17:11 -07:00
enic - > mc_count = 0 ;
2011-02-17 08:53:12 +00:00
enic - > uc_count = 0 ;
2010-06-24 10:50:00 +00:00
enic - > flags = 0 ;
2008-09-15 09:17:11 -07:00
}
static int enic_set_mac_addr ( struct net_device * netdev , char * addr )
{
2010-05-17 22:50:19 -07:00
struct enic * enic = netdev_priv ( netdev ) ;
2012-01-18 04:24:02 +00:00
if ( enic_is_dynamic ( enic ) | | enic_is_sriov_vf ( enic ) ) {
2010-05-17 22:50:19 -07:00
if ( ! is_valid_ether_addr ( addr ) & & ! is_zero_ether_addr ( addr ) )
return - EADDRNOTAVAIL ;
} else {
if ( ! is_valid_ether_addr ( addr ) )
return - EADDRNOTAVAIL ;
}
2008-09-15 09:17:11 -07:00
2021-10-04 09:05:21 -07:00
eth_hw_addr_set ( netdev , addr ) ;
2008-09-15 09:17:11 -07:00
return 0 ;
}
2010-05-17 22:50:19 -07:00
static int enic_set_mac_address_dynamic ( struct net_device * netdev , void * p )
{
struct enic * enic = netdev_priv ( netdev ) ;
struct sockaddr * saddr = p ;
char * addr = saddr - > sa_data ;
int err ;
if ( netif_running ( enic - > netdev ) ) {
err = enic_dev_del_station_addr ( enic ) ;
if ( err )
return err ;
}
err = enic_set_mac_addr ( netdev , addr ) ;
if ( err )
return err ;
if ( netif_running ( enic - > netdev ) ) {
err = enic_dev_add_station_addr ( enic ) ;
if ( err )
return err ;
}
return err ;
}
static int enic_set_mac_address ( struct net_device * netdev , void * p )
{
2010-08-10 18:54:55 +00:00
struct sockaddr * saddr = p ;
2010-10-20 10:17:04 +00:00
char * addr = saddr - > sa_data ;
struct enic * enic = netdev_priv ( netdev ) ;
int err ;
err = enic_dev_del_station_addr ( enic ) ;
if ( err )
return err ;
err = enic_set_mac_addr ( netdev , addr ) ;
if ( err )
return err ;
2010-08-10 18:54:55 +00:00
2010-10-20 10:17:04 +00:00
return enic_dev_add_station_addr ( enic ) ;
2010-05-17 22:50:19 -07:00
}
2010-12-08 13:19:58 +00:00
/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode ( struct net_device * netdev )
{
struct enic * enic = netdev_priv ( netdev ) ;
int directed = 1 ;
int multicast = ( netdev - > flags & IFF_MULTICAST ) ? 1 : 0 ;
int broadcast = ( netdev - > flags & IFF_BROADCAST ) ? 1 : 0 ;
int promisc = ( netdev - > flags & IFF_PROMISC ) | |
netdev_uc_count ( netdev ) > ENIC_UNICAST_PERFECT_FILTERS ;
int allmulti = ( netdev - > flags & IFF_ALLMULTI ) | |
netdev_mc_count ( netdev ) > ENIC_MULTICAST_PERFECT_FILTERS ;
unsigned int flags = netdev - > flags |
( allmulti ? IFF_ALLMULTI : 0 ) |
( promisc ? IFF_PROMISC : 0 ) ;
if ( enic - > flags ! = flags ) {
enic - > flags = flags ;
enic_dev_packet_filter ( enic , directed ,
multicast , broadcast , promisc , allmulti ) ;
}
if ( ! promisc ) {
2014-05-28 18:44:52 -07:00
__dev_uc_sync ( netdev , enic_uc_sync , enic_uc_unsync ) ;
2010-12-08 13:19:58 +00:00
if ( ! allmulti )
2014-05-28 18:44:52 -07:00
__dev_mc_sync ( netdev , enic_mc_sync , enic_mc_unsync ) ;
2010-12-08 13:19:58 +00:00
}
}
2008-09-15 09:17:11 -07:00
/* netif_tx_lock held, BHs disabled */
netdev: pass the stuck queue to the timeout handler
This allows incrementing the correct timeout statistic without any mess.
Down the road, devices can learn to reset just the specific queue.
The patch was generated with the following script:
use strict;
use warnings;
our $^I = '.bak';
my @work = (
["arch/m68k/emu/nfeth.c", "nfeth_tx_timeout"],
["arch/um/drivers/net_kern.c", "uml_net_tx_timeout"],
["arch/um/drivers/vector_kern.c", "vector_net_tx_timeout"],
["arch/xtensa/platforms/iss/network.c", "iss_net_tx_timeout"],
["drivers/char/pcmcia/synclink_cs.c", "hdlcdev_tx_timeout"],
["drivers/infiniband/ulp/ipoib/ipoib_main.c", "ipoib_timeout"],
["drivers/infiniband/ulp/ipoib/ipoib_main.c", "ipoib_timeout"],
["drivers/message/fusion/mptlan.c", "mpt_lan_tx_timeout"],
["drivers/misc/sgi-xp/xpnet.c", "xpnet_dev_tx_timeout"],
["drivers/net/appletalk/cops.c", "cops_timeout"],
["drivers/net/arcnet/arcdevice.h", "arcnet_timeout"],
["drivers/net/arcnet/arcnet.c", "arcnet_timeout"],
["drivers/net/arcnet/com20020.c", "arcnet_timeout"],
["drivers/net/ethernet/3com/3c509.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c515.c", "corkscrew_timeout"],
["drivers/net/ethernet/3com/3c574_cs.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c589_cs.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c59x.c", "vortex_tx_timeout"],
["drivers/net/ethernet/3com/3c59x.c", "vortex_tx_timeout"],
["drivers/net/ethernet/3com/typhoon.c", "typhoon_tx_timeout"],
["drivers/net/ethernet/8390/8390.h", "ei_tx_timeout"],
["drivers/net/ethernet/8390/8390.h", "eip_tx_timeout"],
["drivers/net/ethernet/8390/8390.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/8390p.c", "eip_tx_timeout"],
["drivers/net/ethernet/8390/ax88796.c", "ax_ei_tx_timeout"],
["drivers/net/ethernet/8390/axnet_cs.c", "axnet_tx_timeout"],
["drivers/net/ethernet/8390/etherh.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/hydra.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/mac8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/mcf8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/lib8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/ne2k-pci.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/pcnet_cs.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/smc-ultra.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/wd.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/zorro8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/adaptec/starfire.c", "tx_timeout"],
["drivers/net/ethernet/agere/et131x.c", "et131x_tx_timeout"],
["drivers/net/ethernet/allwinner/sun4i-emac.c", "emac_timeout"],
["drivers/net/ethernet/alteon/acenic.c", "ace_watchdog"],
["drivers/net/ethernet/amazon/ena/ena_netdev.c", "ena_tx_timeout"],
["drivers/net/ethernet/amd/7990.h", "lance_tx_timeout"],
["drivers/net/ethernet/amd/7990.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/a2065.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/am79c961a.c", "am79c961_timeout"],
["drivers/net/ethernet/amd/amd8111e.c", "amd8111e_tx_timeout"],
["drivers/net/ethernet/amd/ariadne.c", "ariadne_tx_timeout"],
["drivers/net/ethernet/amd/atarilance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/au1000_eth.c", "au1000_tx_timeout"],
["drivers/net/ethernet/amd/declance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/lance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/mvme147.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/ni65.c", "ni65_timeout"],
["drivers/net/ethernet/amd/nmclan_cs.c", "mace_tx_timeout"],
["drivers/net/ethernet/amd/pcnet32.c", "pcnet32_tx_timeout"],
["drivers/net/ethernet/amd/sunlance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/xgbe/xgbe-drv.c", "xgbe_tx_timeout"],
["drivers/net/ethernet/apm/xgene-v2/main.c", "xge_timeout"],
["drivers/net/ethernet/apm/xgene/xgene_enet_main.c", "xgene_enet_timeout"],
["drivers/net/ethernet/apple/macmace.c", "mace_tx_timeout"],
["drivers/net/ethernet/atheros/ag71xx.c", "ag71xx_tx_timeout"],
["drivers/net/ethernet/atheros/alx/main.c", "alx_tx_timeout"],
["drivers/net/ethernet/atheros/atl1c/atl1c_main.c", "atl1c_tx_timeout"],
["drivers/net/ethernet/atheros/atl1e/atl1e_main.c", "atl1e_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl.c", "atlx_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl1.c", "atlx_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl2.c", "atl2_tx_timeout"],
["drivers/net/ethernet/broadcom/b44.c", "b44_tx_timeout"],
["drivers/net/ethernet/broadcom/bcmsysport.c", "bcm_sysport_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2.c", "bnx2_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnxt/bnxt.c", "bnxt_tx_timeout"],
["drivers/net/ethernet/broadcom/genet/bcmgenet.c", "bcmgenet_timeout"],
["drivers/net/ethernet/broadcom/sb1250-mac.c", "sbmac_tx_timeout"],
["drivers/net/ethernet/broadcom/tg3.c", "tg3_tx_timeout"],
["drivers/net/ethernet/calxeda/xgmac.c", "xgmac_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_main.c", "liquidio_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_vf_main.c", "liquidio_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c", "lio_vf_rep_tx_timeout"],
["drivers/net/ethernet/cavium/thunder/nicvf_main.c", "nicvf_tx_timeout"],
["drivers/net/ethernet/cirrus/cs89x0.c", "net_timeout"],
["drivers/net/ethernet/cisco/enic/enic_main.c", "enic_tx_timeout"],
["drivers/net/ethernet/cisco/enic/enic_main.c", "enic_tx_timeout"],
["drivers/net/ethernet/cortina/gemini.c", "gmac_tx_timeout"],
["drivers/net/ethernet/davicom/dm9000.c", "dm9000_timeout"],
["drivers/net/ethernet/dec/tulip/de2104x.c", "de_tx_timeout"],
["drivers/net/ethernet/dec/tulip/tulip_core.c", "tulip_tx_timeout"],
["drivers/net/ethernet/dec/tulip/winbond-840.c", "tx_timeout"],
["drivers/net/ethernet/dlink/dl2k.c", "rio_tx_timeout"],
["drivers/net/ethernet/dlink/sundance.c", "tx_timeout"],
["drivers/net/ethernet/emulex/benet/be_main.c", "be_tx_timeout"],
["drivers/net/ethernet/ethoc.c", "ethoc_tx_timeout"],
["drivers/net/ethernet/faraday/ftgmac100.c", "ftgmac100_tx_timeout"],
["drivers/net/ethernet/fealnx.c", "fealnx_tx_timeout"],
["drivers/net/ethernet/freescale/dpaa/dpaa_eth.c", "dpaa_tx_timeout"],
["drivers/net/ethernet/freescale/fec_main.c", "fec_timeout"],
["drivers/net/ethernet/freescale/fec_mpc52xx.c", "mpc52xx_fec_tx_timeout"],
["drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c", "fs_timeout"],
["drivers/net/ethernet/freescale/gianfar.c", "gfar_timeout"],
["drivers/net/ethernet/freescale/ucc_geth.c", "ucc_geth_timeout"],
["drivers/net/ethernet/fujitsu/fmvj18x_cs.c", "fjn_tx_timeout"],
["drivers/net/ethernet/google/gve/gve_main.c", "gve_tx_timeout"],
["drivers/net/ethernet/hisilicon/hip04_eth.c", "hip04_timeout"],
["drivers/net/ethernet/hisilicon/hix5hd2_gmac.c", "hix5hd2_net_timeout"],
["drivers/net/ethernet/hisilicon/hns/hns_enet.c", "hns_nic_net_timeout"],
["drivers/net/ethernet/hisilicon/hns3/hns3_enet.c", "hns3_nic_net_timeout"],
["drivers/net/ethernet/huawei/hinic/hinic_main.c", "hinic_tx_timeout"],
["drivers/net/ethernet/i825xx/82596.c", "i596_tx_timeout"],
["drivers/net/ethernet/i825xx/ether1.c", "ether1_timeout"],
["drivers/net/ethernet/i825xx/lib82596.c", "i596_tx_timeout"],
["drivers/net/ethernet/i825xx/sun3_82586.c", "sun3_82586_timeout"],
["drivers/net/ethernet/ibm/ehea/ehea_main.c", "ehea_tx_watchdog"],
["drivers/net/ethernet/ibm/emac/core.c", "emac_tx_timeout"],
["drivers/net/ethernet/ibm/emac/core.c", "emac_tx_timeout"],
["drivers/net/ethernet/ibm/ibmvnic.c", "ibmvnic_tx_timeout"],
["drivers/net/ethernet/intel/e100.c", "e100_tx_timeout"],
["drivers/net/ethernet/intel/e1000/e1000_main.c", "e1000_tx_timeout"],
["drivers/net/ethernet/intel/e1000e/netdev.c", "e1000_tx_timeout"],
["drivers/net/ethernet/intel/fm10k/fm10k_netdev.c", "fm10k_tx_timeout"],
["drivers/net/ethernet/intel/i40e/i40e_main.c", "i40e_tx_timeout"],
["drivers/net/ethernet/intel/iavf/iavf_main.c", "iavf_tx_timeout"],
["drivers/net/ethernet/intel/ice/ice_main.c", "ice_tx_timeout"],
["drivers/net/ethernet/intel/ice/ice_main.c", "ice_tx_timeout"],
["drivers/net/ethernet/intel/igb/igb_main.c", "igb_tx_timeout"],
["drivers/net/ethernet/intel/igbvf/netdev.c", "igbvf_tx_timeout"],
["drivers/net/ethernet/intel/ixgb/ixgb_main.c", "ixgb_tx_timeout"],
["drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c", "adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);"],
["drivers/net/ethernet/intel/ixgbe/ixgbe_main.c", "ixgbe_tx_timeout"],
["drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c", "ixgbevf_tx_timeout"],
["drivers/net/ethernet/jme.c", "jme_tx_timeout"],
["drivers/net/ethernet/korina.c", "korina_tx_timeout"],
["drivers/net/ethernet/lantiq_etop.c", "ltq_etop_tx_timeout"],
["drivers/net/ethernet/marvell/mv643xx_eth.c", "mv643xx_eth_tx_timeout"],
["drivers/net/ethernet/marvell/pxa168_eth.c", "pxa168_eth_tx_timeout"],
["drivers/net/ethernet/marvell/skge.c", "skge_tx_timeout"],
["drivers/net/ethernet/marvell/sky2.c", "sky2_tx_timeout"],
["drivers/net/ethernet/marvell/sky2.c", "sky2_tx_timeout"],
["drivers/net/ethernet/mediatek/mtk_eth_soc.c", "mtk_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx4/en_netdev.c", "mlx4_en_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx4/en_netdev.c", "mlx4_en_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx5/core/en_main.c", "mlx5e_tx_timeout"],
["drivers/net/ethernet/micrel/ks8842.c", "ks8842_tx_timeout"],
["drivers/net/ethernet/micrel/ksz884x.c", "netdev_tx_timeout"],
["drivers/net/ethernet/microchip/enc28j60.c", "enc28j60_tx_timeout"],
["drivers/net/ethernet/microchip/encx24j600.c", "encx24j600_tx_timeout"],
["drivers/net/ethernet/natsemi/sonic.h", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/sonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/jazzsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/macsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/natsemi.c", "ns_tx_timeout"],
["drivers/net/ethernet/natsemi/ns83820.c", "ns83820_tx_timeout"],
["drivers/net/ethernet/natsemi/xtsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/neterion/s2io.h", "s2io_tx_watchdog"],
["drivers/net/ethernet/neterion/s2io.c", "s2io_tx_watchdog"],
["drivers/net/ethernet/neterion/vxge/vxge-main.c", "vxge_tx_watchdog"],
["drivers/net/ethernet/netronome/nfp/nfp_net_common.c", "nfp_net_tx_timeout"],
["drivers/net/ethernet/nvidia/forcedeth.c", "nv_tx_timeout"],
["drivers/net/ethernet/nvidia/forcedeth.c", "nv_tx_timeout"],
["drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c", "pch_gbe_tx_timeout"],
["drivers/net/ethernet/packetengines/hamachi.c", "hamachi_tx_timeout"],
["drivers/net/ethernet/packetengines/yellowfin.c", "yellowfin_tx_timeout"],
["drivers/net/ethernet/pensando/ionic/ionic_lif.c", "ionic_tx_timeout"],
["drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c", "netxen_tx_timeout"],
["drivers/net/ethernet/qlogic/qla3xxx.c", "ql3xxx_tx_timeout"],
["drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c", "qlcnic_tx_timeout"],
["drivers/net/ethernet/qualcomm/emac/emac.c", "emac_tx_timeout"],
["drivers/net/ethernet/qualcomm/qca_spi.c", "qcaspi_netdev_tx_timeout"],
["drivers/net/ethernet/qualcomm/qca_uart.c", "qcauart_netdev_tx_timeout"],
["drivers/net/ethernet/rdc/r6040.c", "r6040_tx_timeout"],
["drivers/net/ethernet/realtek/8139cp.c", "cp_tx_timeout"],
["drivers/net/ethernet/realtek/8139too.c", "rtl8139_tx_timeout"],
["drivers/net/ethernet/realtek/atp.c", "tx_timeout"],
["drivers/net/ethernet/realtek/r8169_main.c", "rtl8169_tx_timeout"],
["drivers/net/ethernet/renesas/ravb_main.c", "ravb_tx_timeout"],
["drivers/net/ethernet/renesas/sh_eth.c", "sh_eth_tx_timeout"],
["drivers/net/ethernet/renesas/sh_eth.c", "sh_eth_tx_timeout"],
["drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c", "sxgbe_tx_timeout"],
["drivers/net/ethernet/seeq/ether3.c", "ether3_timeout"],
["drivers/net/ethernet/seeq/sgiseeq.c", "timeout"],
["drivers/net/ethernet/sfc/efx.c", "efx_watchdog"],
["drivers/net/ethernet/sfc/falcon/efx.c", "ef4_watchdog"],
["drivers/net/ethernet/sgi/ioc3-eth.c", "ioc3_timeout"],
["drivers/net/ethernet/sgi/meth.c", "meth_tx_timeout"],
["drivers/net/ethernet/silan/sc92031.c", "sc92031_tx_timeout"],
["drivers/net/ethernet/sis/sis190.c", "sis190_tx_timeout"],
["drivers/net/ethernet/sis/sis900.c", "sis900_tx_timeout"],
["drivers/net/ethernet/smsc/epic100.c", "epic_tx_timeout"],
["drivers/net/ethernet/smsc/smc911x.c", "smc911x_timeout"],
["drivers/net/ethernet/smsc/smc9194.c", "smc_timeout"],
["drivers/net/ethernet/smsc/smc91c92_cs.c", "smc_tx_timeout"],
["drivers/net/ethernet/smsc/smc91x.c", "smc_timeout"],
["drivers/net/ethernet/stmicro/stmmac/stmmac_main.c", "stmmac_tx_timeout"],
["drivers/net/ethernet/sun/cassini.c", "cas_tx_timeout"],
["drivers/net/ethernet/sun/ldmvsw.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/niu.c", "niu_tx_timeout"],
["drivers/net/ethernet/sun/sunbmac.c", "bigmac_tx_timeout"],
["drivers/net/ethernet/sun/sungem.c", "gem_tx_timeout"],
["drivers/net/ethernet/sun/sunhme.c", "happy_meal_tx_timeout"],
["drivers/net/ethernet/sun/sunqe.c", "qe_tx_timeout"],
["drivers/net/ethernet/sun/sunvnet.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/sunvnet_common.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/sunvnet_common.h", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/synopsys/dwc-xlgmac-net.c", "xlgmac_tx_timeout"],
["drivers/net/ethernet/ti/cpmac.c", "cpmac_tx_timeout"],
["drivers/net/ethernet/ti/cpsw.c", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/cpsw_priv.c", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/cpsw_priv.h", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/davinci_emac.c", "emac_dev_tx_timeout"],
["drivers/net/ethernet/ti/netcp_core.c", "netcp_ndo_tx_timeout"],
["drivers/net/ethernet/ti/tlan.c", "tlan_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_net.h", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_net.c", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_wireless.c", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/spider_net.c", "spider_net_tx_timeout"],
["drivers/net/ethernet/toshiba/tc35815.c", "tc35815_tx_timeout"],
["drivers/net/ethernet/via/via-rhine.c", "rhine_tx_timeout"],
["drivers/net/ethernet/wiznet/w5100.c", "w5100_tx_timeout"],
["drivers/net/ethernet/wiznet/w5300.c", "w5300_tx_timeout"],
["drivers/net/ethernet/xilinx/xilinx_emaclite.c", "xemaclite_tx_timeout"],
["drivers/net/ethernet/xircom/xirc2ps_cs.c", "xirc_tx_timeout"],
["drivers/net/fjes/fjes_main.c", "fjes_tx_retry"],
["drivers/net/slip/slip.c", "sl_tx_timeout"],
["include/linux/usb/usbnet.h", "usbnet_tx_timeout"],
["drivers/net/usb/aqc111.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/ax88172a.c", "usbnet_tx_timeout"],
["drivers/net/usb/ax88179_178a.c", "usbnet_tx_timeout"],
["drivers/net/usb/catc.c", "catc_tx_timeout"],
["drivers/net/usb/cdc_mbim.c", "usbnet_tx_timeout"],
["drivers/net/usb/cdc_ncm.c", "usbnet_tx_timeout"],
["drivers/net/usb/dm9601.c", "usbnet_tx_timeout"],
["drivers/net/usb/hso.c", "hso_net_tx_timeout"],
["drivers/net/usb/int51x1.c", "usbnet_tx_timeout"],
["drivers/net/usb/ipheth.c", "ipheth_tx_timeout"],
["drivers/net/usb/kaweth.c", "kaweth_tx_timeout"],
["drivers/net/usb/lan78xx.c", "lan78xx_tx_timeout"],
["drivers/net/usb/mcs7830.c", "usbnet_tx_timeout"],
["drivers/net/usb/pegasus.c", "pegasus_tx_timeout"],
["drivers/net/usb/qmi_wwan.c", "usbnet_tx_timeout"],
["drivers/net/usb/r8152.c", "rtl8152_tx_timeout"],
["drivers/net/usb/rndis_host.c", "usbnet_tx_timeout"],
["drivers/net/usb/rtl8150.c", "rtl8150_tx_timeout"],
["drivers/net/usb/sierra_net.c", "usbnet_tx_timeout"],
["drivers/net/usb/smsc75xx.c", "usbnet_tx_timeout"],
["drivers/net/usb/smsc95xx.c", "usbnet_tx_timeout"],
["drivers/net/usb/sr9700.c", "usbnet_tx_timeout"],
["drivers/net/usb/sr9800.c", "usbnet_tx_timeout"],
["drivers/net/usb/usbnet.c", "usbnet_tx_timeout"],
["drivers/net/vmxnet3/vmxnet3_drv.c", "vmxnet3_tx_timeout"],
["drivers/net/wan/cosa.c", "cosa_net_timeout"],
["drivers/net/wan/farsync.c", "fst_tx_timeout"],
["drivers/net/wan/fsl_ucc_hdlc.c", "uhdlc_tx_timeout"],
["drivers/net/wan/lmc/lmc_main.c", "lmc_driver_timeout"],
["drivers/net/wan/x25_asy.c", "x25_asy_timeout"],
["drivers/net/wimax/i2400m/netdev.c", "i2400m_tx_timeout"],
["drivers/net/wireless/intel/ipw2x00/ipw2100.c", "ipw2100_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/main.c", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/orinoco_usb.c", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/orinoco.h", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_dev.c", "islpci_eth_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_eth.c", "islpci_eth_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_eth.h", "islpci_eth_tx_timeout"],
["drivers/net/wireless/marvell/mwifiex/main.c", "mwifiex_tx_timeout"],
["drivers/net/wireless/quantenna/qtnfmac/core.c", "qtnf_netdev_tx_timeout"],
["drivers/net/wireless/quantenna/qtnfmac/core.h", "qtnf_netdev_tx_timeout"],
["drivers/net/wireless/rndis_wlan.c", "usbnet_tx_timeout"],
["drivers/net/wireless/wl3501_cs.c", "wl3501_tx_timeout"],
["drivers/net/wireless/zydas/zd1201.c", "zd1201_tx_timeout"],
["drivers/s390/net/qeth_core.h", "qeth_tx_timeout"],
["drivers/s390/net/qeth_core_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"],
["drivers/staging/ks7010/ks_wlan_net.c", "ks_wlan_tx_timeout"],
["drivers/staging/qlge/qlge_main.c", "qlge_tx_timeout"],
["drivers/staging/rtl8192e/rtl8192e/rtl_core.c", "_rtl92e_tx_timeout"],
["drivers/staging/rtl8192u/r8192U_core.c", "tx_timeout"],
["drivers/staging/unisys/visornic/visornic_main.c", "visornic_xmit_timeout"],
["drivers/staging/wlan-ng/p80211netdev.c", "p80211knetdev_tx_timeout"],
["drivers/tty/n_gsm.c", "gsm_mux_net_tx_timeout"],
["drivers/tty/synclink.c", "hdlcdev_tx_timeout"],
["drivers/tty/synclink_gt.c", "hdlcdev_tx_timeout"],
["drivers/tty/synclinkmp.c", "hdlcdev_tx_timeout"],
["net/atm/lec.c", "lec_tx_timeout"],
["net/bluetooth/bnep/netdev.c", "bnep_net_timeout"]
);
for my $p (@work) {
my @pair = @$p;
my $file = $pair[0];
my $func = $pair[1];
print STDERR $file , ": ", $func,"\n";
our @ARGV = ($file);
while (<ARGV>) {
if (m/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/) {
print STDERR "found $1+$2 in $file\n";
}
if (s/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/$1, unsigned int txqueue$2/) {
print STDERR "$func found in $file\n";
}
print;
}
}
where the list of files and functions is simply from:
git grep ndo_tx_timeout, with manual addition of headers
in the rare cases where the function is from a header,
then manually changing the few places which actually
call ndo_tx_timeout.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Heiner Kallweit <hkallweit1@gmail.com>
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Shannon Nelson <snelson@pensando.io>
Reviewed-by: Martin Habets <mhabets@solarflare.com>
changes from v9:
fixup a forward declaration
changes from v9:
more leftovers from v3 change
changes from v8:
fix up a missing direct call to timeout
rebased on net-next
changes from v7:
fixup leftovers from v3 change
changes from v6:
fix typo in rtl driver
changes from v5:
add missing files (allow any net device argument name)
changes from v4:
add a missing driver header
changes from v3:
change queue # to unsigned
Changes from v2:
added headers
Changes from v1:
Fix errors found by kbuild:
generalize the pattern a bit, to pick up
a couple of instances missed by the previous
version.
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-12-10 09:23:51 -05:00
static void enic_tx_timeout ( struct net_device * netdev , unsigned int txqueue )
2008-09-15 09:17:11 -07:00
{
struct enic * enic = netdev_priv ( netdev ) ;
2015-10-01 14:18:47 +05:30
schedule_work ( & enic - > tx_hang_reset ) ;
2008-09-15 09:17:11 -07:00
}
2010-12-08 13:53:58 +00:00
/* ndo_set_vf_mac: set the MAC address for VF @vf.  A zero address is
 * accepted as "clear".  Returns -EINVAL for a multicast/invalid address.
 */
static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	/* Reject anything that is neither a valid unicast nor the
	 * all-zero "clear" address.
	 */
	if (!is_valid_ether_addr(mac) && !is_zero_ether_addr(mac))
		return -EINVAL;

	if (vf == PORT_SELF_VF) {
		/* Dynamic vNIC: stash the address in the port profile;
		 * it is programmed later by enic_set_vf_port().
		 */
		memcpy(pp->vf_mac, mac, ETH_ALEN);
		return 0;
	}

	/*
	 * For sriov vf's set the mac in hw
	 */
	ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
		vnic_dev_set_mac_addr, mac);
	return enic_dev_status_to_errno(err);
}
2010-05-17 22:50:19 -07:00
static int enic_set_vf_port ( struct net_device * netdev , int vf ,
struct nlattr * port [ ] )
{
2021-10-15 15:16:48 -07:00
static const u8 zero_addr [ ETH_ALEN ] = { } ;
2010-05-17 22:50:19 -07:00
struct enic * enic = netdev_priv ( netdev ) ;
2011-03-29 20:36:07 +00:00
struct enic_port_profile prev_pp ;
2011-09-22 03:44:43 +00:00
struct enic_port_profile * pp ;
2011-03-29 20:36:07 +00:00
int err = 0 , restore_pp = 1 ;
2010-06-01 08:59:33 +00:00
2011-09-22 03:44:43 +00:00
ENIC_PP_BY_INDEX ( enic , vf , pp , & err ) ;
if ( err )
return err ;
2010-06-01 08:59:33 +00:00
2011-03-29 20:36:07 +00:00
if ( ! port [ IFLA_PORT_REQUEST ] )
return - EOPNOTSUPP ;
2011-09-22 03:44:43 +00:00
memcpy ( & prev_pp , pp , sizeof ( * enic - > pp ) ) ;
memset ( pp , 0 , sizeof ( * enic - > pp ) ) ;
2011-03-29 20:36:07 +00:00
2011-09-22 03:44:43 +00:00
pp - > set | = ENIC_SET_REQUEST ;
pp - > request = nla_get_u8 ( port [ IFLA_PORT_REQUEST ] ) ;
2010-06-01 08:59:33 +00:00
if ( port [ IFLA_PORT_PROFILE ] ) {
2011-09-22 03:44:43 +00:00
pp - > set | = ENIC_SET_NAME ;
memcpy ( pp - > name , nla_data ( port [ IFLA_PORT_PROFILE ] ) ,
2010-06-01 08:59:33 +00:00
PORT_PROFILE_MAX ) ;
}
if ( port [ IFLA_PORT_INSTANCE_UUID ] ) {
2011-09-22 03:44:43 +00:00
pp - > set | = ENIC_SET_INSTANCE ;
memcpy ( pp - > instance_uuid ,
2010-06-01 08:59:33 +00:00
nla_data ( port [ IFLA_PORT_INSTANCE_UUID ] ) , PORT_UUID_MAX ) ;
}
if ( port [ IFLA_PORT_HOST_UUID ] ) {
2011-09-22 03:44:43 +00:00
pp - > set | = ENIC_SET_HOST ;
memcpy ( pp - > host_uuid ,
2010-06-01 08:59:33 +00:00
nla_data ( port [ IFLA_PORT_HOST_UUID ] ) , PORT_UUID_MAX ) ;
}
2010-05-17 22:50:19 -07:00
2012-02-20 00:11:58 +00:00
if ( vf = = PORT_SELF_VF ) {
/* Special case handling: mac came from IFLA_VF_MAC */
if ( ! is_zero_ether_addr ( prev_pp . vf_mac ) )
memcpy ( pp - > mac_addr , prev_pp . vf_mac , ETH_ALEN ) ;
2010-05-22 17:29:58 +00:00
2012-02-20 00:11:58 +00:00
if ( is_zero_ether_addr ( netdev - > dev_addr ) )
eth_hw_addr_random ( netdev ) ;
} else {
/* SR-IOV VF: get mac from adapter */
ENIC_DEVCMD_PROXY_BY_INDEX ( vf , err , enic ,
vnic_dev_get_mac_addr , pp - > mac_addr ) ;
if ( err ) {
netdev_err ( netdev , " Error getting mac for vf %d \n " , vf ) ;
memcpy ( pp , & prev_pp , sizeof ( * pp ) ) ;
return enic_dev_status_to_errno ( err ) ;
}
}
2010-05-17 22:50:19 -07:00
2011-09-22 03:44:43 +00:00
err = enic_process_set_pp_request ( enic , vf , & prev_pp , & restore_pp ) ;
2011-03-29 20:36:07 +00:00
if ( err ) {
if ( restore_pp ) {
/* Things are still the way they were: Implicit
* DISASSOCIATE failed
*/
2011-09-22 03:44:43 +00:00
memcpy ( pp , & prev_pp , sizeof ( * pp ) ) ;
2011-03-29 20:36:07 +00:00
} else {
2011-09-22 03:44:43 +00:00
memset ( pp , 0 , sizeof ( * pp ) ) ;
if ( vf = = PORT_SELF_VF )
2021-10-15 15:16:48 -07:00
eth_hw_addr_set ( netdev , zero_addr ) ;
2011-03-29 20:36:07 +00:00
}
} else {
/* Set flag to indicate that the port assoc/disassoc
* request has been sent out to fw
*/
2011-09-22 03:44:43 +00:00
pp - > set | = ENIC_PORT_REQUEST_APPLIED ;
2011-03-29 20:36:07 +00:00
/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
2011-09-22 03:44:43 +00:00
if ( pp - > request = = PORT_REQUEST_DISASSOCIATE ) {
2015-03-02 19:54:47 -08:00
eth_zero_addr ( pp - > mac_addr ) ;
2011-09-22 03:44:43 +00:00
if ( vf = = PORT_SELF_VF )
2021-10-15 15:16:48 -07:00
eth_hw_addr_set ( netdev , zero_addr ) ;
2011-03-29 20:36:07 +00:00
}
}
2010-12-08 13:54:03 +00:00
2012-02-20 00:11:58 +00:00
if ( vf = = PORT_SELF_VF )
2015-03-02 19:54:47 -08:00
eth_zero_addr ( pp - > vf_mac ) ;
2010-12-08 13:54:03 +00:00
return err ;
2010-05-17 22:50:19 -07:00
}
static int enic_get_vf_port ( struct net_device * netdev , int vf ,
struct sk_buff * skb )
{
struct enic * enic = netdev_priv ( netdev ) ;
u16 response = PORT_PROFILE_RESPONSE_SUCCESS ;
2011-09-22 03:44:43 +00:00
struct enic_port_profile * pp ;
2011-03-29 20:36:07 +00:00
int err ;
2010-05-17 22:50:19 -07:00
2011-09-22 03:44:43 +00:00
ENIC_PP_BY_INDEX ( enic , vf , pp , & err ) ;
if ( err )
return err ;
if ( ! ( pp - > set & ENIC_PORT_REQUEST_APPLIED ) )
2010-06-01 08:59:33 +00:00
return - ENODATA ;
2010-05-17 22:50:19 -07:00
2011-09-22 03:44:43 +00:00
err = enic_process_get_pp_request ( enic , vf , pp - > request , & response ) ;
2010-05-17 22:50:19 -07:00
if ( err )
2011-03-29 20:36:07 +00:00
return err ;
2010-05-17 22:50:19 -07:00
2012-04-01 20:22:22 -04:00
if ( nla_put_u16 ( skb , IFLA_PORT_REQUEST , pp - > request ) | |
nla_put_u16 ( skb , IFLA_PORT_RESPONSE , response ) | |
( ( pp - > set & ENIC_SET_NAME ) & &
nla_put ( skb , IFLA_PORT_PROFILE , PORT_PROFILE_MAX , pp - > name ) ) | |
( ( pp - > set & ENIC_SET_INSTANCE ) & &
nla_put ( skb , IFLA_PORT_INSTANCE_UUID , PORT_UUID_MAX ,
pp - > instance_uuid ) ) | |
( ( pp - > set & ENIC_SET_HOST ) & &
nla_put ( skb , IFLA_PORT_HOST_UUID , PORT_UUID_MAX , pp - > host_uuid ) ) )
goto nla_put_failure ;
2010-05-17 22:50:19 -07:00
return 0 ;
nla_put_failure :
return - EMSGSIZE ;
}
2008-09-15 09:17:11 -07:00
static void enic_free_rq_buf ( struct vnic_rq * rq , struct vnic_rq_buf * buf )
{
struct enic * enic = vnic_dev_priv ( rq - > vdev ) ;
if ( ! buf - > os_buf )
return ;
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
dma_unmap_single ( & enic - > pdev - > dev , buf - > dma_addr , buf - > len ,
DMA_FROM_DEVICE ) ;
2008-09-15 09:17:11 -07:00
dev_kfree_skb_any ( buf - > os_buf ) ;
2014-09-03 03:17:19 +05:30
buf - > os_buf = NULL ;
2008-09-15 09:17:11 -07:00
}
static int enic_rq_alloc_buf ( struct vnic_rq * rq )
{
struct enic * enic = vnic_dev_priv ( rq - > vdev ) ;
2009-09-03 17:02:08 +00:00
struct net_device * netdev = enic - > netdev ;
2008-09-15 09:17:11 -07:00
struct sk_buff * skb ;
2010-06-24 10:51:59 +00:00
unsigned int len = netdev - > mtu + VLAN_ETH_HLEN ;
2008-09-15 09:17:11 -07:00
unsigned int os_buf_index = 0 ;
dma_addr_t dma_addr ;
2014-09-03 03:17:19 +05:30
struct vnic_rq_buf * buf = rq - > to_use ;
if ( buf - > os_buf ) {
2014-11-06 15:21:39 +05:30
enic_queue_rq_desc ( rq , buf - > os_buf , os_buf_index , buf - > dma_addr ,
buf - > len ) ;
2008-09-15 09:17:11 -07:00
2014-09-03 03:17:19 +05:30
return 0 ;
}
2009-10-13 05:34:20 +00:00
skb = netdev_alloc_skb_ip_align ( netdev , len ) ;
2008-09-15 09:17:11 -07:00
if ( ! skb )
return - ENOMEM ;
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
dma_addr = dma_map_single ( & enic - > pdev - > dev , skb - > data , len ,
DMA_FROM_DEVICE ) ;
2014-12-24 15:59:37 +05:30
if ( unlikely ( enic_dma_map_check ( enic , dma_addr ) ) ) {
dev_kfree_skb ( skb ) ;
return - ENOMEM ;
}
2008-09-15 09:17:11 -07:00
enic_queue_rq_desc ( rq , skb , os_buf_index ,
dma_addr , len ) ;
return 0 ;
}
2014-05-20 03:14:05 +05:30
/* Accumulate received bytes into per-size-class counters.  The adaptive
 * interrupt-coalescing logic later uses the small/large split to pick a
 * coalescing timer range appropriate for the traffic mix.
 */
static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
				      u32 pkt_len)
{
	if (pkt_len < ENIC_LARGE_PKT_THRESHOLD)
		pkt_size->small_pkt_bytes_cnt += pkt_len;
	else
		pkt_size->large_pkt_bytes_cnt += pkt_len;
}
2014-09-03 03:17:19 +05:30
/* Copy-break optimization for short receive frames.
 *
 * For frames no longer than enic->rx_copybreak, copy the payload into a
 * freshly allocated small skb so the original (large) receive buffer can be
 * recycled without unmapping it.  On success *skb is replaced with the copy
 * and true is returned; the caller keeps the original buffer mapped.  On
 * failure (frame too long, or allocation failed) false is returned and the
 * caller takes ownership of the original buffer as usual.
 */
static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
			     struct vnic_rq_buf *buf, u16 len)
{
	struct enic *enic = netdev_priv(netdev);
	struct sk_buff *copy;

	/* Only short frames are worth copying. */
	if (len > enic->rx_copybreak)
		return false;

	copy = netdev_alloc_skb_ip_align(netdev, len);
	if (!copy)
		return false;

	/* Make the device-written bytes visible to the CPU before copying;
	 * the buffer stays mapped for reuse by the hardware.
	 */
	dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
				DMA_FROM_DEVICE);
	memcpy(copy->data, (*skb)->data, len);
	*skb = copy;

	return true;
}
2008-09-15 09:17:11 -07:00
static void enic_rq_indicate_buf ( struct vnic_rq * rq ,
struct cq_desc * cq_desc , struct vnic_rq_buf * buf ,
int skipped , void * opaque )
{
struct enic * enic = vnic_dev_priv ( rq - > vdev ) ;
2008-11-21 21:26:55 -08:00
struct net_device * netdev = enic - > netdev ;
2008-09-15 09:17:11 -07:00
struct sk_buff * skb ;
2014-05-20 03:14:05 +05:30
struct vnic_cq * cq = & enic - > cq [ enic_cq_rq ( enic , rq - > index ) ] ;
2008-09-15 09:17:11 -07:00
u8 type , color , eop , sop , ingress_port , vlan_stripped ;
u8 fcoe , fcoe_sof , fcoe_fc_crc_ok , fcoe_enc_error , fcoe_eof ;
u8 tcp_udp_csum_ok , udp , tcp , ipv4_csum_ok ;
u8 ipv6 , ipv4 , ipv4_fragment , fcs_ok , rss_type , csum_not_calc ;
u8 packet_error ;
2010-06-24 10:49:51 +00:00
u16 q_number , completed_index , bytes_written , vlan_tci , checksum ;
2008-09-15 09:17:11 -07:00
u32 rss_hash ;
2017-02-08 16:43:08 -08:00
bool outer_csum_ok = true , encap = false ;
2008-09-15 09:17:11 -07:00
if ( skipped )
return ;
skb = buf - > os_buf ;
cq_enet_rq_desc_dec ( ( struct cq_enet_rq_desc * ) cq_desc ,
& type , & color , & q_number , & completed_index ,
& ingress_port , & fcoe , & eop , & sop , & rss_type ,
& csum_not_calc , & rss_hash , & bytes_written ,
2010-06-24 10:49:51 +00:00
& packet_error , & vlan_stripped , & vlan_tci , & checksum ,
2008-09-15 09:17:11 -07:00
& fcoe_sof , & fcoe_fc_crc_ok , & fcoe_enc_error ,
& fcoe_eof , & tcp_udp_csum_ok , & udp , & tcp ,
& ipv4_csum_ok , & ipv6 , & ipv4 , & ipv4_fragment ,
& fcs_ok ) ;
if ( packet_error ) {
2009-09-03 17:02:19 +00:00
if ( ! fcs_ok ) {
if ( bytes_written > 0 )
enic - > rq_bad_fcs + + ;
else if ( bytes_written = = 0 )
enic - > rq_truncated_pkts + + ;
}
2008-09-15 09:17:11 -07:00
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
dma_unmap_single ( & enic - > pdev - > dev , buf - > dma_addr , buf - > len ,
DMA_FROM_DEVICE ) ;
2008-09-15 09:17:11 -07:00
dev_kfree_skb_any ( skb ) ;
2014-11-06 15:21:38 +05:30
buf - > os_buf = NULL ;
2008-09-15 09:17:11 -07:00
return ;
}
if ( eop & & bytes_written > 0 ) {
/* Good receive
*/
2014-09-03 03:17:19 +05:30
if ( ! enic_rxcopybreak ( netdev , & skb , buf , bytes_written ) ) {
buf - > os_buf = NULL ;
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
dma_unmap_single ( & enic - > pdev - > dev , buf - > dma_addr ,
buf - > len , DMA_FROM_DEVICE ) ;
2014-09-03 03:17:19 +05:30
}
prefetch ( skb - > data - NET_IP_ALIGN ) ;
2008-09-15 09:17:11 -07:00
skb_put ( skb , bytes_written ) ;
2008-11-21 21:26:55 -08:00
skb - > protocol = eth_type_trans ( skb , netdev ) ;
2013-09-04 11:17:15 +05:30
skb_record_rx_queue ( skb , q_number ) ;
2017-02-08 16:43:08 -08:00
if ( ( netdev - > features & NETIF_F_RXHASH ) & & rss_hash & &
( type = = 3 ) ) {
2016-11-01 17:58:50 -07:00
switch ( rss_type ) {
case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 :
case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 :
case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX :
skb_set_hash ( skb , rss_hash , PKT_HASH_TYPE_L4 ) ;
break ;
case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 :
case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 :
case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX :
skb_set_hash ( skb , rss_hash , PKT_HASH_TYPE_L3 ) ;
break ;
}
2013-09-04 11:17:15 +05:30
}
2017-02-08 16:43:08 -08:00
if ( enic - > vxlan . vxlan_udp_port_number ) {
switch ( enic - > vxlan . patch_level ) {
case 0 :
if ( fcoe ) {
encap = true ;
outer_csum_ok = fcoe_fc_crc_ok ;
}
break ;
case 2 :
if ( ( type = = 7 ) & &
( rss_hash & BIT ( 0 ) ) ) {
encap = true ;
outer_csum_ok = ( rss_hash & BIT ( 1 ) ) & &
( rss_hash & BIT ( 2 ) ) ;
}
break ;
}
}
2008-09-15 09:17:11 -07:00
2014-12-18 15:58:42 +05:30
/* Hardware does not provide whole packet checksum. It only
* provides pseudo checksum . Since hw validates the packet
* checksum but not provide us the checksum value . use
* CHECSUM_UNNECESSARY .
2017-02-08 16:43:08 -08:00
*
* In case of encap pkt tcp_udp_csum_ok / tcp_udp_csum_ok is
* inner csum_ok . outer_csum_ok is set by hw when outer udp
* csum is correct or is zero .
2014-12-18 15:58:42 +05:30
*/
2017-02-08 16:43:08 -08:00
if ( ( netdev - > features & NETIF_F_RXCSUM ) & & ! csum_not_calc & &
2019-01-30 06:59:00 -08:00
tcp_udp_csum_ok & & outer_csum_ok & &
( ipv4_csum_ok | | ipv6 ) ) {
2014-12-18 15:58:42 +05:30
skb - > ip_summed = CHECKSUM_UNNECESSARY ;
2017-02-08 16:43:08 -08:00
skb - > csum_level = encap ;
}
2008-09-15 09:17:11 -07:00
2011-07-20 04:54:18 +00:00
if ( vlan_stripped )
2013-04-19 02:04:30 +00:00
__vlan_hwaccel_put_tag ( skb , htons ( ETH_P_8021Q ) , vlan_tci ) ;
2008-09-15 09:17:11 -07:00
2014-06-23 16:08:04 +05:30
skb_mark_napi_id ( skb , & enic - > napi [ rq - > index ] ) ;
2017-02-03 17:28:21 -05:00
if ( ! ( netdev - > features & NETIF_F_GRO ) )
2011-07-20 04:54:18 +00:00
netif_receive_skb ( skb ) ;
2014-06-23 16:08:04 +05:30
else
napi_gro_receive ( & enic - > napi [ q_number ] , skb ) ;
2014-05-20 03:14:05 +05:30
if ( enic - > rx_coalesce_setting . use_adaptive_rx_coalesce )
enic_intr_update_pkt_size ( & cq - > pkt_size_counter ,
bytes_written ) ;
2008-09-15 09:17:11 -07:00
} else {
/* Buffer overflow
*/
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
dma_unmap_single ( & enic - > pdev - > dev , buf - > dma_addr , buf - > len ,
DMA_FROM_DEVICE ) ;
2008-09-15 09:17:11 -07:00
dev_kfree_skb_any ( skb ) ;
2014-11-06 15:21:38 +05:30
buf - > os_buf = NULL ;
2008-09-15 09:17:11 -07:00
}
}
/* CQ service callback for receive completions: forward each completed
 * descriptor on the indicated queue to enic_rq_indicate_buf().
 */
static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, enic_rq_indicate_buf, opaque);

	return 0;
}
2015-07-15 15:34:39 +05:30
static void enic_set_int_moderation ( struct enic * enic , struct vnic_rq * rq )
{
unsigned int intr = enic_msix_rq_intr ( enic , rq - > index ) ;
struct vnic_cq * cq = & enic - > cq [ enic_cq_rq ( enic , rq - > index ) ] ;
u32 timer = cq - > tobe_rx_coal_timeval ;
if ( cq - > tobe_rx_coal_timeval ! = cq - > cur_rx_coal_timeval ) {
vnic_intr_coalescing_timer_set ( & enic - > intr [ intr ] , timer ) ;
cq - > cur_rx_coal_timeval = cq - > tobe_rx_coal_timeval ;
}
}
static void enic_calc_int_moderation ( struct enic * enic , struct vnic_rq * rq )
{
struct enic_rx_coal * rx_coal = & enic - > rx_coalesce_setting ;
struct vnic_cq * cq = & enic - > cq [ enic_cq_rq ( enic , rq - > index ) ] ;
struct vnic_rx_bytes_counter * pkt_size_counter = & cq - > pkt_size_counter ;
int index ;
u32 timer ;
u32 range_start ;
u32 traffic ;
u64 delta ;
ktime_t now = ktime_get ( ) ;
delta = ktime_us_delta ( now , cq - > prev_ts ) ;
if ( delta < ENIC_AIC_TS_BREAK )
return ;
cq - > prev_ts = now ;
traffic = pkt_size_counter - > large_pkt_bytes_cnt +
pkt_size_counter - > small_pkt_bytes_cnt ;
/* The table takes Mbps
* traffic * = 8 = > bits
* traffic * = ( 10 ^ 6 / delta ) = > bps
* traffic / = 10 ^ 6 = > Mbps
*
* Combining , traffic * = ( 8 / delta )
*/
traffic < < = 3 ;
traffic = delta > UINT_MAX ? 0 : traffic / ( u32 ) delta ;
for ( index = 0 ; index < ENIC_MAX_COALESCE_TIMERS ; index + + )
if ( traffic < mod_table [ index ] . rx_rate )
break ;
range_start = ( pkt_size_counter - > small_pkt_bytes_cnt >
pkt_size_counter - > large_pkt_bytes_cnt < < 1 ) ?
rx_coal - > small_pkt_range_start :
rx_coal - > large_pkt_range_start ;
timer = range_start + ( ( rx_coal - > range_end - range_start ) *
mod_table [ index ] . range_percent / 100 ) ;
/* Damping */
cq - > tobe_rx_coal_timeval = ( timer + cq - > tobe_rx_coal_timeval ) > > 1 ;
pkt_size_counter - > large_pkt_bytes_cnt = 0 ;
pkt_size_counter - > small_pkt_bytes_cnt = 0 ;
}
2008-09-15 09:17:11 -07:00
static int enic_poll ( struct napi_struct * napi , int budget )
{
2010-10-20 10:16:59 +00:00
struct net_device * netdev = napi - > dev ;
struct enic * enic = netdev_priv ( netdev ) ;
unsigned int cq_rq = enic_cq_rq ( enic , 0 ) ;
unsigned int cq_wq = enic_cq_wq ( enic , 0 ) ;
unsigned int intr = enic_legacy_io_intr ( ) ;
2008-09-15 09:17:11 -07:00
unsigned int rq_work_to_do = budget ;
2017-12-21 08:12:28 -08:00
unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET ;
2014-03-14 18:02:08 -07:00
unsigned int work_done , rq_work_done = 0 , wq_work_done ;
2009-12-23 13:27:38 +00:00
int err ;
2008-09-15 09:17:11 -07:00
2014-06-23 16:08:04 +05:30
wq_work_done = vnic_cq_service ( & enic - > cq [ cq_wq ] , wq_work_to_do ,
enic_wq_service , NULL ) ;
2014-03-14 18:02:08 -07:00
if ( budget > 0 )
rq_work_done = vnic_cq_service ( & enic - > cq [ cq_rq ] ,
rq_work_to_do , enic_rq_service , NULL ) ;
2008-09-15 09:17:11 -07:00
/* Accumulate intr event credits for this polling
* cycle . An intr event is the completion of a
* a WQ or RQ packet .
*/
work_done = rq_work_done + wq_work_done ;
if ( work_done > 0 )
2010-10-20 10:16:59 +00:00
vnic_intr_return_credits ( & enic - > intr [ intr ] ,
2008-09-15 09:17:11 -07:00
work_done ,
0 /* don't unmask intr */ ,
0 /* don't reset intr timer */ ) ;
2011-02-04 16:17:21 +00:00
err = vnic_rq_fill ( & enic - > rq [ 0 ] , enic_rq_alloc_buf ) ;
2008-09-15 09:17:11 -07:00
2009-12-23 13:27:38 +00:00
/* Buffer allocation failed. Stay in polling
* mode so we can try to fill the ring again .
*/
2008-09-15 09:17:11 -07:00
2009-12-23 13:27:38 +00:00
if ( err )
rq_work_done = rq_work_to_do ;
2015-07-15 15:34:39 +05:30
if ( enic - > rx_coalesce_setting . use_adaptive_rx_coalesce )
/* Call the function which refreshes the intr coalescing timer
* value based on the traffic .
*/
enic_calc_int_moderation ( enic , & enic - > rq [ 0 ] ) ;
2008-09-15 09:17:11 -07:00
2017-05-22 12:19:48 -07:00
if ( ( rq_work_done < budget ) & & napi_complete_done ( napi , rq_work_done ) ) {
2008-09-15 09:17:11 -07:00
2009-12-23 13:27:38 +00:00
/* Some work done, but not enough to stay in polling,
2010-06-24 10:49:25 +00:00
* exit polling
2008-09-15 09:17:11 -07:00
*/
2015-07-15 15:34:39 +05:30
if ( enic - > rx_coalesce_setting . use_adaptive_rx_coalesce )
enic_set_int_moderation ( enic , & enic - > rq [ 0 ] ) ;
2010-10-20 10:16:59 +00:00
vnic_intr_unmask ( & enic - > intr [ intr ] ) ;
2008-09-15 09:17:11 -07:00
}
return rq_work_done ;
}
2014-06-23 16:08:01 +05:30
#ifdef CONFIG_RFS_ACCEL
/* Tear down the RX IRQ-to-CPU reverse map used by aRFS. */
static void enic_free_rx_cpu_rmap(struct enic *enic)
{
	free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
	enic->netdev->rx_cpu_rmap = NULL;
}

/* Build the RX IRQ-to-CPU reverse map for aRFS.  Only meaningful in MSI-X
 * mode, where each RQ has its own interrupt vector.  Failures are
 * non-fatal: the map is simply left unset.
 */
static void enic_set_rx_cpu_rmap(struct enic *enic)
{
	int i, res;

	if (vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX)
		return;

	enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
	if (unlikely(!enic->netdev->rx_cpu_rmap))
		return;

	for (i = 0; i < enic->rq_count; i++) {
		res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
				       enic->msix_entry[i].vector);
		if (unlikely(res)) {
			enic_free_rx_cpu_rmap(enic);
			return;
		}
	}
}

#else

static void enic_free_rx_cpu_rmap(struct enic *enic)
{
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
}

#endif /* CONFIG_RFS_ACCEL */
2014-06-23 16:08:05 +05:30
static int enic_poll_msix_wq ( struct napi_struct * napi , int budget )
{
struct net_device * netdev = napi - > dev ;
struct enic * enic = netdev_priv ( netdev ) ;
unsigned int wq_index = ( napi - & enic - > napi [ 0 ] ) - enic - > rq_count ;
struct vnic_wq * wq = & enic - > wq [ wq_index ] ;
unsigned int cq ;
unsigned int intr ;
2017-12-21 08:12:28 -08:00
unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET ;
2014-06-23 16:08:05 +05:30
unsigned int wq_work_done ;
unsigned int wq_irq ;
wq_irq = wq - > index ;
cq = enic_cq_wq ( enic , wq_irq ) ;
intr = enic_msix_wq_intr ( enic , wq_irq ) ;
wq_work_done = vnic_cq_service ( & enic - > cq [ cq ] , wq_work_to_do ,
enic_wq_service , NULL ) ;
vnic_intr_return_credits ( & enic - > intr [ intr ] , wq_work_done ,
0 /* don't unmask intr */ ,
1 /* reset intr timer */ ) ;
if ( ! wq_work_done ) {
napi_complete ( napi ) ;
vnic_intr_unmask ( & enic - > intr [ intr ] ) ;
2014-11-13 04:12:06 +05:30
return 0 ;
2014-06-23 16:08:05 +05:30
}
2014-11-13 04:12:06 +05:30
return budget ;
2014-06-23 16:08:05 +05:30
}
static int enic_poll_msix_rq ( struct napi_struct * napi , int budget )
2008-09-15 09:17:11 -07:00
{
2010-10-20 10:16:59 +00:00
struct net_device * netdev = napi - > dev ;
struct enic * enic = netdev_priv ( netdev ) ;
unsigned int rq = ( napi - & enic - > napi [ 0 ] ) ;
unsigned int cq = enic_cq_rq ( enic , rq ) ;
unsigned int intr = enic_msix_rq_intr ( enic , rq ) ;
2008-09-15 09:17:11 -07:00
unsigned int work_to_do = budget ;
2014-03-14 18:02:08 -07:00
unsigned int work_done = 0 ;
2009-12-23 13:27:38 +00:00
int err ;
2008-09-15 09:17:11 -07:00
/* Service RQ
*/
2014-03-14 18:02:08 -07:00
if ( budget > 0 )
work_done = vnic_cq_service ( & enic - > cq [ cq ] ,
work_to_do , enic_rq_service , NULL ) ;
2008-09-15 09:17:11 -07:00
2009-12-23 13:27:38 +00:00
/* Return intr event credits for this polling
* cycle . An intr event is the completion of a
* RQ packet .
*/
2008-09-15 09:17:11 -07:00
2009-12-23 13:27:38 +00:00
if ( work_done > 0 )
2010-10-20 10:16:59 +00:00
vnic_intr_return_credits ( & enic - > intr [ intr ] ,
2008-09-15 09:17:11 -07:00
work_done ,
0 /* don't unmask intr */ ,
0 /* don't reset intr timer */ ) ;
2011-02-04 16:17:21 +00:00
err = vnic_rq_fill ( & enic - > rq [ rq ] , enic_rq_alloc_buf ) ;
2009-12-23 13:27:38 +00:00
/* Buffer allocation failed. Stay in polling mode
* so we can try to fill the ring again .
*/
if ( err )
work_done = work_to_do ;
2014-05-20 03:14:05 +05:30
if ( enic - > rx_coalesce_setting . use_adaptive_rx_coalesce )
2015-07-15 15:34:39 +05:30
/* Call the function which refreshes the intr coalescing timer
* value based on the traffic .
2014-05-20 03:14:05 +05:30
*/
enic_calc_int_moderation ( enic , & enic - > rq [ rq ] ) ;
2009-12-23 13:27:38 +00:00
2017-05-22 12:19:48 -07:00
if ( ( work_done < budget ) & & napi_complete_done ( napi , work_done ) ) {
2009-12-23 13:27:38 +00:00
/* Some work done, but not enough to stay in polling,
2010-06-24 10:49:25 +00:00
* exit polling
2008-09-15 09:17:11 -07:00
*/
2014-05-20 03:14:05 +05:30
if ( enic - > rx_coalesce_setting . use_adaptive_rx_coalesce )
enic_set_int_moderation ( enic , & enic - > rq [ rq ] ) ;
2010-10-20 10:16:59 +00:00
vnic_intr_unmask ( & enic - > intr [ intr ] ) ;
2008-09-15 09:17:11 -07:00
}
return work_done ;
}
treewide: setup_timer() -> timer_setup()
This converts all remaining cases of the old setup_timer() API into using
timer_setup(), where the callback argument is the structure already
holding the struct timer_list. These should have no behavioral changes,
since they just change which pointer is passed into the callback with
the same available pointers after conversion. It handles the following
examples, in addition to some other variations.
Casting from unsigned long:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
setup_timer(&ptr->my_timer, my_callback, ptr);
and forced object casts:
void my_callback(struct something *ptr)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr);
become:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
Direct function assignments:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
ptr->my_timer.function = my_callback;
have a temporary cast added, along with converting the args:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback;
And finally, callbacks without a data assignment:
void my_callback(unsigned long data)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, 0);
have their argument renamed to verify they're unused during conversion:
void my_callback(struct timer_list *unused)
{
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
The conversion is done with the following Coccinelle script:
spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup.cocci
@fix_address_of@
expression e;
@@
setup_timer(
-&(e)
+&e
, ...)
// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _timer;
type _cast_data;
@@
(
-setup_timer(&_E->_timer, NULL, _E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E->_timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, &_E);
+timer_setup(&_E._timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._timer, NULL, 0);
)
@change_timer_function_usage@
expression _E;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@
(
-setup_timer(&_E->_timer, _callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
_E->_timer@_stl.function = _callback;
|
_E->_timer@_stl.function = &_callback;
|
_E->_timer@_stl.function = (_cast_func)_callback;
|
_E->_timer@_stl.function = (_cast_func)&_callback;
|
_E._timer@_stl.function = _callback;
|
_E._timer@_stl.function = &_callback;
|
_E._timer@_stl.function = (_cast_func)_callback;
|
_E._timer@_stl.function = (_cast_func)&_callback;
)
// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
)
}
// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}
// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@
void _callback(struct timer_list *t)
{ ... }
// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@
void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _timer);
...
}
// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@
void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _timer);
}
// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@
(
-timer_setup(&_E->_timer, _callback, 0);
+setup_timer(&_E->_timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._timer, _callback, 0);
+setup_timer(&_E._timer, _callback, (_cast_data)&_E);
)
// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-16 14:43:17 -07:00
static void enic_notify_timer ( struct timer_list * t )
2008-09-15 09:17:11 -07:00
{
treewide: setup_timer() -> timer_setup()
This converts all remaining cases of the old setup_timer() API into using
timer_setup(), where the callback argument is the structure already
holding the struct timer_list. These should have no behavioral changes,
since they just change which pointer is passed into the callback with
the same available pointers after conversion. It handles the following
examples, in addition to some other variations.
Casting from unsigned long:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
setup_timer(&ptr->my_timer, my_callback, ptr);
and forced object casts:
void my_callback(struct something *ptr)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr);
become:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
Direct function assignments:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
ptr->my_timer.function = my_callback;
have a temporary cast added, along with converting the args:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback;
And finally, callbacks without a data assignment:
void my_callback(unsigned long data)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, 0);
have their argument renamed to verify they're unused during conversion:
void my_callback(struct timer_list *unused)
{
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
The conversion is done with the following Coccinelle script:
spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup.cocci
@fix_address_of@
expression e;
@@
setup_timer(
-&(e)
+&e
, ...)
// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _timer;
type _cast_data;
@@
(
-setup_timer(&_E->_timer, NULL, _E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E->_timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, &_E);
+timer_setup(&_E._timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._timer, NULL, 0);
)
@change_timer_function_usage@
expression _E;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@
(
-setup_timer(&_E->_timer, _callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
_E->_timer@_stl.function = _callback;
|
_E->_timer@_stl.function = &_callback;
|
_E->_timer@_stl.function = (_cast_func)_callback;
|
_E->_timer@_stl.function = (_cast_func)&_callback;
|
_E._timer@_stl.function = _callback;
|
_E._timer@_stl.function = &_callback;
|
_E._timer@_stl.function = (_cast_func)_callback;
|
_E._timer@_stl.function = (_cast_func)&_callback;
)
// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
)
}
// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}
// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@
void _callback(struct timer_list *t)
{ ... }
// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@
void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _timer);
...
}
// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@
void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _timer);
}
// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@
(
-timer_setup(&_E->_timer, _callback, 0);
+setup_timer(&_E->_timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._timer, _callback, 0);
+setup_timer(&_E._timer, _callback, (_cast_data)&_E);
)
// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-16 14:43:17 -07:00
struct enic * enic = from_timer ( enic , t , notify_timer ) ;
2008-09-15 09:17:11 -07:00
enic_notify_check ( enic ) ;
2008-09-24 11:23:32 -07:00
mod_timer ( & enic - > notify_timer ,
round_jiffies ( jiffies + ENIC_NOTIFY_TIMER_PERIOD ) ) ;
2008-09-15 09:17:11 -07:00
}
static void enic_free_intr ( struct enic * enic )
{
struct net_device * netdev = enic - > netdev ;
unsigned int i ;
2014-06-23 16:08:01 +05:30
enic_free_rx_cpu_rmap ( enic ) ;
2008-09-15 09:17:11 -07:00
switch ( vnic_dev_get_intr_mode ( enic - > vdev ) ) {
case VNIC_DEV_INTR_MODE_INTX :
free_irq ( enic - > pdev - > irq , netdev ) ;
break ;
2008-09-24 11:23:42 -07:00
case VNIC_DEV_INTR_MODE_MSI :
free_irq ( enic - > pdev - > irq , enic ) ;
break ;
2008-09-15 09:17:11 -07:00
case VNIC_DEV_INTR_MODE_MSIX :
for ( i = 0 ; i < ARRAY_SIZE ( enic - > msix ) ; i + + )
if ( enic - > msix [ i ] . requested )
free_irq ( enic - > msix_entry [ i ] . vector ,
enic - > msix [ i ] . devid ) ;
break ;
default :
break ;
}
}
static int enic_request_intr ( struct enic * enic )
{
struct net_device * netdev = enic - > netdev ;
2010-10-20 10:16:59 +00:00
unsigned int i , intr ;
2008-09-15 09:17:11 -07:00
int err = 0 ;
2014-06-23 16:08:01 +05:30
enic_set_rx_cpu_rmap ( enic ) ;
2008-09-15 09:17:11 -07:00
switch ( vnic_dev_get_intr_mode ( enic - > vdev ) ) {
case VNIC_DEV_INTR_MODE_INTX :
err = request_irq ( enic - > pdev - > irq , enic_isr_legacy ,
IRQF_SHARED , netdev - > name , netdev ) ;
break ;
case VNIC_DEV_INTR_MODE_MSI :
err = request_irq ( enic - > pdev - > irq , enic_isr_msi ,
0 , netdev - > name , enic ) ;
break ;
case VNIC_DEV_INTR_MODE_MSIX :
2010-10-20 10:16:59 +00:00
for ( i = 0 ; i < enic - > rq_count ; i + + ) {
intr = enic_msix_rq_intr ( enic , i ) ;
2013-01-17 21:46:18 +00:00
snprintf ( enic - > msix [ intr ] . devname ,
sizeof ( enic - > msix [ intr ] . devname ) ,
enic: Fix format truncation warning
With -Wformat-truncation, gcc throws the following warning.
Fix this by increasing the size of devname to accommodate 15 character
netdev interface name and description.
Remove length format precision for %s. We can fit entire name.
Also increment the version.
drivers/net/ethernet/cisco/enic/enic_main.c: In function ‘enic_open’:
drivers/net/ethernet/cisco/enic/enic_main.c:1740:15: warning: ‘%u’ directive output may be truncated writing between 1 and 2 bytes into a region of size between 1 and 12 [-Wformat-truncation=]
"%.11s-rx-%u", netdev->name, i);
^~
drivers/net/ethernet/cisco/enic/enic_main.c:1740:5: note: directive argument in the range [0, 16]
"%.11s-rx-%u", netdev->name, i);
^~~~~~~~~~~~~
drivers/net/ethernet/cisco/enic/enic_main.c:1738:4: note: ‘snprintf’ output between 6 and 18 bytes into a destination of size 16
snprintf(enic->msix[intr].devname,
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sizeof(enic->msix[intr].devname),
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"%.11s-rx-%u", netdev->name, i);
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Signed-off-by: Govindarajulu Varadarajan <gvaradar@cisco.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-19 16:28:44 -07:00
" %s-rx-%u " , netdev - > name , i ) ;
2014-06-23 16:08:05 +05:30
enic - > msix [ intr ] . isr = enic_isr_msix ;
2010-10-20 10:16:59 +00:00
enic - > msix [ intr ] . devid = & enic - > napi [ i ] ;
}
2008-09-15 09:17:11 -07:00
2010-10-20 10:16:59 +00:00
for ( i = 0 ; i < enic - > wq_count ; i + + ) {
2014-06-23 16:08:05 +05:30
int wq = enic_cq_wq ( enic , i ) ;
2010-10-20 10:16:59 +00:00
intr = enic_msix_wq_intr ( enic , i ) ;
2013-01-17 21:46:18 +00:00
snprintf ( enic - > msix [ intr ] . devname ,
sizeof ( enic - > msix [ intr ] . devname ) ,
enic: Fix format truncation warning
With -Wformat-truncation, gcc throws the following warning.
Fix this by increasing the size of devname to accommodate 15 character
netdev interface name and description.
Remove length format precision for %s. We can fit entire name.
Also increment the version.
drivers/net/ethernet/cisco/enic/enic_main.c: In function ‘enic_open’:
drivers/net/ethernet/cisco/enic/enic_main.c:1740:15: warning: ‘%u’ directive output may be truncated writing between 1 and 2 bytes into a region of size between 1 and 12 [-Wformat-truncation=]
"%.11s-rx-%u", netdev->name, i);
^~
drivers/net/ethernet/cisco/enic/enic_main.c:1740:5: note: directive argument in the range [0, 16]
"%.11s-rx-%u", netdev->name, i);
^~~~~~~~~~~~~
drivers/net/ethernet/cisco/enic/enic_main.c:1738:4: note: ‘snprintf’ output between 6 and 18 bytes into a destination of size 16
snprintf(enic->msix[intr].devname,
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sizeof(enic->msix[intr].devname),
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"%.11s-rx-%u", netdev->name, i);
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Signed-off-by: Govindarajulu Varadarajan <gvaradar@cisco.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-19 16:28:44 -07:00
" %s-tx-%u " , netdev - > name , i ) ;
2014-06-23 16:08:05 +05:30
enic - > msix [ intr ] . isr = enic_isr_msix ;
enic - > msix [ intr ] . devid = & enic - > napi [ wq ] ;
2010-10-20 10:16:59 +00:00
}
2008-09-15 09:17:11 -07:00
2010-10-20 10:16:59 +00:00
intr = enic_msix_err_intr ( enic ) ;
2013-01-17 21:46:18 +00:00
snprintf ( enic - > msix [ intr ] . devname ,
sizeof ( enic - > msix [ intr ] . devname ) ,
enic: Fix format truncation warning
With -Wformat-truncation, gcc throws the following warning.
Fix this by increasing the size of devname to accommodate 15 character
netdev interface name and description.
Remove length format precision for %s. We can fit entire name.
Also increment the version.
drivers/net/ethernet/cisco/enic/enic_main.c: In function ‘enic_open’:
drivers/net/ethernet/cisco/enic/enic_main.c:1740:15: warning: ‘%u’ directive output may be truncated writing between 1 and 2 bytes into a region of size between 1 and 12 [-Wformat-truncation=]
"%.11s-rx-%u", netdev->name, i);
^~
drivers/net/ethernet/cisco/enic/enic_main.c:1740:5: note: directive argument in the range [0, 16]
"%.11s-rx-%u", netdev->name, i);
^~~~~~~~~~~~~
drivers/net/ethernet/cisco/enic/enic_main.c:1738:4: note: ‘snprintf’ output between 6 and 18 bytes into a destination of size 16
snprintf(enic->msix[intr].devname,
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sizeof(enic->msix[intr].devname),
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"%.11s-rx-%u", netdev->name, i);
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Signed-off-by: Govindarajulu Varadarajan <gvaradar@cisco.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-19 16:28:44 -07:00
" %s-err " , netdev - > name ) ;
2010-10-20 10:16:59 +00:00
enic - > msix [ intr ] . isr = enic_isr_msix_err ;
enic - > msix [ intr ] . devid = enic ;
2008-09-15 09:17:11 -07:00
2010-10-20 10:16:59 +00:00
intr = enic_msix_notify_intr ( enic ) ;
2013-01-17 21:46:18 +00:00
snprintf ( enic - > msix [ intr ] . devname ,
sizeof ( enic - > msix [ intr ] . devname ) ,
enic: Fix format truncation warning
With -Wformat-truncation, gcc throws the following warning.
Fix this by increasing the size of devname to accommodate 15 character
netdev interface name and description.
Remove length format precision for %s. We can fit entire name.
Also increment the version.
drivers/net/ethernet/cisco/enic/enic_main.c: In function ‘enic_open’:
drivers/net/ethernet/cisco/enic/enic_main.c:1740:15: warning: ‘%u’ directive output may be truncated writing between 1 and 2 bytes into a region of size between 1 and 12 [-Wformat-truncation=]
"%.11s-rx-%u", netdev->name, i);
^~
drivers/net/ethernet/cisco/enic/enic_main.c:1740:5: note: directive argument in the range [0, 16]
"%.11s-rx-%u", netdev->name, i);
^~~~~~~~~~~~~
drivers/net/ethernet/cisco/enic/enic_main.c:1738:4: note: ‘snprintf’ output between 6 and 18 bytes into a destination of size 16
snprintf(enic->msix[intr].devname,
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sizeof(enic->msix[intr].devname),
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"%.11s-rx-%u", netdev->name, i);
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Signed-off-by: Govindarajulu Varadarajan <gvaradar@cisco.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-19 16:28:44 -07:00
" %s-notify " , netdev - > name ) ;
2010-10-20 10:16:59 +00:00
enic - > msix [ intr ] . isr = enic_isr_msix_notify ;
enic - > msix [ intr ] . devid = enic ;
for ( i = 0 ; i < ARRAY_SIZE ( enic - > msix ) ; i + + )
enic - > msix [ i ] . requested = 0 ;
2008-09-15 09:17:11 -07:00
2010-10-20 10:16:59 +00:00
for ( i = 0 ; i < enic - > intr_count ; i + + ) {
2008-09-15 09:17:11 -07:00
err = request_irq ( enic - > msix_entry [ i ] . vector ,
enic - > msix [ i ] . isr , 0 ,
enic - > msix [ i ] . devname ,
enic - > msix [ i ] . devid ) ;
if ( err ) {
enic_free_intr ( enic ) ;
break ;
}
enic - > msix [ i ] . requested = 1 ;
}
break ;
default :
break ;
}
return err ;
}
2009-12-23 13:27:30 +00:00
static void enic_synchronize_irqs ( struct enic * enic )
{
unsigned int i ;
switch ( vnic_dev_get_intr_mode ( enic - > vdev ) ) {
case VNIC_DEV_INTR_MODE_INTX :
case VNIC_DEV_INTR_MODE_MSI :
synchronize_irq ( enic - > pdev - > irq ) ;
break ;
case VNIC_DEV_INTR_MODE_MSIX :
for ( i = 0 ; i < enic - > intr_count ; i + + )
synchronize_irq ( enic - > msix_entry [ i ] . vector ) ;
break ;
default :
break ;
}
}
2014-05-20 03:14:05 +05:30
static void enic_set_rx_coal_setting ( struct enic * enic )
{
unsigned int speed ;
int index = - 1 ;
struct enic_rx_coal * rx_coal = & enic - > rx_coalesce_setting ;
/* 1. Read the link speed from fw
* 2. Pick the default range for the speed
* 3. Update it in enic - > rx_coalesce_setting
*/
speed = vnic_dev_port_speed ( enic - > vdev ) ;
if ( ENIC_LINK_SPEED_10G < speed )
index = ENIC_LINK_40G_INDEX ;
else if ( ENIC_LINK_SPEED_4G < speed )
index = ENIC_LINK_10G_INDEX ;
else
index = ENIC_LINK_4G_INDEX ;
rx_coal - > small_pkt_range_start = mod_range [ index ] . small_pkt_range_start ;
rx_coal - > large_pkt_range_start = mod_range [ index ] . large_pkt_range_start ;
rx_coal - > range_end = ENIC_RX_COALESCE_RANGE_END ;
/* Start with the value provided by UCSM */
for ( index = 0 ; index < enic - > rq_count ; index + + )
enic - > cq [ index ] . cur_rx_coal_timeval =
enic - > config . intr_timer_usec ;
rx_coal - > use_adaptive_rx_coalesce = 1 ;
}
2010-06-24 10:50:12 +00:00
static int enic_dev_notify_set ( struct enic * enic )
2008-09-15 09:17:11 -07:00
{
int err ;
2014-06-23 16:08:03 +05:30
spin_lock_bh ( & enic - > devcmd_lock ) ;
2008-09-15 09:17:11 -07:00
switch ( vnic_dev_get_intr_mode ( enic - > vdev ) ) {
case VNIC_DEV_INTR_MODE_INTX :
2010-10-20 10:16:59 +00:00
err = vnic_dev_notify_set ( enic - > vdev ,
enic_legacy_notify_intr ( ) ) ;
2008-09-15 09:17:11 -07:00
break ;
case VNIC_DEV_INTR_MODE_MSIX :
2010-10-20 10:16:59 +00:00
err = vnic_dev_notify_set ( enic - > vdev ,
enic_msix_notify_intr ( enic ) ) ;
2008-09-15 09:17:11 -07:00
break ;
default :
err = vnic_dev_notify_set ( enic - > vdev , - 1 /* no intr */ ) ;
break ;
}
2014-06-23 16:08:03 +05:30
spin_unlock_bh ( & enic - > devcmd_lock ) ;
2008-09-15 09:17:11 -07:00
return err ;
}
static void enic_notify_timer_start ( struct enic * enic )
{
switch ( vnic_dev_get_intr_mode ( enic - > vdev ) ) {
case VNIC_DEV_INTR_MODE_MSI :
mod_timer ( & enic - > notify_timer , jiffies ) ;
break ;
default :
/* Using intr for notification for INTx/MSI-X */
break ;
2011-06-03 11:51:20 +00:00
}
2008-09-15 09:17:11 -07:00
}
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err, ret;

	/* Claim interrupt vectors first; nothing below is safe without them */
	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}
	enic_init_affinity_hint(enic);
	enic_set_affinity_hint(enic);

	/* Set up the firmware notify area (link status etc.) */
	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	/* Post receive buffers before traffic can flow */
	for (i = 0; i < enic->rq_count; i++) {
		/* enable rq before updating rq desc */
		vnic_rq_enable(&enic->rq[i]);
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_free_rq;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);

	/* Dynamic/SR-IOV VF interfaces get their MAC via port profiles,
	 * not a station address registration.
	 */
	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_add_station_addr(enic);

	enic_set_rx_mode(netdev);

	netif_tx_wake_all_queues(netdev);

	for (i = 0; i < enic->rq_count; i++)
		napi_enable(&enic->napi[i]);

	/* In MSI-X mode the WQ completions have their own NAPI contexts */
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
	enic_dev_enable(enic);

	/* Unmask interrupts only after everything is ready to handle them */
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);
	enic_rfs_timer_start(enic);

	return 0;

err_out_free_rq:
	for (i = 0; i < enic->rq_count; i++) {
		/* Only clean an RQ that was actually quiesced */
		ret = vnic_rq_disable(&enic->rq[i]);
		if (!ret)
			vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	}
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_unset_affinity_hint(enic);
	enic_free_intr(enic);

	return err;
}
/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	/* Mask all interrupts, then wait out any handlers already running */
	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}
	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);
	enic_rfs_flw_tbl_free(enic);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	netif_carrier_off(netdev);
	/* In MSI-X mode the WQ completions have their own NAPI contexts */
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
	netif_tx_disable(netdev);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_del_station_addr(enic);

	/* Quiesce the queues; bail out (leaving state as-is) if the
	 * hardware refuses to disable one.
	 */
	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_unset_affinity_hint(enic);
	enic_free_intr(enic);

	/* Reclaim all buffers still on the rings */
	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}
2018-07-27 11:19:29 -07:00
static int _enic_change_mtu ( struct net_device * netdev , int new_mtu )
{
bool running = netif_running ( netdev ) ;
int err = 0 ;
ASSERT_RTNL ( ) ;
if ( running ) {
err = enic_stop ( netdev ) ;
if ( err )
return err ;
}
netdev - > mtu = new_mtu ;
if ( running ) {
err = enic_open ( netdev ) ;
if ( err )
return err ;
}
return 0 ;
}
2008-09-15 09:17:11 -07:00
static int enic_change_mtu ( struct net_device * netdev , int new_mtu )
{
struct enic * enic = netdev_priv ( netdev ) ;
2012-01-18 04:24:02 +00:00
if ( enic_is_dynamic ( enic ) | | enic_is_sriov_vf ( enic ) )
2011-06-03 14:35:17 +00:00
return - EOPNOTSUPP ;
2008-09-15 09:17:11 -07:00
if ( netdev - > mtu > enic - > port_mtu )
2010-06-24 10:50:56 +00:00
netdev_warn ( netdev ,
2018-07-27 11:19:29 -07:00
" interface MTU (%d) set higher than port MTU (%d) \n " ,
netdev - > mtu , enic - > port_mtu ) ;
2008-09-15 09:17:11 -07:00
2018-07-27 11:19:29 -07:00
return _enic_change_mtu ( netdev , new_mtu ) ;
2008-09-15 09:17:11 -07:00
}
2011-06-03 14:35:17 +00:00
/* Worker for device-initiated MTU changes: re-reads the MTU the firmware
 * currently reports and applies it.  Runs in process context so it can
 * take the rtnl lock that _enic_change_mtu() requires.
 */
static void enic_change_mtu_work(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, change_mtu_work);
	struct net_device *netdev = enic->netdev;
	int new_mtu = vnic_dev_mtu(enic->vdev);

	rtnl_lock();
	/* Best effort; on failure netdev->mtu simply keeps its old value */
	(void)_enic_change_mtu(netdev, new_mtu);
	rtnl_unlock();

	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
}
2008-09-15 09:17:11 -07:00
# ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service all pending interrupts synchronously by invoking
 * the ISR for each active vector in the current interrupt mode.
 */
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		/* One vector per RQ, one per WQ */
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[i]);
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[enic_cq_wq(enic, i)]);
		}

		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
# endif
/* Kick off an asynchronous devcmd via start() and poll finished() until it
 * completes, fails, or ~2 seconds elapse.  Sleeps between polls, so this
 * must be called from process context.
 */
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long deadline;
	int done;
	int err;

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max */
	deadline = jiffies + (HZ * 2);
	for (;;) {
		err = finished(vdev, &done);
		if (err)
			return err;
		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

		if (!time_after(deadline, jiffies))
			return -ETIMEDOUT;
	}
}
static int enic_dev_open ( struct enic * enic )
{
int err ;
2018-03-01 11:07:24 -08:00
u32 flags = CMD_OPENF_IG_DESCCACHE ;
2008-09-15 09:17:11 -07:00
err = enic_dev_wait ( enic - > vdev , vnic_dev_open ,
2018-03-01 11:07:24 -08:00
vnic_dev_open_done , flags ) ;
2008-09-15 09:17:11 -07:00
if ( err )
2010-06-24 10:50:56 +00:00
dev_err ( enic_get_dev ( enic ) , " vNIC device open failed, err %d \n " ,
err ) ;
2008-09-15 09:17:11 -07:00
return err ;
}
2015-10-01 14:18:47 +05:30
static int enic_dev_soft_reset ( struct enic * enic )
{
int err ;
err = enic_dev_wait ( enic - > vdev , vnic_dev_soft_reset ,
vnic_dev_soft_reset_done , 0 ) ;
if ( err )
netdev_err ( enic - > netdev , " vNIC soft reset failed, err %d \n " ,
err ) ;
return err ;
}
2010-06-24 10:50:00 +00:00
static int enic_dev_hang_reset ( struct enic * enic )
2008-09-15 09:17:11 -07:00
{
int err ;
2010-06-24 10:50:00 +00:00
err = enic_dev_wait ( enic - > vdev , vnic_dev_hang_reset ,
vnic_dev_hang_reset_done , 0 ) ;
2008-09-15 09:17:11 -07:00
if ( err )
2010-06-24 10:50:56 +00:00
netdev_err ( enic - > netdev , " vNIC hang reset failed, err %d \n " ,
err ) ;
2008-09-15 09:17:11 -07:00
return err ;
}
2014-12-10 13:40:23 +05:30
int __enic_set_rsskey ( struct enic * enic )
2010-10-20 10:16:59 +00:00
{
2014-11-23 12:27:41 -08:00
union vnic_rss_key * rss_key_buf_va ;
2010-11-15 08:09:55 +00:00
dma_addr_t rss_key_buf_pa ;
2014-11-23 12:27:41 -08:00
int i , kidx , bidx , err ;
2010-10-20 10:16:59 +00:00
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
rss_key_buf_va = dma_alloc_coherent ( & enic - > pdev - > dev ,
sizeof ( union vnic_rss_key ) ,
& rss_key_buf_pa , GFP_ATOMIC ) ;
2010-10-20 10:16:59 +00:00
if ( ! rss_key_buf_va )
return - ENOMEM ;
2014-11-23 12:27:41 -08:00
for ( i = 0 ; i < ENIC_RSS_LEN ; i + + ) {
kidx = i / ENIC_RSS_BYTES_PER_KEY ;
bidx = i % ENIC_RSS_BYTES_PER_KEY ;
2014-12-10 13:40:23 +05:30
rss_key_buf_va - > key [ kidx ] . b [ bidx ] = enic - > rss_key [ i ] ;
2014-11-23 12:27:41 -08:00
}
2014-06-23 16:08:03 +05:30
spin_lock_bh ( & enic - > devcmd_lock ) ;
2010-10-20 10:16:59 +00:00
err = enic_set_rss_key ( enic ,
rss_key_buf_pa ,
sizeof ( union vnic_rss_key ) ) ;
2014-06-23 16:08:03 +05:30
spin_unlock_bh ( & enic - > devcmd_lock ) ;
2010-10-20 10:16:59 +00:00
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
dma_free_coherent ( & enic - > pdev - > dev , sizeof ( union vnic_rss_key ) ,
rss_key_buf_va , rss_key_buf_pa ) ;
2010-10-20 10:16:59 +00:00
return err ;
}
2014-12-10 13:40:23 +05:30
/* Generate a fresh random RSS key and program it into the adapter. */
static int enic_set_rsskey(struct enic *enic)
{
	netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN);

	return __enic_set_rsskey(enic);
}
2010-10-20 10:16:59 +00:00
/* Program the RSS indirection table: spread the 2^rss_hash_bits hash
 * buckets round-robin across the receive queues.  Uses a DMA-coherent
 * bounce buffer; GFP_ATOMIC because this can run with a lock held
 * (e.g. from enic_reset).
 */
static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	unsigned int n_entries = 1 << rss_hash_bits;
	union vnic_rss_cpu *cpu_tbl;
	dma_addr_t cpu_tbl_pa;
	unsigned int i;
	int err;

	cpu_tbl = dma_alloc_coherent(&enic->pdev->dev,
				     sizeof(union vnic_rss_cpu),
				     &cpu_tbl_pa, GFP_ATOMIC);
	if (!cpu_tbl)
		return -ENOMEM;

	for (i = 0; i < n_entries; i++)
		cpu_tbl->cpu[i / 4].b[i % 4] = i % enic->rq_count;

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic, cpu_tbl_pa, sizeof(union vnic_rss_cpu));
	spin_unlock_bh(&enic->devcmd_lock);

	dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_cpu),
			  cpu_tbl, cpu_tbl_pa);

	return err;
}
/* Push the NIC configuration (RSS parameters plus fixed TSO/VLAN options)
 * to the adapter under the devcmd lock.
 */
static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;	/* Enable VLAN tag stripping */
	int err;

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}
2010-10-20 10:16:59 +00:00
static int enic_set_rss_nic_cfg ( struct enic * enic )
{
struct device * dev = enic_get_dev ( enic ) ;
const u8 rss_default_cpu = 0 ;
const u8 rss_hash_bits = 7 ;
const u8 rss_base_cpu = 0 ;
2018-06-05 10:14:57 -07:00
u8 rss_hash_type ;
int res ;
2010-10-20 10:16:59 +00:00
u8 rss_enable = ENIC_SETTING ( enic , RSS ) & & ( enic - > rq_count > 1 ) ;
2018-06-05 10:14:57 -07:00
spin_lock_bh ( & enic - > devcmd_lock ) ;
res = vnic_dev_capable_rss_hash_type ( enic - > vdev , & rss_hash_type ) ;
spin_unlock_bh ( & enic - > devcmd_lock ) ;
if ( res ) {
/* defaults for old adapters
*/
rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
NIC_CFG_RSS_HASH_TYPE_IPV6 |
NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 ;
}
2010-10-20 10:16:59 +00:00
if ( rss_enable ) {
if ( ! enic_set_rsskey ( enic ) ) {
if ( enic_set_rsscpu ( enic , rss_hash_bits ) ) {
rss_enable = 0 ;
dev_warn ( dev , " RSS disabled, "
" Failed to set RSS cpu indirection table. " ) ;
}
} else {
rss_enable = 0 ;
dev_warn ( dev , " RSS disabled, Failed to set RSS key. \n " ) ;
}
}
return enic_set_niccfg ( enic , rss_default_cpu , rss_hash_type ,
rss_hash_bits , rss_base_cpu , rss_enable ) ;
2010-06-24 10:49:51 +00:00
}
2020-09-29 22:25:10 +02:00
/* Flip the enic_api busy flag under its lock so concurrent users of the
 * enic API observe a consistent value.
 */
static void enic_set_api_busy(struct enic *enic, bool busy)
{
	spin_lock(&enic->enic_api_lock);
	enic->enic_api_busy = busy;
	spin_unlock(&enic->enic_api_lock);
}
2008-09-15 09:17:11 -07:00
static void enic_reset ( struct work_struct * work )
{
struct enic * enic = container_of ( work , struct enic , reset ) ;
if ( ! netif_running ( enic - > netdev ) )
return ;
rtnl_lock ( ) ;
2020-09-29 22:25:10 +02:00
/* Stop any activity from infiniband */
enic_set_api_busy ( enic , true ) ;
2015-10-01 14:18:47 +05:30
enic_stop ( enic - > netdev ) ;
enic_dev_soft_reset ( enic ) ;
enic_reset_addr_lists ( enic ) ;
enic_init_vnic_resources ( enic ) ;
enic_set_rss_nic_cfg ( enic ) ;
enic_dev_set_ig_vlan_rewrite_mode ( enic ) ;
enic_open ( enic - > netdev ) ;
2020-09-29 22:25:10 +02:00
/* Allow infiniband to fiddle with the device again */
enic_set_api_busy ( enic , false ) ;
2015-10-01 14:18:47 +05:30
call_netdevice_notifiers ( NETDEV_REBOOT , enic - > netdev ) ;
rtnl_unlock ( ) ;
}
static void enic_tx_hang_reset ( struct work_struct * work )
{
struct enic * enic = container_of ( work , struct enic , tx_hang_reset ) ;
rtnl_lock ( ) ;
2020-09-29 22:25:10 +02:00
/* Stop any activity from infiniband */
enic_set_api_busy ( enic , true ) ;
2010-06-24 10:50:12 +00:00
enic_dev_hang_notify ( enic ) ;
2008-09-15 09:17:11 -07:00
enic_stop ( enic - > netdev ) ;
2010-06-24 10:50:00 +00:00
enic_dev_hang_reset ( enic ) ;
2011-02-17 08:53:12 +00:00
enic_reset_addr_lists ( enic ) ;
2008-09-15 09:17:11 -07:00
enic_init_vnic_resources ( enic ) ;
2010-10-20 10:16:59 +00:00
enic_set_rss_nic_cfg ( enic ) ;
2010-06-24 10:49:51 +00:00
enic_dev_set_ig_vlan_rewrite_mode ( enic ) ;
2008-09-15 09:17:11 -07:00
enic_open ( enic - > netdev ) ;
2020-09-29 22:25:10 +02:00
/* Allow infiniband to fiddle with the device again */
enic_set_api_busy ( enic , false ) ;
2013-08-16 15:47:41 -07:00
call_netdevice_notifiers ( NETDEV_REBOOT , enic - > netdev ) ;
2008-09-15 09:17:11 -07:00
rtnl_unlock ( ) ;
}
static int enic_set_intr_mode ( struct enic * enic )
{
2010-10-20 10:16:59 +00:00
unsigned int n = min_t ( unsigned int , enic - > rq_count , ENIC_RQ_MAX ) ;
2011-02-17 13:57:19 +00:00
unsigned int m = min_t ( unsigned int , enic - > wq_count , ENIC_WQ_MAX ) ;
2008-09-15 09:17:11 -07:00
unsigned int i ;
/* Set interrupt mode (INTx, MSI, MSI-X) depending
2010-10-20 10:16:59 +00:00
* on system capabilities .
2008-09-15 09:17:11 -07:00
*
* Try MSI - X first
*
* We need n RQs , m WQs , n + m CQs , and n + m + 2 INTRs
* ( the second to last INTR is used for WQ / RQ errors )
* ( the last INTR is used for notifications )
*/
BUG_ON ( ARRAY_SIZE ( enic - > msix_entry ) < n + m + 2 ) ;
for ( i = 0 ; i < n + m + 2 ; i + + )
enic - > msix_entry [ i ] . entry = i ;
2010-10-20 10:16:59 +00:00
/* Use multiple RQs if RSS is enabled
*/
if ( ENIC_SETTING ( enic , RSS ) & &
enic - > config . intr_mode < 1 & &
2008-09-15 09:17:11 -07:00
enic - > rq_count > = n & &
enic - > wq_count > = m & &
enic - > cq_count > = n + m & &
2010-10-20 10:16:59 +00:00
enic - > intr_count > = n + m + 2 ) {
2008-09-15 09:17:11 -07:00
2014-02-18 11:08:02 +01:00
if ( pci_enable_msix_range ( enic - > pdev , enic - > msix_entry ,
n + m + 2 , n + m + 2 ) > 0 ) {
2008-09-15 09:17:11 -07:00
2010-10-20 10:16:59 +00:00
enic - > rq_count = n ;
enic - > wq_count = m ;
enic - > cq_count = n + m ;
enic - > intr_count = n + m + 2 ;
2008-09-15 09:17:11 -07:00
2010-10-20 10:16:59 +00:00
vnic_dev_set_intr_mode ( enic - > vdev ,
VNIC_DEV_INTR_MODE_MSIX ) ;
return 0 ;
}
}
if ( enic - > config . intr_mode < 1 & &
enic - > rq_count > = 1 & &
enic - > wq_count > = m & &
enic - > cq_count > = 1 + m & &
enic - > intr_count > = 1 + m + 2 ) {
2014-02-18 11:08:02 +01:00
if ( pci_enable_msix_range ( enic - > pdev , enic - > msix_entry ,
1 + m + 2 , 1 + m + 2 ) > 0 ) {
2010-10-20 10:16:59 +00:00
enic - > rq_count = 1 ;
enic - > wq_count = m ;
enic - > cq_count = 1 + m ;
enic - > intr_count = 1 + m + 2 ;
vnic_dev_set_intr_mode ( enic - > vdev ,
VNIC_DEV_INTR_MODE_MSIX ) ;
return 0 ;
}
2008-09-15 09:17:11 -07:00
}
/* Next try MSI
*
* We need 1 RQ , 1 WQ , 2 CQs , and 1 INTR
*/
if ( enic - > config . intr_mode < 2 & &
enic - > rq_count > = 1 & &
enic - > wq_count > = 1 & &
enic - > cq_count > = 2 & &
enic - > intr_count > = 1 & &
! pci_enable_msi ( enic - > pdev ) ) {
enic - > rq_count = 1 ;
enic - > wq_count = 1 ;
enic - > cq_count = 2 ;
enic - > intr_count = 1 ;
vnic_dev_set_intr_mode ( enic - > vdev , VNIC_DEV_INTR_MODE_MSI ) ;
return 0 ;
}
/* Next try INTx
*
* We need 1 RQ , 1 WQ , 2 CQs , and 3 INTRs
* ( the first INTR is used for WQ / RQ )
* ( the second INTR is used for WQ / RQ errors )
* ( the last INTR is used for notifications )
*/
if ( enic - > config . intr_mode < 3 & &
enic - > rq_count > = 1 & &
enic - > wq_count > = 1 & &
enic - > cq_count > = 2 & &
enic - > intr_count > = 3 ) {
enic - > rq_count = 1 ;
enic - > wq_count = 1 ;
enic - > cq_count = 2 ;
enic - > intr_count = 3 ;
vnic_dev_set_intr_mode ( enic - > vdev , VNIC_DEV_INTR_MODE_INTX ) ;
return 0 ;
}
vnic_dev_set_intr_mode ( enic - > vdev , VNIC_DEV_INTR_MODE_UNKNOWN ) ;
return - EINVAL ;
}
static void enic_clear_intr_mode ( struct enic * enic )
{
switch ( vnic_dev_get_intr_mode ( enic - > vdev ) ) {
case VNIC_DEV_INTR_MODE_MSIX :
pci_disable_msix ( enic - > pdev ) ;
break ;
case VNIC_DEV_INTR_MODE_MSI :
pci_disable_msi ( enic - > pdev ) ;
break ;
default :
break ;
}
vnic_dev_set_intr_mode ( enic - > vdev , VNIC_DEV_INTR_MODE_UNKNOWN ) ;
}
2010-05-17 22:50:19 -07:00
static const struct net_device_ops enic_netdev_dynamic_ops = {
. ndo_open = enic_open ,
. ndo_stop = enic_stop ,
. ndo_start_xmit = enic_hard_start_xmit ,
2011-06-08 14:54:02 +00:00
. ndo_get_stats64 = enic_get_stats ,
2010-05-17 22:50:19 -07:00
. ndo_validate_addr = eth_validate_addr ,
2010-12-08 13:19:58 +00:00
. ndo_set_rx_mode = enic_set_rx_mode ,
2010-05-17 22:50:19 -07:00
. ndo_set_mac_address = enic_set_mac_address_dynamic ,
. ndo_change_mtu = enic_change_mtu ,
. ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid ,
. ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid ,
. ndo_tx_timeout = enic_tx_timeout ,
. ndo_set_vf_port = enic_set_vf_port ,
. ndo_get_vf_port = enic_get_vf_port ,
2010-12-08 13:53:58 +00:00
. ndo_set_vf_mac = enic_set_vf_mac ,
2010-05-17 22:50:19 -07:00
# ifdef CONFIG_NET_POLL_CONTROLLER
. ndo_poll_controller = enic_poll_controller ,
# endif
2014-06-23 16:08:02 +05:30
# ifdef CONFIG_RFS_ACCEL
. ndo_rx_flow_steer = enic_rx_flow_steer ,
# endif
2017-02-08 16:43:09 -08:00
. ndo_features_check = enic_features_check ,
2010-05-17 22:50:19 -07:00
} ;
2008-11-19 22:23:26 -08:00
static const struct net_device_ops enic_netdev_ops = {
. ndo_open = enic_open ,
. ndo_stop = enic_stop ,
2008-11-20 20:14:53 -08:00
. ndo_start_xmit = enic_hard_start_xmit ,
2011-06-08 14:54:02 +00:00
. ndo_get_stats64 = enic_get_stats ,
2008-11-19 22:23:26 -08:00
. ndo_validate_addr = eth_validate_addr ,
2010-05-17 22:50:19 -07:00
. ndo_set_mac_address = enic_set_mac_address ,
2010-12-08 13:19:58 +00:00
. ndo_set_rx_mode = enic_set_rx_mode ,
2008-11-19 22:23:26 -08:00
. ndo_change_mtu = enic_change_mtu ,
. ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid ,
. ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid ,
. ndo_tx_timeout = enic_tx_timeout ,
2011-09-22 03:44:43 +00:00
. ndo_set_vf_port = enic_set_vf_port ,
. ndo_get_vf_port = enic_get_vf_port ,
. ndo_set_vf_mac = enic_set_vf_mac ,
2008-11-19 22:23:26 -08:00
# ifdef CONFIG_NET_POLL_CONTROLLER
. ndo_poll_controller = enic_poll_controller ,
# endif
2014-06-23 16:08:02 +05:30
# ifdef CONFIG_RFS_ACCEL
. ndo_rx_flow_steer = enic_rx_flow_steer ,
# endif
2017-02-08 16:43:09 -08:00
. ndo_features_check = enic_features_check ,
2008-11-19 22:23:26 -08:00
} ;
2010-09-30 13:35:45 +00:00
static void enic_dev_deinit ( struct enic * enic )
2009-09-03 17:02:45 +00:00
{
2010-10-20 10:16:59 +00:00
unsigned int i ;
2020-09-09 10:37:51 -07:00
for ( i = 0 ; i < enic - > rq_count ; i + + )
__netif_napi_del ( & enic - > napi [ i ] ) ;
2014-06-23 16:08:05 +05:30
if ( vnic_dev_get_intr_mode ( enic - > vdev ) = = VNIC_DEV_INTR_MODE_MSIX )
for ( i = 0 ; i < enic - > wq_count ; i + + )
2020-09-09 10:37:51 -07:00
__netif_napi_del ( & enic - > napi [ enic_cq_wq ( enic , i ) ] ) ;
/* observe RCU grace period after __netif_napi_del() calls */
synchronize_net ( ) ;
2010-10-20 10:16:59 +00:00
2009-09-03 17:02:45 +00:00
enic_free_vnic_resources ( enic ) ;
enic_clear_intr_mode ( enic ) ;
2015-10-30 16:52:51 +05:30
enic_free_affinity_hint ( enic ) ;
2009-09-03 17:02:45 +00:00
}
2015-01-03 19:35:44 +05:30
static void enic_kdump_kernel_config ( struct enic * enic )
{
if ( is_kdump_kernel ( ) ) {
dev_info ( enic_get_dev ( enic ) , " Running from within kdump kernel. Using minimal resources \n " ) ;
enic - > rq_count = 1 ;
enic - > wq_count = 1 ;
enic - > config . rq_desc_count = ENIC_MIN_RQ_DESCS ;
enic - > config . wq_desc_count = ENIC_MIN_WQ_DESCS ;
enic - > config . mtu = min_t ( u16 , 1500 , enic - > config . mtu ) ;
}
}
2010-09-30 13:35:45 +00:00
static int enic_dev_init ( struct enic * enic )
2009-09-03 17:02:45 +00:00
{
2010-06-24 10:50:56 +00:00
struct device * dev = enic_get_dev ( enic ) ;
2009-09-03 17:02:45 +00:00
struct net_device * netdev = enic - > netdev ;
2010-10-20 10:16:59 +00:00
unsigned int i ;
2009-09-03 17:02:45 +00:00
int err ;
2011-06-17 07:56:48 +00:00
/* Get interrupt coalesce timer info */
err = enic_dev_intr_coal_timer_info ( enic ) ;
if ( err ) {
dev_warn ( dev , " Using default conversion factor for "
" interrupt coalesce timer \n " ) ;
vnic_dev_intr_coal_timer_info_default ( enic - > vdev ) ;
}
2009-09-03 17:02:45 +00:00
/* Get vNIC configuration
*/
err = enic_get_vnic_config ( enic ) ;
if ( err ) {
2010-06-24 10:50:56 +00:00
dev_err ( dev , " Get vNIC configuration failed, aborting \n " ) ;
2009-09-03 17:02:45 +00:00
return err ;
}
/* Get available resource counts
*/
enic_get_res_counts ( enic ) ;
2015-01-03 19:35:44 +05:30
/* modify resource count if we are in kdump_kernel
*/
enic_kdump_kernel_config ( enic ) ;
2009-09-03 17:02:45 +00:00
/* Set interrupt mode based on resource counts and system
* capabilities
*/
err = enic_set_intr_mode ( enic ) ;
if ( err ) {
2010-06-24 10:50:56 +00:00
dev_err ( dev , " Failed to set intr mode based on resource "
" counts and system capabilities, aborting \n " ) ;
2009-09-03 17:02:45 +00:00
return err ;
}
/* Allocate and configure vNIC resources
*/
err = enic_alloc_vnic_resources ( enic ) ;
if ( err ) {
2010-06-24 10:50:56 +00:00
dev_err ( dev , " Failed to alloc vNIC resources, aborting \n " ) ;
2009-09-03 17:02:45 +00:00
goto err_out_free_vnic_resources ;
}
enic_init_vnic_resources ( enic ) ;
2010-10-20 10:16:59 +00:00
err = enic_set_rss_nic_cfg ( enic ) ;
2009-09-03 17:02:45 +00:00
if ( err ) {
2010-06-24 10:50:56 +00:00
dev_err ( dev , " Failed to config nic, aborting \n " ) ;
2009-09-03 17:02:45 +00:00
goto err_out_free_vnic_resources ;
}
switch ( vnic_dev_get_intr_mode ( enic - > vdev ) ) {
default :
2010-10-20 10:16:59 +00:00
netif_napi_add ( netdev , & enic - > napi [ 0 ] , enic_poll , 64 ) ;
2009-09-03 17:02:45 +00:00
break ;
case VNIC_DEV_INTR_MODE_MSIX :
2014-06-23 16:08:04 +05:30
for ( i = 0 ; i < enic - > rq_count ; i + + ) {
2010-10-20 10:16:59 +00:00
netif_napi_add ( netdev , & enic - > napi [ i ] ,
2014-06-23 16:08:05 +05:30
enic_poll_msix_rq , NAPI_POLL_WEIGHT ) ;
2014-06-23 16:08:04 +05:30
}
2014-06-23 16:08:05 +05:30
for ( i = 0 ; i < enic - > wq_count ; i + + )
netif_napi_add ( netdev , & enic - > napi [ enic_cq_wq ( enic , i ) ] ,
enic_poll_msix_wq , NAPI_POLL_WEIGHT ) ;
2009-09-03 17:02:45 +00:00
break ;
}
return 0 ;
err_out_free_vnic_resources :
2015-10-30 16:52:51 +05:30
enic_free_affinity_hint ( enic ) ;
2009-09-03 17:02:45 +00:00
enic_clear_intr_mode ( enic ) ;
enic_free_vnic_resources ( enic ) ;
return err ;
}
2009-09-03 17:01:53 +00:00
static void enic_iounmap ( struct enic * enic )
{
unsigned int i ;
for ( i = 0 ; i < ARRAY_SIZE ( enic - > bar ) ; i + + )
if ( enic - > bar [ i ] . vaddr )
iounmap ( enic - > bar [ i ] . vaddr ) ;
}
2012-12-06 14:30:56 +00:00
static int enic_probe ( struct pci_dev * pdev , const struct pci_device_id * ent )
2008-09-15 09:17:11 -07:00
{
2010-06-24 10:50:56 +00:00
struct device * dev = & pdev - > dev ;
2008-09-15 09:17:11 -07:00
struct net_device * netdev ;
struct enic * enic ;
int using_dac = 0 ;
unsigned int i ;
int err ;
2011-09-22 03:44:33 +00:00
# ifdef CONFIG_PCI_IOV
int pos = 0 ;
# endif
2012-01-19 22:25:36 +00:00
int num_pps = 1 ;
2008-09-15 09:17:11 -07:00
/* Allocate net device structure and initialize. Private
* instance data is initialized to zero .
*/
2013-09-04 11:17:14 +05:30
netdev = alloc_etherdev_mqs ( sizeof ( struct enic ) ,
ENIC_RQ_MAX , ENIC_WQ_MAX ) ;
2012-01-29 13:47:52 +00:00
if ( ! netdev )
2008-09-15 09:17:11 -07:00
return - ENOMEM ;
pci_set_drvdata ( pdev , netdev ) ;
SET_NETDEV_DEV ( netdev , & pdev - > dev ) ;
enic = netdev_priv ( netdev ) ;
enic - > netdev = netdev ;
enic - > pdev = pdev ;
/* Setup PCI resources
*/
2010-06-24 10:52:26 +00:00
err = pci_enable_device_mem ( pdev ) ;
2008-09-15 09:17:11 -07:00
if ( err ) {
2010-06-24 10:50:56 +00:00
dev_err ( dev , " Cannot enable PCI device, aborting \n " ) ;
2008-09-15 09:17:11 -07:00
goto err_out_free_netdev ;
}
err = pci_request_regions ( pdev , DRV_NAME ) ;
if ( err ) {
2010-06-24 10:50:56 +00:00
dev_err ( dev , " Cannot request PCI regions, aborting \n " ) ;
2008-09-15 09:17:11 -07:00
goto err_out_disable_device ;
}
pci_set_master ( pdev ) ;
/* Query PCI controller on system for DMA addressing
2018-05-23 11:17:39 -07:00
* limitation for the device . Try 47 - bit first , and
2008-09-15 09:17:11 -07:00
* fail to 32 - bit .
*/
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
err = dma_set_mask ( & pdev - > dev , DMA_BIT_MASK ( 47 ) ) ;
2008-09-15 09:17:11 -07:00
if ( err ) {
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
err = dma_set_mask ( & pdev - > dev , DMA_BIT_MASK ( 32 ) ) ;
2008-09-15 09:17:11 -07:00
if ( err ) {
2010-06-24 10:50:56 +00:00
dev_err ( dev , " No usable DMA configuration, aborting \n " ) ;
2008-09-15 09:17:11 -07:00
goto err_out_release_regions ;
}
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
err = dma_set_coherent_mask ( & pdev - > dev , DMA_BIT_MASK ( 32 ) ) ;
2008-09-15 09:17:11 -07:00
if ( err ) {
2010-06-24 10:50:56 +00:00
dev_err ( dev , " Unable to obtain %u-bit DMA "
" for consistent allocations, aborting \n " , 32 ) ;
2008-09-15 09:17:11 -07:00
goto err_out_release_regions ;
}
} else {
enic: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.
The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
When memory is allocated in 'vnic_dev_classifier()', 'vnic_dev_fw_info()',
'vnic_dev_notify_set()' and 'vnic_dev_stats_dump()' (vnic_dev.c) GFP_ATOMIC
must be used because its callers take a spinlock before calling these
functions.
When memory is allocated in '__enic_set_rsskey()' and 'enic_set_rsscpu()'
GFP_ATOMIC must be used because they can be called with a spinlock.
The call chain is:
enic_reset <-- takes 'enic->enic_api_lock'
--> enic_set_rss_nic_cfg
--> enic_set_rsskey
--> __enic_set_rsskey <-- uses dma_alloc_coherent
--> enic_set_rsscpu <-- uses dma_alloc_coherent
When memory is allocated in 'vnic_dev_init_prov2()' GFP_ATOMIC must be used
because a spinlock is hidden in the ENIC_DEVCMD_PROXY_BY_INDEX macro, when
this function is called in 'enic_set_port_profile()'.
When memory is allocated in 'vnic_dev_alloc_desc_ring()' GFP_KERNEL can be
used because it is only called from 5 functions ('vnic_dev_init_devcmd2()',
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()' and
'enic_wq_devcmd2_alloc()'.
'vnic_dev_init_devcmd2()': already uses GFP_KERNEL and no lock is taken
in the between.
'enic_wq_devcmd2_alloc()': is called from ' vnic_dev_init_devcmd2()'
which already uses GFP_KERNEL and no lock is taken in the between.
'vnic_cq_alloc()', 'vnic_rq_alloc()', 'vnic_wq_alloc()': are called
from 'enic_alloc_vnic_resources()'
'enic_alloc_vnic_resources()' has only 2 call chains:
1) enic_probe
--> enic_dev_init
--> enic_alloc_vnic_resources
'enic_probe()' is a probe function and no lock is taken in the between
2) enic_set_ringparam
--> enic_alloc_vnic_resources
'enic_set_ringparam()' is a .set_ringparam function (see struct
ethtool_ops). It seems to only take a mutex and no spinlock.
So all paths are safe to use GFP_KERNEL.
@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL
@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE
@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE
@@
@@
- PCI_DMA_NONE
+ DMA_NONE
@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)
@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)
@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)
@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)
@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2020-09-06 14:45:41 +02:00
err = dma_set_coherent_mask ( & pdev - > dev , DMA_BIT_MASK ( 47 ) ) ;
2008-09-15 09:17:11 -07:00
if ( err ) {
2010-06-24 10:50:56 +00:00
dev_err ( dev , " Unable to obtain %u-bit DMA "
2018-05-23 11:17:39 -07:00
" for consistent allocations, aborting \n " , 47 ) ;
2008-09-15 09:17:11 -07:00
goto err_out_release_regions ;
}
using_dac = 1 ;
}
2009-09-03 17:01:53 +00:00
/* Map vNIC resources from BAR0-5
2008-09-15 09:17:11 -07:00
*/
2009-09-03 17:01:53 +00:00
for ( i = 0 ; i < ARRAY_SIZE ( enic - > bar ) ; i + + ) {
if ( ! ( pci_resource_flags ( pdev , i ) & IORESOURCE_MEM ) )
continue ;
enic - > bar [ i ] . len = pci_resource_len ( pdev , i ) ;
enic - > bar [ i ] . vaddr = pci_iomap ( pdev , i , enic - > bar [ i ] . len ) ;
if ( ! enic - > bar [ i ] . vaddr ) {
2010-06-24 10:50:56 +00:00
dev_err ( dev , " Cannot memory-map BAR %d, aborting \n " , i ) ;
2009-09-03 17:01:53 +00:00
err = - ENODEV ;
goto err_out_iounmap ;
}
enic - > bar [ i ] . bus_addr = pci_resource_start ( pdev , i ) ;
2008-09-15 09:17:11 -07:00
}
/* Register vNIC device
*/
2009-09-03 17:01:53 +00:00
enic - > vdev = vnic_dev_register ( NULL , enic , pdev , enic - > bar ,
ARRAY_SIZE ( enic - > bar ) ) ;
2008-09-15 09:17:11 -07:00
if ( ! enic - > vdev ) {
2010-06-24 10:50:56 +00:00
dev_err ( dev , " vNIC registration failed, aborting \n " ) ;
2008-09-15 09:17:11 -07:00
err = - ENODEV ;
goto err_out_iounmap ;
}
2015-08-16 01:44:54 +05:30
err = vnic_devcmd_init ( enic - > vdev ) ;
if ( err )
goto err_out_vnic_unregister ;
2011-09-22 03:44:33 +00:00
# ifdef CONFIG_PCI_IOV
/* Get number of subvnics */
pos = pci_find_ext_capability ( pdev , PCI_EXT_CAP_ID_SRIOV ) ;
if ( pos ) {
pci_read_config_word ( pdev , pos + PCI_SRIOV_TOTAL_VF ,
2012-02-29 21:19:54 +00:00
& enic - > num_vfs ) ;
2011-09-22 03:44:33 +00:00
if ( enic - > num_vfs ) {
err = pci_enable_sriov ( pdev , enic - > num_vfs ) ;
if ( err ) {
dev_err ( dev , " SRIOV enable failed, aborting. "
" pci_enable_sriov() returned %d \n " ,
err ) ;
goto err_out_vnic_unregister ;
}
enic - > priv_flags | = ENIC_SRIOV_ENABLED ;
2012-01-19 22:25:36 +00:00
num_pps = enic - > num_vfs ;
2011-09-22 03:44:33 +00:00
}
}
# endif
2012-01-18 04:24:07 +00:00
2011-09-22 03:44:43 +00:00
/* Allocate structure for port profiles */
2011-11-29 11:08:00 +00:00
enic - > pp = kcalloc ( num_pps , sizeof ( * enic - > pp ) , GFP_KERNEL ) ;
2011-09-22 03:44:43 +00:00
if ( ! enic - > pp ) {
err = - ENOMEM ;
2012-01-18 04:24:07 +00:00
goto err_out_disable_sriov_pp ;
2011-09-22 03:44:43 +00:00
}
2008-09-15 09:17:11 -07:00
/* Issue device open to get device in known state
*/
err = enic_dev_open ( enic ) ;
if ( err ) {
2010-06-24 10:50:56 +00:00
dev_err ( dev , " vNIC dev open failed, aborting \n " ) ;
2012-01-18 04:24:07 +00:00
goto err_out_disable_sriov ;
2008-09-15 09:17:11 -07:00
}
2011-02-04 16:17:16 +00:00
/* Setup devcmd lock
*/
spin_lock_init ( & enic - > devcmd_lock ) ;
2013-08-16 15:47:40 -07:00
spin_lock_init ( & enic - > enic_api_lock ) ;
2011-02-04 16:17:16 +00:00
/*
* Set ingress vlan rewrite mode before vnic initialization
*/
err = enic_dev_set_ig_vlan_rewrite_mode ( enic ) ;
if ( err ) {
dev_err ( dev ,
" Failed to set ingress vlan rewrite mode, aborting. \n " ) ;
goto err_out_dev_close ;
}
2008-09-15 09:17:11 -07:00
/* Issue device init to initialize the vnic-to-switch link.
* We ' ll start with carrier off and wait for link UP
* notification later to turn on carrier . We don ' t need
* to wait here for the vnic - to - switch link initialization
* to complete ; link UP notification is the indication that
* the process is complete .
*/
netif_carrier_off ( netdev ) ;
2010-06-24 10:50:56 +00:00
/* Do not call dev_init for a dynamic vnic.
* For a dynamic vnic , init_prov_info will be
* called later by an upper layer .
*/
2012-02-20 00:12:04 +00:00
if ( ! enic_is_dynamic ( enic ) ) {
2010-05-17 22:50:19 -07:00
err = vnic_dev_init ( enic - > vdev , 0 ) ;
if ( err ) {
2010-06-24 10:50:56 +00:00
dev_err ( dev , " vNIC dev init failed, aborting \n " ) ;
2010-05-17 22:50:19 -07:00
goto err_out_dev_close ;
}
2008-09-15 09:17:11 -07:00
}
2009-09-03 17:02:45 +00:00
err = enic_dev_init ( enic ) ;
2008-09-15 09:17:11 -07:00
if ( err ) {
2010-06-24 10:50:56 +00:00
dev_err ( dev , " Device initialization failed, aborting \n " ) ;
2008-09-15 09:17:11 -07:00
goto err_out_dev_close ;
}
2013-09-04 11:17:14 +05:30
netif_set_real_num_tx_queues ( netdev , enic - > wq_count ) ;
2013-09-04 11:17:15 +05:30
netif_set_real_num_rx_queues ( netdev , enic - > rq_count ) ;
2013-09-04 11:17:14 +05:30
2010-06-24 10:50:12 +00:00
/* Setup notification timer, HW reset task, and wq locks
2008-09-15 09:17:11 -07:00
*/
treewide: setup_timer() -> timer_setup()
This converts all remaining cases of the old setup_timer() API into using
timer_setup(), where the callback argument is the structure already
holding the struct timer_list. These should have no behavioral changes,
since they just change which pointer is passed into the callback with
the same available pointers after conversion. It handles the following
examples, in addition to some other variations.
Casting from unsigned long:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
setup_timer(&ptr->my_timer, my_callback, ptr);
and forced object casts:
void my_callback(struct something *ptr)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr);
become:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
Direct function assignments:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
ptr->my_timer.function = my_callback;
have a temporary cast added, along with converting the args:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback;
And finally, callbacks without a data assignment:
void my_callback(unsigned long data)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, 0);
have their argument renamed to verify they're unused during conversion:
void my_callback(struct timer_list *unused)
{
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
The conversion is done with the following Coccinelle script:
spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup.cocci
@fix_address_of@
expression e;
@@
setup_timer(
-&(e)
+&e
, ...)
// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _timer;
type _cast_data;
@@
(
-setup_timer(&_E->_timer, NULL, _E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E->_timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, &_E);
+timer_setup(&_E._timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._timer, NULL, 0);
)
@change_timer_function_usage@
expression _E;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@
(
-setup_timer(&_E->_timer, _callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
_E->_timer@_stl.function = _callback;
|
_E->_timer@_stl.function = &_callback;
|
_E->_timer@_stl.function = (_cast_func)_callback;
|
_E->_timer@_stl.function = (_cast_func)&_callback;
|
_E._timer@_stl.function = _callback;
|
_E._timer@_stl.function = &_callback;
|
_E._timer@_stl.function = (_cast_func)_callback;
|
_E._timer@_stl.function = (_cast_func)&_callback;
)
// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
)
}
// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}
// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@
void _callback(struct timer_list *t)
{ ... }
// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@
void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _timer);
...
}
// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@
void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _timer);
}
// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@
(
-timer_setup(&_E->_timer, _callback, 0);
+setup_timer(&_E->_timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._timer, _callback, 0);
+setup_timer(&_E._timer, _callback, (_cast_data)&_E);
)
// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-16 14:43:17 -07:00
timer_setup ( & enic - > notify_timer , enic_notify_timer , 0 ) ;
2008-09-15 09:17:11 -07:00
2018-06-19 08:15:24 -07:00
enic_rfs_flw_tbl_init ( enic ) ;
2014-05-20 03:14:05 +05:30
enic_set_rx_coal_setting ( enic ) ;
2008-09-15 09:17:11 -07:00
INIT_WORK ( & enic - > reset , enic_reset ) ;
2015-10-01 14:18:47 +05:30
INIT_WORK ( & enic - > tx_hang_reset , enic_tx_hang_reset ) ;
2011-06-03 14:35:17 +00:00
INIT_WORK ( & enic - > change_mtu_work , enic_change_mtu_work ) ;
2008-09-15 09:17:11 -07:00
for ( i = 0 ; i < enic - > wq_count ; i + + )
spin_lock_init ( & enic - > wq_lock [ i ] ) ;
/* Register net device
*/
enic - > port_mtu = enic - > config . mtu ;
err = enic_set_mac_addr ( netdev , enic - > mac_addr ) ;
if ( err ) {
2010-06-24 10:50:56 +00:00
dev_err ( dev , " Invalid MAC address, aborting \n " ) ;
2009-09-03 17:02:45 +00:00
goto err_out_dev_deinit ;
2008-09-15 09:17:11 -07:00
}
2009-12-23 13:27:54 +00:00
enic - > tx_coalesce_usecs = enic - > config . intr_timer_usec ;
2014-05-20 03:14:05 +05:30
/* rx coalesce time already got initialized. This gets used
* if adaptive coal is turned off
*/
2009-12-23 13:27:54 +00:00
enic - > rx_coalesce_usecs = enic - > tx_coalesce_usecs ;
2012-01-18 04:24:02 +00:00
if ( enic_is_dynamic ( enic ) | | enic_is_sriov_vf ( enic ) )
2010-05-17 22:50:19 -07:00
netdev - > netdev_ops = & enic_netdev_dynamic_ops ;
else
netdev - > netdev_ops = & enic_netdev_ops ;
2008-09-15 09:17:11 -07:00
netdev - > watchdog_timeo = 2 * HZ ;
2013-07-22 09:59:18 -07:00
enic_set_ethtool_ops ( netdev ) ;
2008-09-15 09:17:11 -07:00
2013-04-19 02:04:27 +00:00
netdev - > features | = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX ;
2010-06-24 10:51:59 +00:00
if ( ENIC_SETTING ( enic , LOOP ) ) {
2013-04-19 02:04:27 +00:00
netdev - > features & = ~ NETIF_F_HW_VLAN_CTAG_TX ;
2010-06-24 10:51:59 +00:00
enic - > loop_enable = 1 ;
enic - > loop_tag = enic - > config . loop_tag ;
dev_info ( dev , " loopback tag=0x%04x \n " , enic - > loop_tag ) ;
}
2008-09-15 09:17:11 -07:00
if ( ENIC_SETTING ( enic , TXCSUM ) )
2011-04-07 02:43:48 +00:00
netdev - > hw_features | = NETIF_F_SG | NETIF_F_HW_CSUM ;
2008-09-15 09:17:11 -07:00
if ( ENIC_SETTING ( enic , TSO ) )
2011-04-07 02:43:48 +00:00
netdev - > hw_features | = NETIF_F_TSO |
2008-09-15 09:17:11 -07:00
NETIF_F_TSO6 | NETIF_F_TSO_ECN ;
2013-09-04 11:17:15 +05:30
if ( ENIC_SETTING ( enic , RSS ) )
netdev - > hw_features | = NETIF_F_RXHASH ;
2011-04-07 02:43:48 +00:00
if ( ENIC_SETTING ( enic , RXCSUM ) )
netdev - > hw_features | = NETIF_F_RXCSUM ;
2017-02-08 16:43:08 -08:00
if ( ENIC_SETTING ( enic , VXLAN ) ) {
u64 patch_level ;
2018-03-01 11:07:20 -08:00
u64 a1 = 0 ;
2017-02-08 16:43:08 -08:00
netdev - > hw_enc_features | = NETIF_F_RXCSUM |
NETIF_F_TSO |
2018-03-01 11:07:20 -08:00
NETIF_F_TSO6 |
2017-02-08 16:43:08 -08:00
NETIF_F_TSO_ECN |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_HW_CSUM |
NETIF_F_GSO_UDP_TUNNEL_CSUM ;
netdev - > hw_features | = netdev - > hw_enc_features ;
/* get bit mask from hw about supported offload bit level
* BIT ( 0 ) = fw supports patch_level 0
* fcoe bit = encap
* fcoe_fc_crc_ok = outer csum ok
* BIT ( 1 ) = always set by fw
* BIT ( 2 ) = fw supports patch_level 2
* BIT ( 0 ) in rss_hash = encap
* BIT ( 1 , 2 ) in rss_hash = outer_ip_csum_ok /
* outer_tcp_csum_ok
* used in enic_rq_indicate_buf
*/
err = vnic_dev_get_supported_feature_ver ( enic - > vdev ,
VIC_FEATURE_VXLAN ,
2018-03-01 11:07:20 -08:00
& patch_level , & a1 ) ;
2017-02-08 16:43:08 -08:00
if ( err )
patch_level = 0 ;
2018-03-01 11:07:20 -08:00
enic - > vxlan . flags = ( u8 ) a1 ;
2017-02-08 16:43:08 -08:00
/* mask bits that are supported by driver
*/
patch_level & = BIT_ULL ( 0 ) | BIT_ULL ( 2 ) ;
patch_level = fls ( patch_level ) ;
patch_level = patch_level ? patch_level - 1 : 0 ;
enic - > vxlan . patch_level = patch_level ;
2020-07-14 12:18:25 -07:00
if ( vnic_dev_get_res_count ( enic - > vdev , RES_TYPE_WQ ) = = 1 | |
enic - > vxlan . flags & ENIC_VXLAN_MULTI_WQ ) {
netdev - > udp_tunnel_nic_info = & enic_udp_tunnels_v4 ;
if ( enic - > vxlan . flags & ENIC_VXLAN_OUTER_IPV6 )
netdev - > udp_tunnel_nic_info = & enic_udp_tunnels ;
}
2017-02-08 16:43:08 -08:00
}
2011-04-07 02:43:48 +00:00
netdev - > features | = netdev - > hw_features ;
2016-04-16 00:40:43 +05:30
netdev - > vlan_features | = netdev - > features ;
2011-04-07 02:43:48 +00:00
2014-06-23 16:08:02 +05:30
# ifdef CONFIG_RFS_ACCEL
netdev - > hw_features | = NETIF_F_NTUPLE ;
# endif
2008-09-15 09:17:11 -07:00
if ( using_dac )
netdev - > features | = NETIF_F_HIGHDMA ;
2011-08-16 06:29:00 +00:00
netdev - > priv_flags | = IFF_UNICAST_FLT ;
ethernet: use core min/max MTU checking
et131x: min_mtu 64, max_mtu 9216
altera_tse: min_mtu 64, max_mtu 1500
amd8111e: min_mtu 60, max_mtu 9000
bnad: min_mtu 46, max_mtu 9000
macb: min_mtu 68, max_mtu 1500 or 10240 depending on hardware capability
xgmac: min_mtu 46, max_mtu 9000
cxgb2: min_mtu 68, max_mtu 9582 (pm3393) or 9600 (vsc7326)
enic: min_mtu 68, max_mtu 9000
gianfar: min_mtu 50, max_mu 9586
hns_enet: min_mtu 68, max_mtu 9578 (v1) or 9706 (v2)
ksz884x: min_mtu 60, max_mtu 1894
myri10ge: min_mtu 68, max_mtu 9000
natsemi: min_mtu 64, max_mtu 2024
nfp: min_mtu 68, max_mtu hardware-specific
forcedeth: min_mtu 64, max_mtu 1500 or 9100, depending on hardware
pch_gbe: min_mtu 46, max_mtu 10300
pasemi_mac: min_mtu 64, max_mtu 9000
qcaspi: min_mtu 46, max_mtu 1500
- remove qcaspi_netdev_change_mtu as it is now redundant
rocker: min_mtu 68, max_mtu 9000
sxgbe: min_mtu 68, max_mtu 9000
stmmac: min_mtu 46, max_mtu depends on hardware
tehuti: min_mtu 60, max_mtu 16384
- driver had no max mtu checking, but product docs say 16k jumbo packets
are supported by the hardware
netcp: min_mtu 68, max_mtu 9486
- remove netcp_ndo_change_mtu as it is now redundant
via-velocity: min_mtu 64, max_mtu 9000
octeon: min_mtu 46, max_mtu 65370
CC: netdev@vger.kernel.org
CC: Mark Einon <mark.einon@gmail.com>
CC: Vince Bridgers <vbridger@opensource.altera.com>
CC: Rasesh Mody <rasesh.mody@qlogic.com>
CC: Nicolas Ferre <nicolas.ferre@atmel.com>
CC: Santosh Raspatur <santosh@chelsio.com>
CC: Hariprasad S <hariprasad@chelsio.com>
CC: Christian Benvenuti <benve@cisco.com>
CC: Sujith Sankar <ssujith@cisco.com>
CC: Govindarajulu Varadarajan <_govind@gmx.com>
CC: Neel Patel <neepatel@cisco.com>
CC: Claudiu Manoil <claudiu.manoil@freescale.com>
CC: Yisen Zhuang <yisen.zhuang@huawei.com>
CC: Salil Mehta <salil.mehta@huawei.com>
CC: Hyong-Youb Kim <hykim@myri.com>
CC: Jakub Kicinski <jakub.kicinski@netronome.com>
CC: Olof Johansson <olof@lixom.net>
CC: Jiri Pirko <jiri@resnulli.us>
CC: Byungho An <bh74.an@samsung.com>
CC: Girish K S <ks.giri@samsung.com>
CC: Vipul Pandya <vipul.pandya@samsung.com>
CC: Giuseppe Cavallaro <peppe.cavallaro@st.com>
CC: Alexandre Torgue <alexandre.torgue@st.com>
CC: Andy Gospodarek <andy@greyhouse.net>
CC: Wingman Kwok <w-kwok2@ti.com>
CC: Murali Karicheri <m-karicheri2@ti.com>
CC: Francois Romieu <romieu@fr.zoreil.com>
Signed-off-by: Jarod Wilson <jarod@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-10-17 15:54:17 -04:00
/* MTU range: 68 - 9000 */
netdev - > min_mtu = ENIC_MIN_MTU ;
netdev - > max_mtu = ENIC_MAX_MTU ;
2018-07-30 09:56:54 -07:00
netdev - > mtu = enic - > port_mtu ;
ethernet: use core min/max MTU checking
et131x: min_mtu 64, max_mtu 9216
altera_tse: min_mtu 64, max_mtu 1500
amd8111e: min_mtu 60, max_mtu 9000
bnad: min_mtu 46, max_mtu 9000
macb: min_mtu 68, max_mtu 1500 or 10240 depending on hardware capability
xgmac: min_mtu 46, max_mtu 9000
cxgb2: min_mtu 68, max_mtu 9582 (pm3393) or 9600 (vsc7326)
enic: min_mtu 68, max_mtu 9000
gianfar: min_mtu 50, max_mu 9586
hns_enet: min_mtu 68, max_mtu 9578 (v1) or 9706 (v2)
ksz884x: min_mtu 60, max_mtu 1894
myri10ge: min_mtu 68, max_mtu 9000
natsemi: min_mtu 64, max_mtu 2024
nfp: min_mtu 68, max_mtu hardware-specific
forcedeth: min_mtu 64, max_mtu 1500 or 9100, depending on hardware
pch_gbe: min_mtu 46, max_mtu 10300
pasemi_mac: min_mtu 64, max_mtu 9000
qcaspi: min_mtu 46, max_mtu 1500
- remove qcaspi_netdev_change_mtu as it is now redundant
rocker: min_mtu 68, max_mtu 9000
sxgbe: min_mtu 68, max_mtu 9000
stmmac: min_mtu 46, max_mtu depends on hardware
tehuti: min_mtu 60, max_mtu 16384
- driver had no max mtu checking, but product docs say 16k jumbo packets
are supported by the hardware
netcp: min_mtu 68, max_mtu 9486
- remove netcp_ndo_change_mtu as it is now redundant
via-velocity: min_mtu 64, max_mtu 9000
octeon: min_mtu 46, max_mtu 65370
CC: netdev@vger.kernel.org
CC: Mark Einon <mark.einon@gmail.com>
CC: Vince Bridgers <vbridger@opensource.altera.com>
CC: Rasesh Mody <rasesh.mody@qlogic.com>
CC: Nicolas Ferre <nicolas.ferre@atmel.com>
CC: Santosh Raspatur <santosh@chelsio.com>
CC: Hariprasad S <hariprasad@chelsio.com>
CC: Christian Benvenuti <benve@cisco.com>
CC: Sujith Sankar <ssujith@cisco.com>
CC: Govindarajulu Varadarajan <_govind@gmx.com>
CC: Neel Patel <neepatel@cisco.com>
CC: Claudiu Manoil <claudiu.manoil@freescale.com>
CC: Yisen Zhuang <yisen.zhuang@huawei.com>
CC: Salil Mehta <salil.mehta@huawei.com>
CC: Hyong-Youb Kim <hykim@myri.com>
CC: Jakub Kicinski <jakub.kicinski@netronome.com>
CC: Olof Johansson <olof@lixom.net>
CC: Jiri Pirko <jiri@resnulli.us>
CC: Byungho An <bh74.an@samsung.com>
CC: Girish K S <ks.giri@samsung.com>
CC: Vipul Pandya <vipul.pandya@samsung.com>
CC: Giuseppe Cavallaro <peppe.cavallaro@st.com>
CC: Alexandre Torgue <alexandre.torgue@st.com>
CC: Andy Gospodarek <andy@greyhouse.net>
CC: Wingman Kwok <w-kwok2@ti.com>
CC: Murali Karicheri <m-karicheri2@ti.com>
CC: Francois Romieu <romieu@fr.zoreil.com>
Signed-off-by: Jarod Wilson <jarod@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-10-17 15:54:17 -04:00
2008-09-15 09:17:11 -07:00
err = register_netdev ( netdev ) ;
if ( err ) {
2010-06-24 10:50:56 +00:00
dev_err ( dev , " Cannot register net device, aborting \n " ) ;
2009-09-03 17:02:45 +00:00
goto err_out_dev_deinit ;
2008-09-15 09:17:11 -07:00
}
2014-09-03 03:17:19 +05:30
enic - > rx_copybreak = RX_COPYBREAK_DEFAULT ;
2008-09-15 09:17:11 -07:00
return 0 ;
2009-09-03 17:02:45 +00:00
err_out_dev_deinit :
enic_dev_deinit ( enic ) ;
2008-09-15 09:17:11 -07:00
err_out_dev_close :
vnic_dev_close ( enic - > vdev ) ;
2011-09-22 03:44:33 +00:00
err_out_disable_sriov :
2012-01-18 04:24:07 +00:00
kfree ( enic - > pp ) ;
err_out_disable_sriov_pp :
2011-09-22 03:44:33 +00:00
# ifdef CONFIG_PCI_IOV
if ( enic_sriov_enabled ( enic ) ) {
pci_disable_sriov ( pdev ) ;
enic - > priv_flags & = ~ ENIC_SRIOV_ENABLED ;
}
# endif
2015-08-21 11:38:41 -07:00
err_out_vnic_unregister :
2012-01-18 04:24:12 +00:00
vnic_dev_unregister ( enic - > vdev ) ;
2008-09-15 09:17:11 -07:00
err_out_iounmap :
enic_iounmap ( enic ) ;
err_out_release_regions :
pci_release_regions ( pdev ) ;
err_out_disable_device :
pci_disable_device ( pdev ) ;
err_out_free_netdev :
free_netdev ( netdev ) ;
return err ;
}
2012-12-03 09:23:05 -05:00
static void enic_remove ( struct pci_dev * pdev )
2008-09-15 09:17:11 -07:00
{
struct net_device * netdev = pci_get_drvdata ( pdev ) ;
if ( netdev ) {
struct enic * enic = netdev_priv ( netdev ) ;
2010-12-12 16:45:14 +01:00
cancel_work_sync ( & enic - > reset ) ;
2011-06-03 14:35:17 +00:00
cancel_work_sync ( & enic - > change_mtu_work ) ;
2008-09-15 09:17:11 -07:00
unregister_netdev ( netdev ) ;
2009-09-03 17:02:45 +00:00
enic_dev_deinit ( enic ) ;
2008-09-15 09:17:11 -07:00
vnic_dev_close ( enic - > vdev ) ;
2011-09-22 03:44:33 +00:00
# ifdef CONFIG_PCI_IOV
if ( enic_sriov_enabled ( enic ) ) {
pci_disable_sriov ( pdev ) ;
enic - > priv_flags & = ~ ENIC_SRIOV_ENABLED ;
}
# endif
2011-09-22 03:44:43 +00:00
kfree ( enic - > pp ) ;
2008-09-15 09:17:11 -07:00
vnic_dev_unregister ( enic - > vdev ) ;
enic_iounmap ( enic ) ;
pci_release_regions ( pdev ) ;
pci_disable_device ( pdev ) ;
free_netdev ( netdev ) ;
}
}
static struct pci_driver enic_driver = {
. name = DRV_NAME ,
. id_table = enic_id_table ,
. probe = enic_probe ,
2012-12-03 09:23:05 -05:00
. remove = enic_remove ,
2008-09-15 09:17:11 -07:00
} ;
2021-04-07 15:07:05 +00:00
module_pci_driver ( enic_driver ) ;