net: remove 'fallback' argument from dev->ndo_select_queue()
After the previous patch, all the callers of ndo_select_queue() provide
netdev_pick_tx as the 'fallback' argument. The only exceptions are nested
calls to ndo_select_queue(), which pass down the 'fallback' available in
the current scope - still netdev_pick_tx.

We can drop the argument and replace each fallback() invocation with
netdev_pick_tx(). This avoids an indirect call per xmit packet in some
scenarios (TCP SYN, unconnected UDP, XDP generic, pktgen) with device
drivers implementing such ndo. It also cleans up the code a bit.

Tested with ixgbe and CONFIG_FCOE=m.

With pktgen using queue xmit:

threads		vanilla (kpps)	patched (kpps)
1		2334		2428
2		4166		4278
4		7895		8100

v1 -> v2:
 - rebased after helper's name change

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
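Every driver hunk below repeats the same two-part change: the callback
signature loses its select_queue_fallback_t parameter, and each fallback()
call in the body becomes a direct netdev_pick_tx() call. The following
minimal userspace sketch (stand-in types and a trivial netdev_pick_tx; not
the kernel's definitions) models why this saves an indirect call per
transmitted packet:

/*
 * Minimal userspace sketch, not kernel code: the old callback shape
 * receives its fallback as a function pointer and makes an indirect
 * call per packet; the new shape calls the known helper directly.
 */
#include <stdio.h>

struct net_device;	/* opaque stand-ins for the kernel types */
struct sk_buff;

/* Stand-in for netdev_pick_tx(), the one fallback every caller passed. */
static unsigned short netdev_pick_tx(struct net_device *dev,
				     struct sk_buff *skb,
				     struct net_device *sb_dev)
{
	return 0;	/* the real helper hashes the skb to a tx queue */
}

typedef unsigned short (*select_queue_fallback_t)(struct net_device *,
						  struct sk_buff *,
						  struct net_device *);

/* Old shape: one indirect call through 'fallback' per transmitted packet. */
static unsigned short old_select_queue(struct net_device *dev,
				       struct sk_buff *skb,
				       struct net_device *sb_dev,
				       select_queue_fallback_t fallback)
{
	return fallback(dev, skb, NULL);
}

/* New shape: 'fallback' is gone; the direct call can be resolved (and
 * potentially inlined) at build time.
 */
static unsigned short new_select_queue(struct net_device *dev,
				       struct sk_buff *skb,
				       struct net_device *sb_dev)
{
	return netdev_pick_tx(dev, skb, NULL);
}

int main(void)
{
	printf("old: %hu\n", old_select_queue(NULL, NULL, NULL, netdev_pick_tx));
	printf("new: %hu\n", new_select_queue(NULL, NULL, NULL));
	return 0;
}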
commit a350eccee5 (parent b71b5837f8)
@@ -423,8 +423,7 @@ tx_finish:
 
 static u16 hfi1_vnic_select_queue(struct net_device *netdev,
 				  struct sk_buff *skb,
-				  struct net_device *sb_dev,
-				  select_queue_fallback_t fallback)
+				  struct net_device *sb_dev)
 {
 	struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
 	struct opa_vnic_skb_mdata *mdata;

@@ -95,8 +95,7 @@ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
 }
 
 static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
-				 struct net_device *sb_dev,
-				 select_queue_fallback_t fallback)
+				 struct net_device *sb_dev)
 {
 	struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
 	struct opa_vnic_skb_mdata *mdata;

@@ -106,8 +105,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
 	mdata = skb_push(skb, sizeof(*mdata));
 	mdata->entropy = opa_vnic_calc_entropy(skb);
 	mdata->vl = opa_vnic_get_vl(adapter, skb);
-	rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
-					       sb_dev, fallback);
+	rc = adapter->rn_ops->ndo_select_queue(netdev, skb, sb_dev);
 	skb_pull(skb, sizeof(*mdata));
 	return rc;
 }

@@ -4114,8 +4114,7 @@ static inline int bond_slave_override(struct bonding *bond,
 
 
 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
-			     struct net_device *sb_dev,
-			     select_queue_fallback_t fallback)
+			     struct net_device *sb_dev)
 {
 	/* This helper function exists to help dev_pick_tx get the correct
 	 * destination queue. Using a helper function skips a call to

@@ -2258,8 +2258,7 @@ error_drop_packet:
 }
 
 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    struct net_device *sb_dev,
-			    select_queue_fallback_t fallback)
+			    struct net_device *sb_dev)
 {
 	u16 qid;
 	/* we suspect that this is good for in--kernel network services that

@@ -2269,7 +2268,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
 	if (skb_rx_queue_recorded(skb))
 		qid = skb_get_rx_queue(skb);
 	else
-		qid = fallback(dev, skb, NULL);
+		qid = netdev_pick_tx(dev, skb, NULL);
 
 	return qid;
 }

@@ -2274,8 +2274,7 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
 };
 
 static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
-				    struct net_device *sb_dev,
-				    select_queue_fallback_t fallback)
+				    struct net_device *sb_dev)
 {
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
 	u16 queue = skb_get_queue_mapping(skb);

@@ -2283,7 +2282,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
 	unsigned int q, port;
 
 	if (!netdev_uses_dsa(dev))
-		return fallback(dev, skb, NULL);
+		return netdev_pick_tx(dev, skb, NULL);
 
 	/* DSA tagging layer will have configured the correct queue */
 	q = BRCM_TAG_GET_QUEUE(queue);

@@ -2291,7 +2290,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
 	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
 
 	if (unlikely(!tx_ring))
-		return fallback(dev, skb, NULL);
+		return netdev_pick_tx(dev, skb, NULL);
 
 	return tx_ring->index;
 }

@@ -1909,8 +1909,7 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 }
 
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       struct net_device *sb_dev,
-		       select_queue_fallback_t fallback)
+		       struct net_device *sb_dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 

@@ -1932,7 +1931,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
 	}
 
 	/* select a non-FCoE queue */
-	return fallback(dev, skb, NULL) %
+	return netdev_pick_tx(dev, skb, NULL) %
 	       (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
 }
 

@@ -498,8 +498,7 @@ int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val);
 
 /* select_queue callback */
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       struct net_device *sb_dev,
-		       select_queue_fallback_t fallback);
+		       struct net_device *sb_dev);
 
 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 					struct bnx2x_fastpath *fp,

@@ -979,8 +979,7 @@ freeout:
 }
 
 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
-			     struct net_device *sb_dev,
-			     select_queue_fallback_t fallback)
+			     struct net_device *sb_dev)
 {
 	int txq;
 

@@ -1022,7 +1021,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
 		return txq;
 	}
 
-	return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
 }
 
 static int closest_timer(const struct sge *s, int time)

@@ -1964,8 +1964,7 @@ static void hns_nic_get_stats64(struct net_device *ndev,
 
 static u16
 hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
-		     struct net_device *sb_dev,
-		     select_queue_fallback_t fallback)
+		     struct net_device *sb_dev)
 {
 	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
 	struct hns_nic_priv *priv = netdev_priv(ndev);

@@ -1975,7 +1974,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	    is_multicast_ether_addr(eth_hdr->h_dest))
 		return 0;
 	else
-		return fallback(ndev, skb, NULL);
+		return netdev_pick_tx(ndev, skb, NULL);
 }
 
 static const struct net_device_ops hns_nic_netdev_ops = {

@@ -8483,8 +8483,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 
 #ifdef IXGBE_FCOE
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
-			      struct net_device *sb_dev,
-			      select_queue_fallback_t fallback)
+			      struct net_device *sb_dev)
 {
 	struct ixgbe_adapter *adapter;
 	struct ixgbe_ring_feature *f;

@@ -8514,7 +8513,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 		break;
 	/* fall through */
 	default:
-		return fallback(dev, skb, sb_dev);
+		return netdev_pick_tx(dev, skb, sb_dev);
 	}
 
 	f = &adapter->ring_feature[RING_F_FCOE];

@@ -685,16 +685,15 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
 }
 
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-			 struct net_device *sb_dev,
-			 select_queue_fallback_t fallback)
+			 struct net_device *sb_dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	u16 rings_p_up = priv->num_tx_rings_p_up;
 
 	if (netdev_get_num_tc(dev))
-		return fallback(dev, skb, NULL);
+		return netdev_pick_tx(dev, skb, NULL);
 
-	return fallback(dev, skb, NULL) % rings_p_up;
+	return netdev_pick_tx(dev, skb, NULL) % rings_p_up;
 }
 
 static void mlx4_bf_copy(void __iomem *dst, const void *src,

@@ -698,8 +698,7 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-			 struct net_device *sb_dev,
-			 select_queue_fallback_t fallback);
+			 struct net_device *sb_dev);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
 			       struct mlx4_en_rx_alloc *frame,

@@ -769,8 +769,7 @@ struct mlx5e_profile {
 void mlx5e_build_ptys2ethtool_map(void);
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       struct net_device *sb_dev,
-		       select_queue_fallback_t fallback);
+		       struct net_device *sb_dev);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 			  struct mlx5e_tx_wqe *wqe, u16 pi);

@@ -110,11 +110,10 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
 #endif
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       struct net_device *sb_dev,
-		       select_queue_fallback_t fallback)
+		       struct net_device *sb_dev)
 {
+	int channel_ix = netdev_pick_tx(dev, skb, NULL);
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	int channel_ix = fallback(dev, skb, NULL);
 	u16 num_channels;
 	int up = 0;
 

@@ -498,8 +498,7 @@ struct qede_reload_args {
 /* Datapath functions definition */
 netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
 u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
-		      struct net_device *sb_dev,
-		      select_queue_fallback_t fallback);
+		      struct net_device *sb_dev);
 netdev_features_t qede_features_check(struct sk_buff *skb,
 				      struct net_device *dev,
 				      netdev_features_t features);

@@ -1696,8 +1696,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 }
 
 u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
-		      struct net_device *sb_dev,
-		      select_queue_fallback_t fallback)
+		      struct net_device *sb_dev)
 {
 	struct qede_dev *edev = netdev_priv(dev);
 	int total_txq;

@@ -1705,7 +1704,7 @@ u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
 	total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
 
 	return QEDE_TSS_COUNT(edev) ?
-		fallback(dev, skb, NULL) % total_txq : 0;
+		netdev_pick_tx(dev, skb, NULL) % total_txq : 0;
 }
 
 /* 8B udp header + 8B base tunnel header + 32B option length */

@@ -1615,8 +1615,7 @@ drop:
 }
 
 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
-			     struct net_device *sb_dev,
-			     select_queue_fallback_t fallback)
+			     struct net_device *sb_dev)
 {
 	/* If skb needs TX timestamp, it is handled in network control queue */
 	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :

@@ -101,8 +101,7 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb,
 }
 
 static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    struct net_device *sb_dev,
-			    select_queue_fallback_t fallback)
+			    struct net_device *sb_dev)
 {
 	struct vnet_port *port = netdev_priv(dev);
 

@@ -234,8 +234,7 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
 }
 
 static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
-			     struct net_device *sb_dev,
-			     select_queue_fallback_t fallback)
+			     struct net_device *sb_dev)
 {
 	struct vnet *vp = netdev_priv(dev);
 	struct vnet_port *port = __tx_port_find(vp, skb);

@@ -308,7 +308,7 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev,
  * If a valid queue has already been assigned, then use that.
  * Otherwise compute tx queue based on hash and the send table.
  *
- * This is basically similar to default (__netdev_pick_tx) with the added step
+ * This is basically similar to default (netdev_pick_tx) with the added step
  * of using the host send_table when no other queue has been assigned.
  *
  * TODO support XPS - but get_xps_queue not exported

@@ -331,8 +331,7 @@ static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
 }
 
 static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
-			       struct net_device *sb_dev,
-			       select_queue_fallback_t fallback)
+			       struct net_device *sb_dev)
 {
 	struct net_device_context *ndc = netdev_priv(ndev);
 	struct net_device *vf_netdev;

@@ -344,10 +343,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
 
 		if (vf_ops->ndo_select_queue)
-			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
-						       sb_dev, fallback);
+			txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
 		else
-			txq = fallback(vf_netdev, skb, NULL);
+			txq = netdev_pick_tx(vf_netdev, skb, NULL);
 
 		/* Record the queue selected by VF so that it can be
 		 * used for common case where VF has more queues than

@@ -115,8 +115,7 @@ static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb,
 
 static u16 net_failover_select_queue(struct net_device *dev,
 				     struct sk_buff *skb,
-				     struct net_device *sb_dev,
-				     select_queue_fallback_t fallback)
+				     struct net_device *sb_dev)
 {
 	struct net_failover_info *nfo_info = netdev_priv(dev);
 	struct net_device *primary_dev;

@@ -127,10 +126,9 @@ static u16 net_failover_select_queue(struct net_device *dev,
 		const struct net_device_ops *ops = primary_dev->netdev_ops;
 
 		if (ops->ndo_select_queue)
-			txq = ops->ndo_select_queue(primary_dev, skb,
-						    sb_dev, fallback);
+			txq = ops->ndo_select_queue(primary_dev, skb, sb_dev);
 		else
-			txq = fallback(primary_dev, skb, NULL);
+			txq = netdev_pick_tx(primary_dev, skb, NULL);
 
 		qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
 
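The netvsc and net_failover hunks above show the one place where nested
ndo_select_queue() calls remain: an upper device delegating queue selection
to its lower device. A distilled sketch of that delegation shape after the
change (stand-in types and a hypothetical delegate_select_queue() helper;
not the drivers' actual code, which also caps and records the queue):

/* Delegation shape shared by netvsc_select_queue() and
 * net_failover_select_queue() after this patch: use the lower device's
 * own ndo_select_queue when it has one, otherwise call netdev_pick_tx()
 * directly. No fallback pointer is threaded through anymore.
 */
#include <stddef.h>

struct sk_buff;
struct net_device;

struct net_device_ops {	/* stand-in; only the field used here */
	unsigned short (*ndo_select_queue)(struct net_device *dev,
					   struct sk_buff *skb,
					   struct net_device *sb_dev);
};

static unsigned short netdev_pick_tx(struct net_device *dev,
				     struct sk_buff *skb,
				     struct net_device *sb_dev)
{
	return 0;	/* stand-in for the real hashing helper */
}

/* Hypothetical helper modelling the pattern in both drivers. */
static unsigned short delegate_select_queue(struct net_device *lower,
					    const struct net_device_ops *ops,
					    struct sk_buff *skb,
					    struct net_device *sb_dev)
{
	if (ops->ndo_select_queue)
		return ops->ndo_select_queue(lower, skb, sb_dev);
	return netdev_pick_tx(lower, skb, NULL);
}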
@@ -1691,8 +1691,7 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
-			     struct net_device *sb_dev,
-			     select_queue_fallback_t fallback)
+			     struct net_device *sb_dev)
 {
 	/*
 	 * This helper function exists to help dev_pick_tx get the correct

@@ -606,8 +606,7 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 }
 
 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    struct net_device *sb_dev,
-			    select_queue_fallback_t fallback)
+			    struct net_device *sb_dev)
 {
 	struct tun_struct *tun = netdev_priv(dev);
 	u16 ret;

@@ -1282,8 +1282,7 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
 
 static u16
 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
-				struct net_device *sb_dev,
-				select_queue_fallback_t fallback)
+				struct net_device *sb_dev)
 {
 	skb->priority = cfg80211_classify8021d(skb, NULL);
 	return mwifiex_1d_to_wmm_queue[skb->priority];

@@ -148,8 +148,7 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 }
 
 static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
-			       struct net_device *sb_dev,
-			       select_queue_fallback_t fallback)
+			       struct net_device *sb_dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	unsigned int size = vif->hash.size;

@@ -162,7 +161,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
 		return 0;
 
 	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
-		return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+		return netdev_pick_tx(dev, skb, NULL) %
+		       dev->real_num_tx_queues;
 
 	xenvif_set_skb_hash(vif, skb);
 

@@ -543,8 +543,7 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
 }
 
 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
-			       struct net_device *sb_dev,
-			       select_queue_fallback_t fallback)
+			       struct net_device *sb_dev)
 {
 	unsigned int num_queues = dev->real_num_tx_queues;
 	u32 hash;

@@ -245,8 +245,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
 }
 
 static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    struct net_device *sb_dev,
-			    select_queue_fallback_t fallback)
+			    struct net_device *sb_dev)
 {
 	struct adapter *padapter = rtw_netdev_priv(dev);
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

@@ -404,8 +404,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
 
 
 static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    struct net_device *sb_dev,
-			    select_queue_fallback_t fallback)
+			    struct net_device *sb_dev)
 {
 	struct adapter *padapter = rtw_netdev_priv(dev);
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

@@ -986,8 +986,7 @@ struct devlink;
 *	those the driver believes to be appropriate.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
-*                         struct net_device *sb_dev,
-*                         select_queue_fallback_t fallback);
+*                         struct net_device *sb_dev);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
 *

@@ -1268,8 +1267,7 @@ struct net_device_ops {
 						    netdev_features_t features);
 	u16			(*ndo_select_queue)(struct net_device *dev,
 						    struct sk_buff *skb,
-						    struct net_device *sb_dev,
-						    select_queue_fallback_t fallback);
+						    struct net_device *sb_dev);
 	void			(*ndo_change_rx_flags)(struct net_device *dev,
 						       int flags);
 	void			(*ndo_set_rx_mode)(struct net_device *dev);

@@ -2641,11 +2639,9 @@ void dev_close_many(struct list_head *head, bool unlink);
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
-		     struct net_device *sb_dev,
-		     select_queue_fallback_t fallback);
+		     struct net_device *sb_dev);
 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
-		       struct net_device *sb_dev,
-		       select_queue_fallback_t fallback);
+		       struct net_device *sb_dev);
 int dev_queue_xmit(struct sk_buff *skb);
 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);

@@ -3689,16 +3689,14 @@ get_cpus_map:
 }
 
 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
-		     struct net_device *sb_dev,
-		     select_queue_fallback_t fallback)
+		     struct net_device *sb_dev)
 {
 	return 0;
 }
 EXPORT_SYMBOL(dev_pick_tx_zero);
 
 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
-		       struct net_device *sb_dev,
-		       select_queue_fallback_t fallback)
+		       struct net_device *sb_dev)
 {
 	return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
 }

@@ -3748,8 +3746,7 @@ struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
 		const struct net_device_ops *ops = dev->netdev_ops;
 
 		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
-							    netdev_pick_tx);
+			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
 		else
 			queue_index = netdev_pick_tx(dev, skb, sb_dev);
 

@@ -1133,8 +1133,7 @@ static void ieee80211_uninit(struct net_device *dev)
 
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
 					 struct sk_buff *skb,
-					 struct net_device *sb_dev,
-					 select_queue_fallback_t fallback)
+					 struct net_device *sb_dev)
 {
 	return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }

@@ -1179,8 +1178,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
 					  struct sk_buff *skb,
-					  struct net_device *sb_dev,
-					  select_queue_fallback_t fallback)
+					  struct net_device *sb_dev)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;

@@ -287,8 +287,7 @@ static u16 packet_pick_tx_queue(struct sk_buff *skb)
 #endif
 	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
 	if (ops->ndo_select_queue) {
-		queue_index = ops->ndo_select_queue(dev, skb, NULL,
-						    netdev_pick_tx);
+		queue_index = ops->ndo_select_queue(dev, skb, NULL);
 		queue_index = netdev_cap_txqueue(dev, queue_index);
 	} else {
 		queue_index = netdev_pick_tx(dev, skb, NULL);